text
stringlengths
6
947k
repo_name
stringlengths
5
100
path
stringlengths
4
231
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
6
947k
score
float64
0
0.34
#!/usr/bin/env python
"""Small bottle web app exposing the `thisplace` lat/lng <-> words encoding.

HTML routes render either the 'map' template (a resolved location) or the
'help' template (usage examples plus an optional error message); a small
JSON API converts between coordinates and word hashes.
"""
import bottle
from bottle import (get, run, abort, static_file, template)
import thisplace

# Example locations shown on the help page: name -> four-word hash.
example_locs = [("sydney", (-33.867480754852295, 151.20700120925903)),
                ("battery", (40.70329427719116, -74.0170168876648)),
                ("san_fran", (37.790114879608154, -122.4202036857605))]
example_locs = {name: thisplace.four_words(pos) for name, pos in example_locs}


@get('/static/<filename:path>')
def serve_static(filename):
    """Serve assets from the local ./static directory."""
    return static_file(filename, root='static')


@get('/')
def index():
    """Landing page: empty map."""
    return template('map', lat=None, lng=None)


@get('/help.html')
def help():
    """Help page listing the example locations."""
    return template('help', err=None, **example_locs)


@get('/<fourwords>')
def showMap(fourwords):
    """Decode a word hash and show it on the map; fall back to help on error."""
    try:
        lat, lng = thisplace.decode(fourwords)
        return template('map', lat=lat, lng=lng, fourwords=fourwords)
    # Fix: narrow the bare `except:` so SystemExit/KeyboardInterrupt escape.
    except Exception:
        return template('help',
                        err="Could not find location {}".format(fourwords),
                        **example_locs)


@get('/latlng/<lat:float>,<lng:float>')
def showMapFromLatLng(lat, lng):
    """Encode coordinates into four words and show them on the map."""
    try:
        fourwords = thisplace.four_words((lat, lng))
        return template('map', lat=lat, lng=lng, fourwords=fourwords)
    except Exception:
        # Fix: the old handler rendered 'index' (no such template is used
        # anywhere else; errors render 'help') and formatted `fourwords`,
        # which is unbound when four_words() itself raised, so the handler
        # died with a NameError that masked the real failure.
        return template('help',
                        err="Could not find location {},{}".format(lat, lng),
                        **example_locs)


# API
@get('/api/<lat:float>,<lng:float>')
def latLngToHash(lat, lng):
    """JSON: three/four/six word hashes for a coordinate pair."""
    try:
        three = thisplace.three_words((lat, lng))
        four = thisplace.four_words((lat, lng))
        six = thisplace.six_words((lat, lng))
        return {'three': three, 'four': four, 'six': six}
    except Exception:
        # Best-effort endpoint: return an empty object rather than a 500.
        return {}


@get('/api/<fourwords>')
def hashToLatLng(fourwords):
    """JSON: decode a word hash back to lat/lng; 404 when unknown."""
    try:
        lat, lng = thisplace.decode(fourwords)
        return {"lat": lat, "lng": lng}
    except Exception:
        abort(404)


if __name__ == '__main__':
    run(host='localhost', port=8080)

app = bottle.default_app()
amueller/ThisPlace
app.py
Python
mit
1,992
0.006024
"""Selenium smoke script: log in to the local Django admin as a demo user.

Expects a Django dev server on http://localhost:8000 and a chromedriver
binary under $HOME/chromedriver/. Uses hard-coded demo credentials.
"""
from decouple import config
from selenium import webdriver

# HOME is read from the environment / .env via python-decouple.
HOME = config('HOME')

# page = webdriver.Firefox()
page = webdriver.Chrome(executable_path=HOME + '/chromedriver/chromedriver')
page.get('http://localhost:8000/admin/login/')

# Locate the username input and type the login name.
campo_busca = page.find_element_by_id('id_username')
campo_busca.send_keys('admin')
# Same variable reused for the password field.
campo_busca = page.find_element_by_id('id_password')
campo_busca.send_keys('demodemo')

# button = page.findElement(By.cssSelector("input[type='submit']"))
# Submit the login form via its submit input.
button = page.find_element_by_xpath("//input[@type='submit']")
button.click()
rg3915/orcamentos
selenium/selenium_login.py
Python
mit
613
0
# -*- coding: UTF-8 -*-
import datetime
import json
import logging
import re

from mercado.core.base import Mercado
from mercado.core.common import nt_merge

log = logging.getLogger(__name__)


class Safeway(Mercado):
    """Safeway coupon clipper.

    Drives the Safeway web API: logs in, resolves the preferred store,
    fetches the unclipped coupon list plus per-coupon details, and posts
    a clip request for each coupon. Session plumbing (`_init_session`,
    `_run_request`, `self.r_s`, `_mock_delayer`) comes from the `Mercado`
    base class.
    """

    def __init__(self, auth, urls, headers, sleep_multiplier=1.0):
        """Store credentials, endpoint URLs, headers and throttle factor."""
        self.auth = auth
        self.urls = urls
        self.headers = headers
        self.raw_unclipped_list = []        # raw offer dicts still to clip
        self.prepared_coupon_list = []      # enriched dicts ready to POST
        self.store_id = 0                   # 0 until _get_store_id succeeds
        self.sleep_multiplier = sleep_multiplier

    def execute(self):
        """Run the full clip workflow end to end."""
        self._init_session()
        self._login()
        self._get_store_id()
        self._get_unclipped_coupon_list()
        self._get_coupon_details()
        self._post_coupon()

    def _login(self):
        """Authenticate and copy the session cookies into request headers.

        Raises:
            Exception: when the login response has no token or reports errors.
        """
        log.info("Logging in as {}".format(self.auth.get("username")))
        login_data = json.dumps(
            {"source": "WEB", "rememberMe": False,
             "userId": self.auth.get("username"),
             "password": self.auth.get("password")})
        rsp = self._run_request(self.urls.login, login_data, self.headers.extra)
        rsp_data = json.loads(rsp.content.decode("UTF-8"))
        if not rsp_data.get("token") or rsp_data.get("errors"):
            raise Exception("Authentication failure")
        # Subsequent API calls expect the session cookies echoed as headers.
        self.headers.extra.update(
            {"X-swyConsumerDirectoryPro": self.r_s.cookies.get_dict().get("swyConsumerDirectoryPro")})
        self.headers.extra.update(
            {"X-swyConsumerlbcookie": self.r_s.cookies.get_dict().get("swyConsumerlbcookie")})

    def _get_store_id(self):
        """Look up the preferred store ID; keep 0 (all stores?) on failure."""
        log.info("Determining Safeway store ID")
        rsp = self._run_request(self.urls.store, data=None, headers=self.headers.extra)
        r_dict = json.loads(rsp.text)
        if str(r_dict.get("code")).lower() != "success":
            # Fix: log.warn is a deprecated alias of log.warning.
            log.warning("Unable to retrieve store ID, might not be able to pull all coupons back")
        else:
            definitions = r_dict.get("definitions")[0]
            self.store_id = definitions.get("values")[0]
            log.info("Determined store ID [{0}] as the preferred store".format(self.store_id))

    def _get_unclipped_coupon_list(self):
        """Fetch all offers and keep those not yet clipped ('C') or listed ('S')."""
        rsp = self._run_request(self.urls.listing.format(self.store_id), headers=nt_merge(self.headers))
        coupons = json.loads(rsp.text)
        self.raw_unclipped_list = [offer for offer in coupons["offers"] if
                                   offer["clipStatus"] != "C" and offer["listStatus"] != "S"]
        log.info("Retrieved [{0}] unclipped coupons".format(len(self.raw_unclipped_list)))

    def _get_coupon_details(self):
        """Fetch per-coupon detail and build `prepared_coupon_list` entries."""
        for coupon in self.raw_unclipped_list:
            detailed_coupon = dict(oid=coupon["offerId"], ots=coupon["offerTs"], opgm=coupon["offerPgm"])
            url = self.urls.details.format(detailed_coupon.get("oid"), detailed_coupon.get("ots"))
            rsp = self._run_request(url, headers=nt_merge(self.headers))
            coupon_data = json.loads(rsp.content.decode("UTF-8"))
            detailed_coupon.update(dict(vbc=coupon_data.get("vndrBannerCd", "")))
            extra_cpn_detail = self._extract_coupon_detail(coupon_data)
            detailed_coupon.update(extra_cpn_detail)
            self.prepared_coupon_list.append(detailed_coupon)
        log.info("Retrieved details for {} coupons".format(len(self.prepared_coupon_list)))

    def _post_coupon(self):
        """POST a clip ('C') + list ('L') request for each prepared coupon."""
        self.headers.extra.update({"Referer": self.urls.referer})
        for index, coupon in enumerate(self.prepared_coupon_list):
            items = json.dumps({
                "items": [{
                    "clipType": "C",
                    "itemId": coupon["oid"],
                    "itemType": coupon["opgm"],
                    "vndrBannerCd": coupon["vbc"]
                }, {
                    "clipType": "L",
                    "itemId": coupon["oid"],
                    "itemType": coupon["opgm"]
                }]})
            rsp = self._run_request(self.urls.post, data=items, headers=nt_merge(self.headers))
            rsp.stream = False
            # Fix: `status_code == (200 or 204)` evaluated as `== 200`,
            # silently skipping 204 responses. Use a membership test.
            if rsp.status_code in (200, 204):
                rsp_dict = json.loads(rsp.text)
                for response in rsp_dict.get("items"):
                    # Only the first item's outcome is inspected (break either way).
                    if "errorCd" not in response:
                        coupon["added"] = True
                        log.info("Successfully added coupon [{}] [{}]".format(
                            coupon["title"], coupon["value"]).encode('utf-8').strip())
                        break
                    else:
                        log.error("Unable to add Coupon ID [{0}] || Error Code [{1}] || Error Message [{2}]".format(
                            coupon["oid"], response.get("errorCd"), response.get("errorMsg")).encode("utf-8").strip())
                        break
            # Throttle between posts (scaled by sleep_multiplier in the base).
            self._mock_delayer(index + 1)

    def _extract_coupon_detail(self, coupon_data):
        """Return a dict with title, value, expiration and an 'added' flag."""
        coupon_detail = coupon_data.get("offerDetail", {})
        title = "{} {} {}".format(coupon_detail.get("titleDsc1", "").strip(),
                                  coupon_detail.get("titleDsc2", "").strip(),
                                  coupon_detail.get("prodDsc1", "")).strip()
        # Prefer the savings amount; fall back to the price field.
        savings_value = "{}".format(coupon_detail.get("savingsValue", "")).strip()
        price_value = "{}".format(coupon_detail.get("priceValue1", "")).strip()
        true_value = savings_value if savings_value else price_value
        expiration = self._extract_coupon_expiration(coupon_data)
        full_coupon = {"title": title, "value": true_value, "added": False, "expiration": expiration}
        log.debug("The following coupon was processed successfully {}".format(json.dumps(full_coupon, indent=4)))
        return full_coupon

    def _extract_coupon_expiration(self, coupon_data):
        """Parse the '\\/Date(ms)\\/' end date into 'M/D/YYYY', else 'Unknown'.

        NOTE(review): '%-m'/'%-d' are glibc-only strftime flags — this will
        raise on Windows and fall into the 'Unknown' branch; confirm target OS.
        """
        try:
            end_date_regex = "\\\/Date\(([0-9]{10})[0-9]{3}\)\\\/"
            # Keep the 10-digit seconds, drop the 3-digit millisecond suffix.
            expire_ts_unix = int(re.match(end_date_regex, coupon_data["offerEndDt"]).group(1))
            expires = (datetime.datetime.fromtimestamp(expire_ts_unix)
                       .strftime("%-m/%-d/%Y"))
        except Exception as e:
            # Fix: log.error(e, "...") treated the string as a %-format arg
            # for the exception object; log the message with the exception.
            log.error("Exception getting coupon details: %s", e)
            expires = "Unknown"
        return expires
furritos/mercado-api
mercado/core/safeway.py
Python
mit
6,259
0.003834
class Solution(object):
    """Water-and-jug problem: can jugs of size x and y measure exactly z?"""

    def myGCD(self, x, y):
        """Greatest common divisor of x and y (iterative Euclid)."""
        while y:
            x, y = y, x % y
        return x

    def canMeasureWater(self, x, y, z):
        """
        :type x: int
        :type y: int
        :type z: int
        :rtype: bool
        """
        # Either jug (or exactly one of them while the other is empty)
        # already holding z trivially works.
        if x == z or y == z:
            return True
        if x == 0 and y == z:
            return True
        if x == z and y == 0:
            return True
        # Together the jugs can never hold more than x + y.
        if z > x + y:
            return False
        # By Bezout's identity, z is reachable iff gcd(x, y) divides z.
        return z % self.myGCD(x, y) == 0
hawkphantomnet/leetcode
WaterAndJugProblem/Solution.py
Python
mit
545
0
# # rtlsdr_scan # # http://eartoearoak.com/software/rtlsdr-scanner # # Copyright 2012 - 2017 Al Brown # # A frequency scanning GUI for the OsmoSDR rtl-sdr library at # http://sdr.osmocom.org/trac/wiki/rtl-sdr # # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from setuptools import setup, find_packages from rtlsdr_scanner.version import VERSION setup(name='rtlsdr_scanner', version='.'.join([str(x) for x in VERSION]), description='A simple spectrum analyser for scanning\n with a RTL-SDR compatible USB device', classifiers=['Development Status :: 4 - Beta', 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python :: 2.7', 'Topic :: Communications :: Ham Radio', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Visualization'], keywords='rtlsdr spectrum analyser', url='http://eartoearoak.com/software/rtlsdr-scanner', author='Al Brown', author_email='al@eartoearok.com', license='GPLv3', packages=find_packages(), package_data={'rtlsdr_scanner.res': ['*']}, install_requires=['numpy', 'matplotlib', 'Pillow', 'pyrtlsdr', 'pyserial', 'visvis'])
EarToEarOak/RTLSDR-Scanner
setup.py
Python
gpl-3.0
2,016
0.001488
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""


def CheckChange(input_api, output_api):
  """Checks the DrMemory suppression files for bad suppressions."""
  # TODO(timurrrr): find out how to do relative imports
  # and remove this ugly hack. Also, the CheckChange function won't be needed.
  tools_vg_path = input_api.os_path.join(input_api.PresubmitLocalPath(), '..')
  import sys
  saved_sys_path = sys.path
  try:
    # Temporarily expose tools/valgrind so `suppressions` can be imported.
    sys.path = saved_sys_path + [tools_vg_path]
    import suppressions
    return suppressions.PresubmitCheck(input_api, output_api)
  finally:
    sys.path = saved_sys_path


def CheckChangeOnUpload(input_api, output_api):
  """Upload-time hook: delegates to the shared suppression check."""
  return CheckChange(input_api, output_api)


def CheckChangeOnCommit(input_api, output_api):
  """Commit-time hook: delegates to the shared suppression check."""
  return CheckChange(input_api, output_api)


def GetPreferredTryMasters(project, change):
  """Route default tryjobs to the Windows Dr.Memory try bot."""
  masters = {
      'tryserver.chromium.win': {
          'win_drmemory': set(['defaulttests']),
      }
  }
  return masters
Chilledheart/chromium
tools/valgrind/drmemory/PRESUBMIT.py
Python
bsd-3-clause
1,175
0.009362
# Python 2 integration test: parse many gzipped files from HDFS via an
# h2o cloud. Requires the 0xdata-internal HDFS namenode 'mr-0x6'.
import unittest, time, sys
# Make the h2o test helper modules importable from sibling directories.
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i

class Basic(unittest.TestCase):
    def tearDown(self):
        # Fail the test if the h2o sandbox logs collected any errors.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # assume we're at 0xdata with it's hdfs namenode
        h2o.init(1, use_hdfs=True, hdfs_version='cdh4', hdfs_name_node='mr-0x6')

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_hdfs_multi_copies(self):
        # Import-and-parse an HDFS glob into a single .hex frame.
        print "\nUse the new regex capabilities for selecting hdfs: try *copies* at /datasets"
        # pop open a browser on the cloud
        # h2b.browseTheCloud()

        # defaults to /datasets
        parseResult = h2i.import_parse(path='datasets/manyfiles-nflx-gz/*', schema='hdfs',
            hex_key='manyfiles.hex', exclude=None, header=None, timeoutSecs=600)
        print "parse result:", parseResult['destination_key']
        sys.stdout.flush()

if __name__ == '__main__':
    h2o.unit_main()
rowhit/h2o-2
py/testdir_0xdata_only/test_hdfs_multi_copies.py
Python
apache-2.0
1,029
0.013605
import unittest
from charlesbot.util.parse import parse_msg_with_prefix


class TestMessageParser(unittest.TestCase):
    """Exercise parse_msg_with_prefix() against the '!all' prefix.

    Valid messages yield the text after the prefix; non-matching
    messages yield None.
    """

    def _parsed(self, msg):
        # Shared helper: parse *msg* with the fixed '!all' prefix.
        return parse_msg_with_prefix("!all", msg)

    def test_prefix_uppercase(self):
        self.assertEqual("hi, there!", self._parsed("!ALL hi, there!"))

    def test_prefix_mixed(self):
        self.assertEqual("hi, there!", self._parsed("!AlL hi, there!"))

    def test_prefix_colon(self):
        self.assertEqual("hi, there!", self._parsed("!all: hi, there!"))

    def test_prefix_colon_two(self):
        self.assertEqual("hi, there!", self._parsed("!all:hi, there!"))

    def test_prefix_space(self):
        self.assertEqual("hi, there!", self._parsed("!all hi, there!"))

    def test_prefix_whitespace(self):
        self.assertEqual("hi, there!", self._parsed("!all hi, there!"))

    def test_prefix_leading_whitespace(self):
        self.assertEqual("hi, there!", self._parsed(" !all hi, there!"))

    def test_prefix_leading_whitespace_two(self):
        self.assertEqual("hi, there!", self._parsed(" !all hi, there!"))

    def test_prefix_invalid_one(self):
        self.assertEqual(None, self._parsed("s !all hi, there!"))

    def test_prefix_invalid_two(self):
        self.assertEqual(None, self._parsed("!allhi, there!"))
marvinpinto/charlesbot
tests/util/parse/test_message_parser.py
Python
mit
1,817
0
""" This module contains the default values for all settings used by Scrapy. For more information about these settings you can read the settings documentation in docs/topics/settings.rst Scrapy developers, if you add a setting here remember to: * add it in alphabetical order * group similar settings without leaving blank lines * add its documentation to the available settings documentation (docs/topics/settings.rst) """ import sys from importlib import import_module from os.path import join, abspath, dirname AJAXCRAWL_ENABLED = False ASYNCIO_EVENT_LOOP = None AUTOTHROTTLE_ENABLED = False AUTOTHROTTLE_DEBUG = False AUTOTHROTTLE_MAX_DELAY = 60.0 AUTOTHROTTLE_START_DELAY = 5.0 AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 BOT_NAME = 'scrapybot' CLOSESPIDER_TIMEOUT = 0 CLOSESPIDER_PAGECOUNT = 0 CLOSESPIDER_ITEMCOUNT = 0 CLOSESPIDER_ERRORCOUNT = 0 COMMANDS_MODULE = '' COMPRESSION_ENABLED = True CONCURRENT_ITEMS = 100 CONCURRENT_REQUESTS = 16 CONCURRENT_REQUESTS_PER_DOMAIN = 8 CONCURRENT_REQUESTS_PER_IP = 0 COOKIES_ENABLED = True COOKIES_DEBUG = False DEFAULT_ITEM_CLASS = 'scrapy.item.Item' DEFAULT_REQUEST_HEADERS = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en', } DEPTH_LIMIT = 0 DEPTH_STATS_VERBOSE = False DEPTH_PRIORITY = 0 DNSCACHE_ENABLED = True DNSCACHE_SIZE = 10000 DNS_RESOLVER = 'scrapy.resolver.CachingThreadedResolver' DNS_TIMEOUT = 60 DOWNLOAD_DELAY = 0 DOWNLOAD_HANDLERS = {} DOWNLOAD_HANDLERS_BASE = { 'data': 'scrapy.core.downloader.handlers.datauri.DataURIDownloadHandler', 'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler', 'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler', 'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler', 's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler', 'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler', } DOWNLOAD_TIMEOUT = 180 # 3mins DOWNLOAD_MAXSIZE = 1024 * 1024 * 1024 # 1024m DOWNLOAD_WARNSIZE = 32 * 1024 * 1024 
# 32m DOWNLOAD_FAIL_ON_DATALOSS = True DOWNLOADER = 'scrapy.core.downloader.Downloader' DOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory' DOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory' DOWNLOADER_CLIENT_TLS_CIPHERS = 'DEFAULT' # Use highest TLS/SSL protocol version supported by the platform, also allowing negotiation: DOWNLOADER_CLIENT_TLS_METHOD = 'TLS' DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING = False DOWNLOADER_MIDDLEWARES = {} DOWNLOADER_MIDDLEWARES_BASE = { # Engine side 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100, 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300, 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350, 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 400, 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 500, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': 550, 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560, 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580, 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590, 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600, 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700, 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750, 'scrapy.downloadermiddlewares.stats.DownloaderStats': 850, 'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900, # Downloader side } DOWNLOADER_STATS = True DUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter' EDITOR = 'vi' if sys.platform == 'win32': EDITOR = '%s -m idlelib.idle' EXTENSIONS = {} EXTENSIONS_BASE = { 'scrapy.extensions.corestats.CoreStats': 0, 'scrapy.extensions.telnet.TelnetConsole': 0, 'scrapy.extensions.memusage.MemoryUsage': 0, 'scrapy.extensions.memdebug.MemoryDebugger': 0, 'scrapy.extensions.closespider.CloseSpider': 0, 
'scrapy.extensions.feedexport.FeedExporter': 0, 'scrapy.extensions.logstats.LogStats': 0, 'scrapy.extensions.spiderstate.SpiderState': 0, 'scrapy.extensions.throttle.AutoThrottle': 0, } FEED_TEMPDIR = None FEEDS = {} FEED_URI_PARAMS = None # a function to extend uri arguments FEED_STORE_EMPTY = False FEED_EXPORT_ENCODING = None FEED_EXPORT_FIELDS = None FEED_STORAGES = {} FEED_STORAGES_BASE = { '': 'scrapy.extensions.feedexport.FileFeedStorage', 'file': 'scrapy.extensions.feedexport.FileFeedStorage', 'ftp': 'scrapy.extensions.feedexport.FTPFeedStorage', 'gs': 'scrapy.extensions.feedexport.GCSFeedStorage', 's3': 'scrapy.extensions.feedexport.S3FeedStorage', 'stdout': 'scrapy.extensions.feedexport.StdoutFeedStorage', } FEED_EXPORT_BATCH_ITEM_COUNT = 0 FEED_EXPORTERS = {} FEED_EXPORTERS_BASE = { 'json': 'scrapy.exporters.JsonItemExporter', 'jsonlines': 'scrapy.exporters.JsonLinesItemExporter', 'jl': 'scrapy.exporters.JsonLinesItemExporter', 'csv': 'scrapy.exporters.CsvItemExporter', 'xml': 'scrapy.exporters.XmlItemExporter', 'marshal': 'scrapy.exporters.MarshalItemExporter', 'pickle': 'scrapy.exporters.PickleItemExporter', } FEED_EXPORT_INDENT = 0 FEED_STORAGE_FTP_ACTIVE = False FEED_STORAGE_GCS_ACL = '' FEED_STORAGE_S3_ACL = '' FILES_STORE_S3_ACL = 'private' FILES_STORE_GCS_ACL = '' FTP_USER = 'anonymous' FTP_PASSWORD = 'guest' FTP_PASSIVE_MODE = True GCS_PROJECT_ID = None HTTPCACHE_ENABLED = False HTTPCACHE_DIR = 'httpcache' HTTPCACHE_IGNORE_MISSING = False HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' HTTPCACHE_EXPIRATION_SECS = 0 HTTPCACHE_ALWAYS_STORE = False HTTPCACHE_IGNORE_HTTP_CODES = [] HTTPCACHE_IGNORE_SCHEMES = ['file'] HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS = [] HTTPCACHE_DBM_MODULE = 'dbm' HTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy' HTTPCACHE_GZIP = False HTTPPROXY_ENABLED = True HTTPPROXY_AUTH_ENCODING = 'latin-1' IMAGES_STORE_S3_ACL = 'private' IMAGES_STORE_GCS_ACL = '' ITEM_PROCESSOR = 
'scrapy.pipelines.ItemPipelineManager' ITEM_PIPELINES = {} ITEM_PIPELINES_BASE = {} LOG_ENABLED = True LOG_ENCODING = 'utf-8' LOG_FORMATTER = 'scrapy.logformatter.LogFormatter' LOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s' LOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S' LOG_STDOUT = False LOG_LEVEL = 'DEBUG' LOG_FILE = None LOG_SHORT_NAMES = False SCHEDULER_DEBUG = False LOGSTATS_INTERVAL = 60.0 MAIL_HOST = 'localhost' MAIL_PORT = 25 MAIL_FROM = 'scrapy@localhost' MAIL_PASS = None MAIL_USER = None MEMDEBUG_ENABLED = False # enable memory debugging MEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown MEMUSAGE_CHECK_INTERVAL_SECONDS = 60.0 MEMUSAGE_ENABLED = True MEMUSAGE_LIMIT_MB = 0 MEMUSAGE_NOTIFY_MAIL = [] MEMUSAGE_WARNING_MB = 0 METAREFRESH_ENABLED = True METAREFRESH_IGNORE_TAGS = [] METAREFRESH_MAXDELAY = 100 NEWSPIDER_MODULE = '' RANDOMIZE_DOWNLOAD_DELAY = True REACTOR_THREADPOOL_MAXSIZE = 10 REDIRECT_ENABLED = True REDIRECT_MAX_TIMES = 20 # uses Firefox default setting REDIRECT_PRIORITY_ADJUST = +2 REFERER_ENABLED = True REFERRER_POLICY = 'scrapy.spidermiddlewares.referer.DefaultReferrerPolicy' RETRY_ENABLED = True RETRY_TIMES = 2 # initial response + 2 retries = 3 requests RETRY_HTTP_CODES = [500, 502, 503, 504, 522, 524, 408, 429] RETRY_PRIORITY_ADJUST = -1 ROBOTSTXT_OBEY = False ROBOTSTXT_PARSER = 'scrapy.robotstxt.ProtegoRobotParser' ROBOTSTXT_USER_AGENT = None SCHEDULER = 'scrapy.core.scheduler.Scheduler' SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue' SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue' SCHEDULER_PRIORITY_QUEUE = 'scrapy.pqueues.ScrapyPriorityQueue' SCRAPER_SLOT_MAX_ACTIVE_SIZE = 5000000 SPIDER_LOADER_CLASS = 'scrapy.spiderloader.SpiderLoader' SPIDER_LOADER_WARN_ONLY = False SPIDER_MIDDLEWARES = {} SPIDER_MIDDLEWARES_BASE = { # Engine side 'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 50, 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': 500, 
'scrapy.spidermiddlewares.referer.RefererMiddleware': 700, 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware': 800, 'scrapy.spidermiddlewares.depth.DepthMiddleware': 900, # Spider side } SPIDER_MODULES = [] STATS_CLASS = 'scrapy.statscollectors.MemoryStatsCollector' STATS_DUMP = True STATSMAILER_RCPTS = [] TEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates')) URLLENGTH_LIMIT = 2083 USER_AGENT = f'Scrapy/{import_module("scrapy").__version__} (+https://scrapy.org)' TELNETCONSOLE_ENABLED = 1 TELNETCONSOLE_PORT = [6023, 6073] TELNETCONSOLE_HOST = '127.0.0.1' TELNETCONSOLE_USERNAME = 'scrapy' TELNETCONSOLE_PASSWORD = None TWISTED_REACTOR = None SPIDER_CONTRACTS = {} SPIDER_CONTRACTS_BASE = { 'scrapy.contracts.default.UrlContract': 1, 'scrapy.contracts.default.CallbackKeywordArgumentsContract': 1, 'scrapy.contracts.default.ReturnsContract': 2, 'scrapy.contracts.default.ScrapesContract': 3, }
starrify/scrapy
scrapy/settings/default_settings.py
Python
bsd-3-clause
9,161
0.000982
# force floating point division. Can still use integer with // from __future__ import division # This file is used for importing the common utilities classes. import numpy as np import matplotlib.pyplot as plt import sys sys.path.append("../../../../../../") from Util import Test from Util.Test import _f_assert,HummerData,load_simulated_data from FitUtil.EnergyLandscapes.InverseWeierstrass.Python.Code import \ InverseWeierstrass,WeierstrassUtil,WeightedHistogram def assert_all_digitization_correct(objs): for o in objs: _assert_digitization_correct(o) def run(): fwd,rev = load_simulated_data(n=2) assert_all_digitization_correct(fwd) assert_all_digitization_correct(rev) if __name__ == "__main__": run()
prheenan/BioModel
EnergyLandscapes/InverseWeierstrass/Python/TestExamples/Testing/MainTestingWeightedHistograms.py
Python
gpl-2.0
747
0.013387
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from os import chmod from spack import * class Tbl2asn(Package): """Tbl2asn is a command-line program that automates the creation of sequence records for submission to GenBank.""" homepage = "https://www.ncbi.nlm.nih.gov/genbank/tbl2asn2/" version('2020-03-01', sha256='7cc1119d3cfcbbffdbd4ecf33cef8bbdd44fc5625c72976bee08b1157625377e') def url_for_version(self, ver): return "https://ftp.ncbi.nih.gov/toolbox/ncbi_tools/converters/by_program/tbl2asn/linux.tbl2asn.gz" def install(self, spec, prefix): mkdirp(prefix.bin) install('../linux.tbl2asn', prefix.bin.tbl2asn) chmod(prefix.bin.tbl2asn, 0o775)
LLNL/spack
var/spack/repos/builtin/packages/tbl2asn/package.py
Python
lgpl-2.1
866
0.002309
# -*- coding: utf-8 -*- from __future__ import print_function import re import sys from datetime import datetime, timedelta import pytest import numpy as np import pandas as pd import pandas.compat as compat from pandas.core.dtypes.common import ( is_object_dtype, is_datetimetz, needs_i8_conversion) import pandas.util.testing as tm from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex, PeriodIndex, Timedelta, IntervalIndex, Interval, CategoricalIndex, Timestamp) from pandas.compat import StringIO, PYPY, long from pandas.compat.numpy import np_array_datetime64_compat from pandas.core.accessor import PandasDelegate from pandas.core.base import PandasObject, NoNewAttributesMixin from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin from pandas._libs.tslib import iNaT class CheckStringMixin(object): def test_string_methods_dont_fail(self): repr(self.container) str(self.container) bytes(self.container) if not compat.PY3: unicode(self.container) # noqa def test_tricky_container(self): if not hasattr(self, 'unicode_container'): pytest.skip('Need unicode_container to test with this') repr(self.unicode_container) str(self.unicode_container) bytes(self.unicode_container) if not compat.PY3: unicode(self.unicode_container) # noqa class CheckImmutable(object): mutable_regex = re.compile('does not support mutable operations') def check_mutable_error(self, *args, **kwargs): # Pass whatever function you normally would to assert_raises_regex # (after the Exception kind). 
tm.assert_raises_regex( TypeError, self.mutable_regex, *args, **kwargs) def test_no_mutable_funcs(self): def setitem(): self.container[0] = 5 self.check_mutable_error(setitem) def setslice(): self.container[1:2] = 3 self.check_mutable_error(setslice) def delitem(): del self.container[0] self.check_mutable_error(delitem) def delslice(): del self.container[0:3] self.check_mutable_error(delslice) mutable_methods = getattr(self, "mutable_methods", []) for meth in mutable_methods: self.check_mutable_error(getattr(self.container, meth)) def test_slicing_maintains_type(self): result = self.container[1:2] expected = self.lst[1:2] self.check_result(result, expected) def check_result(self, result, expected, klass=None): klass = klass or self.klass assert isinstance(result, klass) assert result == expected class TestPandasDelegate(object): class Delegator(object): _properties = ['foo'] _methods = ['bar'] def _set_foo(self, value): self.foo = value def _get_foo(self): return self.foo foo = property(_get_foo, _set_foo, doc="foo property") def bar(self, *args, **kwargs): """ a test bar method """ pass class Delegate(PandasDelegate, PandasObject): def __init__(self, obj): self.obj = obj def setup_method(self, method): pass def test_invalida_delgation(self): # these show that in order for the delegation to work # the _delegate_* methods need to be overridden to not raise # a TypeError self.Delegate._add_delegate_accessors( delegate=self.Delegator, accessors=self.Delegator._properties, typ='property' ) self.Delegate._add_delegate_accessors( delegate=self.Delegator, accessors=self.Delegator._methods, typ='method' ) delegate = self.Delegate(self.Delegator()) def f(): delegate.foo pytest.raises(TypeError, f) def f(): delegate.foo = 5 pytest.raises(TypeError, f) def f(): delegate.foo() pytest.raises(TypeError, f) @pytest.mark.skipif(PYPY, reason="not relevant for PyPy") def test_memory_usage(self): # Delegate does not implement memory_usage. 
# Check that we fall back to in-built `__sizeof__` # GH 12924 delegate = self.Delegate(self.Delegator()) sys.getsizeof(delegate) class Ops(object): def _allow_na_ops(self, obj): """Whether to skip test cases including NaN""" if (isinstance(obj, Index) and (obj.is_boolean() or not obj._can_hold_na)): # don't test boolean / int64 index return False return True def setup_method(self, method): self.bool_index = tm.makeBoolIndex(10, name='a') self.int_index = tm.makeIntIndex(10, name='a') self.float_index = tm.makeFloatIndex(10, name='a') self.dt_index = tm.makeDateIndex(10, name='a') self.dt_tz_index = tm.makeDateIndex(10, name='a').tz_localize( tz='US/Eastern') self.period_index = tm.makePeriodIndex(10, name='a') self.string_index = tm.makeStringIndex(10, name='a') self.unicode_index = tm.makeUnicodeIndex(10, name='a') arr = np.random.randn(10) self.int_series = Series(arr, index=self.int_index, name='a') self.float_series = Series(arr, index=self.float_index, name='a') self.dt_series = Series(arr, index=self.dt_index, name='a') self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True) self.period_series = Series(arr, index=self.period_index, name='a') self.string_series = Series(arr, index=self.string_index, name='a') types = ['bool', 'int', 'float', 'dt', 'dt_tz', 'period', 'string', 'unicode'] fmts = ["{0}_{1}".format(t, f) for t in types for f in ['index', 'series']] self.objs = [getattr(self, f) for f in fmts if getattr(self, f, None) is not None] def check_ops_properties(self, props, filter=None, ignore_failures=False): for op in props: for o in self.is_valid_objs: # if a filter, skip if it doesn't match if filter is not None: filt = o.index if isinstance(o, Series) else o if not filter(filt): continue try: if isinstance(o, Series): expected = Series( getattr(o.index, op), index=o.index, name='a') else: expected = getattr(o, op) except (AttributeError): if ignore_failures: continue result = getattr(o, op) # these couuld be series, arrays or scalars if 
isinstance(result, Series) and isinstance(expected, Series): tm.assert_series_equal(result, expected) elif isinstance(result, Index) and isinstance(expected, Index): tm.assert_index_equal(result, expected) elif isinstance(result, np.ndarray) and isinstance(expected, np.ndarray): tm.assert_numpy_array_equal(result, expected) else: assert result == expected # freq raises AttributeError on an Int64Index because its not # defined we mostly care about Series here anyhow if not ignore_failures: for o in self.not_valid_objs: # an object that is datetimelike will raise a TypeError, # otherwise an AttributeError if issubclass(type(o), DatetimeIndexOpsMixin): pytest.raises(TypeError, lambda: getattr(o, op)) else: pytest.raises(AttributeError, lambda: getattr(o, op)) def test_binary_ops_docs(self): from pandas import DataFrame, Panel op_map = {'add': '+', 'sub': '-', 'mul': '*', 'mod': '%', 'pow': '**', 'truediv': '/', 'floordiv': '//'} for op_name in ['add', 'sub', 'mul', 'mod', 'pow', 'truediv', 'floordiv']: for klass in [Series, DataFrame, Panel]: operand1 = klass.__name__.lower() operand2 = 'other' op = op_map[op_name] expected_str = ' '.join([operand1, op, operand2]) assert expected_str in getattr(klass, op_name).__doc__ # reverse version of the binary ops expected_str = ' '.join([operand2, op, operand1]) assert expected_str in getattr(klass, 'r' + op_name).__doc__ class TestIndexOps(Ops): def setup_method(self, method): super(TestIndexOps, self).setup_method(method) self.is_valid_objs = [o for o in self.objs if o._allow_index_ops] self.not_valid_objs = [o for o in self.objs if not o._allow_index_ops] def test_none_comparison(self): # bug brought up by #1079 # changed from TypeError in 0.17.0 for o in self.is_valid_objs: if isinstance(o, Series): o[0] = np.nan # noinspection PyComparisonWithNone result = o == None # noqa assert not result.iat[0] assert not result.iat[1] # noinspection PyComparisonWithNone result = o != None # noqa assert result.iat[0] assert 
result.iat[1] result = None == o # noqa assert not result.iat[0] assert not result.iat[1] # this fails for numpy < 1.9 # and oddly for *some* platforms # result = None != o # noqa # assert result.iat[0] # assert result.iat[1] result = None > o assert not result.iat[0] assert not result.iat[1] result = o < None assert not result.iat[0] assert not result.iat[1] def test_ndarray_compat_properties(self): for o in self.objs: # Check that we work. for p in ['shape', 'dtype', 'flags', 'T', 'strides', 'itemsize', 'nbytes']: assert getattr(o, p, None) is not None assert hasattr(o, 'base') # If we have a datetime-like dtype then needs a view to work # but the user is responsible for that try: assert o.data is not None except ValueError: pass with pytest.raises(ValueError): o.item() # len > 1 assert o.ndim == 1 assert o.size == len(o) assert Index([1]).item() == 1 assert Series([1]).item() == 1 def test_ops(self): for op in ['max', 'min']: for o in self.objs: result = getattr(o, op)() if not isinstance(o, PeriodIndex): expected = getattr(o.values, op)() else: expected = pd.Period(ordinal=getattr(o._values, op)(), freq=o.freq) try: assert result == expected except TypeError: # comparing tz-aware series with np.array results in # TypeError expected = expected.astype('M8[ns]').astype('int64') assert result.value == expected def test_nanops(self): # GH 7261 for op in ['max', 'min']: for klass in [Index, Series]: obj = klass([np.nan, 2.0]) assert getattr(obj, op)() == 2.0 obj = klass([np.nan]) assert pd.isna(getattr(obj, op)()) obj = klass([]) assert pd.isna(getattr(obj, op)()) obj = klass([pd.NaT, datetime(2011, 11, 1)]) # check DatetimeIndex monotonic path assert getattr(obj, op)() == datetime(2011, 11, 1) obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT]) # check DatetimeIndex non-monotonic path assert getattr(obj, op)(), datetime(2011, 11, 1) # argmin/max obj = Index(np.arange(5, dtype='int64')) assert obj.argmin() == 0 assert obj.argmax() == 4 obj = Index([np.nan, 1, 
np.nan, 2]) assert obj.argmin() == 1 assert obj.argmax() == 3 obj = Index([np.nan]) assert obj.argmin() == -1 assert obj.argmax() == -1 obj = Index([pd.NaT, datetime(2011, 11, 1), datetime(2011, 11, 2), pd.NaT]) assert obj.argmin() == 1 assert obj.argmax() == 2 obj = Index([pd.NaT]) assert obj.argmin() == -1 assert obj.argmax() == -1 def test_value_counts_unique_nunique(self): for orig in self.objs: o = orig.copy() klass = type(o) values = o._values if isinstance(values, Index): # reset name not to affect latter process values.name = None # create repeated values, 'n'th element is repeated by n+1 times # skip boolean, because it only has 2 values at most if isinstance(o, Index) and o.is_boolean(): continue elif isinstance(o, Index): expected_index = Index(o[::-1]) expected_index.name = None o = o.repeat(range(1, len(o) + 1)) o.name = 'a' else: expected_index = Index(values[::-1]) idx = o.index.repeat(range(1, len(o) + 1)) rep = np.repeat(values, range(1, len(o) + 1)) o = klass(rep, index=idx, name='a') # check values has the same dtype as the original assert o.dtype == orig.dtype expected_s = Series(range(10, 0, -1), index=expected_index, dtype='int64', name='a') result = o.value_counts() tm.assert_series_equal(result, expected_s) assert result.index.name is None assert result.name == 'a' result = o.unique() if isinstance(o, Index): assert isinstance(result, o.__class__) tm.assert_index_equal(result, orig) elif is_datetimetz(o): # datetimetz Series returns array of Timestamp assert result[0] == orig[0] for r in result: assert isinstance(r, Timestamp) tm.assert_numpy_array_equal(result, orig._values.astype(object).values) else: tm.assert_numpy_array_equal(result, orig.values) assert o.nunique() == len(np.unique(o.values)) def test_value_counts_unique_nunique_null(self): for null_obj in [np.nan, None]: for orig in self.objs: o = orig.copy() klass = type(o) values = o._values if not self._allow_na_ops(o): continue # special assign to the numpy array if 
is_datetimetz(o): if isinstance(o, DatetimeIndex): v = o.asi8 v[0:2] = iNaT values = o._shallow_copy(v) else: o = o.copy() o[0:2] = iNaT values = o._values elif needs_i8_conversion(o): values[0:2] = iNaT values = o._shallow_copy(values) else: values[0:2] = null_obj # check values has the same dtype as the original assert values.dtype == o.dtype # create repeated values, 'n'th element is repeated by n+1 # times if isinstance(o, (DatetimeIndex, PeriodIndex)): expected_index = o.copy() expected_index.name = None # attach name to klass o = klass(values.repeat(range(1, len(o) + 1))) o.name = 'a' else: if is_datetimetz(o): expected_index = orig._values._shallow_copy(values) else: expected_index = Index(values) expected_index.name = None o = o.repeat(range(1, len(o) + 1)) o.name = 'a' # check values has the same dtype as the original assert o.dtype == orig.dtype # check values correctly have NaN nanloc = np.zeros(len(o), dtype=np.bool) nanloc[:3] = True if isinstance(o, Index): tm.assert_numpy_array_equal(pd.isna(o), nanloc) else: exp = Series(nanloc, o.index, name='a') tm.assert_series_equal(pd.isna(o), exp) expected_s_na = Series(list(range(10, 2, -1)) + [3], index=expected_index[9:0:-1], dtype='int64', name='a') expected_s = Series(list(range(10, 2, -1)), index=expected_index[9:1:-1], dtype='int64', name='a') result_s_na = o.value_counts(dropna=False) tm.assert_series_equal(result_s_na, expected_s_na) assert result_s_na.index.name is None assert result_s_na.name == 'a' result_s = o.value_counts() tm.assert_series_equal(o.value_counts(), expected_s) assert result_s.index.name is None assert result_s.name == 'a' result = o.unique() if isinstance(o, Index): tm.assert_index_equal(result, Index(values[1:], name='a')) elif is_datetimetz(o): # unable to compare NaT / nan vals = values[2:].astype(object).values tm.assert_numpy_array_equal(result[1:], vals) assert result[0] is pd.NaT else: tm.assert_numpy_array_equal(result[1:], values[2:]) assert pd.isna(result[0]) assert 
result.dtype == orig.dtype assert o.nunique() == 8 assert o.nunique(dropna=False) == 9 def test_value_counts_inferred(self): klasses = [Index, Series] for klass in klasses: s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a'] s = klass(s_values) expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c']) tm.assert_series_equal(s.value_counts(), expected) if isinstance(s, Index): exp = Index(np.unique(np.array(s_values, dtype=np.object_))) tm.assert_index_equal(s.unique(), exp) else: exp = np.unique(np.array(s_values, dtype=np.object_)) tm.assert_numpy_array_equal(s.unique(), exp) assert s.nunique() == 4 # don't sort, have to sort after the fact as not sorting is # platform-dep hist = s.value_counts(sort=False).sort_values() expected = Series([3, 1, 4, 2], index=list('acbd')).sort_values() tm.assert_series_equal(hist, expected) # sort ascending hist = s.value_counts(ascending=True) expected = Series([1, 2, 3, 4], index=list('cdab')) tm.assert_series_equal(hist, expected) # relative histogram. 
hist = s.value_counts(normalize=True) expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c']) tm.assert_series_equal(hist, expected) def test_value_counts_bins(self): klasses = [Index, Series] for klass in klasses: s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a'] s = klass(s_values) # bins pytest.raises(TypeError, lambda bins: s.value_counts(bins=bins), 1) s1 = Series([1, 1, 2, 3]) res1 = s1.value_counts(bins=1) exp1 = Series({Interval(0.997, 3.0): 4}) tm.assert_series_equal(res1, exp1) res1n = s1.value_counts(bins=1, normalize=True) exp1n = Series({Interval(0.997, 3.0): 1.0}) tm.assert_series_equal(res1n, exp1n) if isinstance(s1, Index): tm.assert_index_equal(s1.unique(), Index([1, 2, 3])) else: exp = np.array([1, 2, 3], dtype=np.int64) tm.assert_numpy_array_equal(s1.unique(), exp) assert s1.nunique() == 3 # these return the same res4 = s1.value_counts(bins=4, dropna=True) intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2])) tm.assert_series_equal(res4, exp4) res4 = s1.value_counts(bins=4, dropna=False) intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2])) tm.assert_series_equal(res4, exp4) res4n = s1.value_counts(bins=4, normalize=True) exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2])) tm.assert_series_equal(res4n, exp4n) # handle NA's properly s_values = ['a', 'b', 'b', 'b', np.nan, np.nan, 'd', 'd', 'a', 'a', 'b'] s = klass(s_values) expected = Series([4, 3, 2], index=['b', 'a', 'd']) tm.assert_series_equal(s.value_counts(), expected) if isinstance(s, Index): exp = Index(['a', 'b', np.nan, 'd']) tm.assert_index_equal(s.unique(), exp) else: exp = np.array(['a', 'b', np.nan, 'd'], dtype=object) tm.assert_numpy_array_equal(s.unique(), exp) assert s.nunique() == 3 s = klass({}) expected = Series([], dtype=np.int64) tm.assert_series_equal(s.value_counts(), expected, 
check_index_type=False) # returned dtype differs depending on original if isinstance(s, Index): tm.assert_index_equal(s.unique(), Index([]), exact=False) else: tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False) assert s.nunique() == 0 def test_value_counts_datetime64(self): klasses = [Index, Series] for klass in klasses: # GH 3002, datetime64[ns] # don't test names though txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM', 'xxyyzz20100101EGG', 'xxyyww20090101EGG', 'foofoo20080909PIE', 'foofoo20080909GUM']) f = StringIO(txt) df = pd.read_fwf(f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]) s = klass(df['dt'].copy()) s.name = None idx = pd.to_datetime(['2010-01-01 00:00:00Z', '2008-09-09 00:00:00Z', '2009-01-01 00:00:00X']) expected_s = Series([3, 2, 1], index=idx) tm.assert_series_equal(s.value_counts(), expected_s) expected = np_array_datetime64_compat(['2010-01-01 00:00:00Z', '2009-01-01 00:00:00Z', '2008-09-09 00:00:00Z'], dtype='datetime64[ns]') if isinstance(s, Index): tm.assert_index_equal(s.unique(), DatetimeIndex(expected)) else: tm.assert_numpy_array_equal(s.unique(), expected) assert s.nunique() == 3 # with NaT s = df['dt'].copy() s = klass([v for v in s.values] + [pd.NaT]) result = s.value_counts() assert result.index.dtype == 'datetime64[ns]' tm.assert_series_equal(result, expected_s) result = s.value_counts(dropna=False) expected_s[pd.NaT] = 1 tm.assert_series_equal(result, expected_s) unique = s.unique() assert unique.dtype == 'datetime64[ns]' # numpy_array_equal cannot compare pd.NaT if isinstance(s, Index): exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT]) tm.assert_index_equal(unique, exp_idx) else: tm.assert_numpy_array_equal(unique[:3], expected) assert pd.isna(unique[3]) assert s.nunique() == 3 assert s.nunique(dropna=False) == 4 # timedelta64[ns] td = df.dt - df.dt + timedelta(1) td = klass(td, name='dt') result = td.value_counts() expected_s = Series([6], index=[Timedelta('1day')], 
name='dt') tm.assert_series_equal(result, expected_s) expected = TimedeltaIndex(['1 days'], name='dt') if isinstance(td, Index): tm.assert_index_equal(td.unique(), expected) else: tm.assert_numpy_array_equal(td.unique(), expected.values) td2 = timedelta(1) + (df.dt - df.dt) td2 = klass(td2, name='dt') result2 = td2.value_counts() tm.assert_series_equal(result2, expected_s) def test_factorize(self): for orig in self.objs: o = orig.copy() if isinstance(o, Index) and o.is_boolean(): exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp) exp_uniques = o exp_uniques = Index([False, True]) else: exp_arr = np.array(range(len(o)), dtype=np.intp) exp_uniques = o labels, uniques = o.factorize() tm.assert_numpy_array_equal(labels, exp_arr) if isinstance(o, Series): tm.assert_index_equal(uniques, Index(orig), check_names=False) else: # factorize explicitly resets name tm.assert_index_equal(uniques, exp_uniques, check_names=False) def test_factorize_repeated(self): for orig in self.objs: o = orig.copy() # don't test boolean if isinstance(o, Index) and o.is_boolean(): continue # sort by value, and create duplicates if isinstance(o, Series): o = o.sort_values() n = o.iloc[5:].append(o) else: indexer = o.argsort() o = o.take(indexer) n = o[5:].append(o) exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp) labels, uniques = n.factorize(sort=True) tm.assert_numpy_array_equal(labels, exp_arr) if isinstance(o, Series): tm.assert_index_equal(uniques, Index(orig).sort_values(), check_names=False) else: tm.assert_index_equal(uniques, o, check_names=False) exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp) labels, uniques = n.factorize(sort=False) tm.assert_numpy_array_equal(labels, exp_arr) if isinstance(o, Series): expected = Index(o.iloc[5:10].append(o.iloc[:5])) tm.assert_index_equal(uniques, expected, check_names=False) else: expected = o[5:10].append(o[:5]) tm.assert_index_equal(uniques, expected, check_names=False) def 
test_duplicated_drop_duplicates_index(self): # GH 4060 for original in self.objs: if isinstance(original, Index): # special case if original.is_boolean(): result = original.drop_duplicates() expected = Index([False, True], name='a') tm.assert_index_equal(result, expected) continue # original doesn't have duplicates expected = np.array([False] * len(original), dtype=bool) duplicated = original.duplicated() tm.assert_numpy_array_equal(duplicated, expected) assert duplicated.dtype == bool result = original.drop_duplicates() tm.assert_index_equal(result, original) assert result is not original # has_duplicates assert not original.has_duplicates # create repeated values, 3rd and 5th values are duplicated idx = original[list(range(len(original))) + [5, 3]] expected = np.array([False] * len(original) + [True, True], dtype=bool) duplicated = idx.duplicated() tm.assert_numpy_array_equal(duplicated, expected) assert duplicated.dtype == bool tm.assert_index_equal(idx.drop_duplicates(), original) base = [False] * len(idx) base[3] = True base[5] = True expected = np.array(base) duplicated = idx.duplicated(keep='last') tm.assert_numpy_array_equal(duplicated, expected) assert duplicated.dtype == bool result = idx.drop_duplicates(keep='last') tm.assert_index_equal(result, idx[~expected]) base = [False] * len(original) + [True, True] base[3] = True base[5] = True expected = np.array(base) duplicated = idx.duplicated(keep=False) tm.assert_numpy_array_equal(duplicated, expected) assert duplicated.dtype == bool result = idx.drop_duplicates(keep=False) tm.assert_index_equal(result, idx[~expected]) with tm.assert_raises_regex( TypeError, r"drop_duplicates\(\) got an unexpected " "keyword argument"): idx.drop_duplicates(inplace=True) else: expected = Series([False] * len(original), index=original.index, name='a') tm.assert_series_equal(original.duplicated(), expected) result = original.drop_duplicates() tm.assert_series_equal(result, original) assert result is not original idx = 
original.index[list(range(len(original))) + [5, 3]] values = original._values[list(range(len(original))) + [5, 3]] s = Series(values, index=idx, name='a') expected = Series([False] * len(original) + [True, True], index=idx, name='a') tm.assert_series_equal(s.duplicated(), expected) tm.assert_series_equal(s.drop_duplicates(), original) base = [False] * len(idx) base[3] = True base[5] = True expected = Series(base, index=idx, name='a') tm.assert_series_equal(s.duplicated(keep='last'), expected) tm.assert_series_equal(s.drop_duplicates(keep='last'), s[~np.array(base)]) base = [False] * len(original) + [True, True] base[3] = True base[5] = True expected = Series(base, index=idx, name='a') tm.assert_series_equal(s.duplicated(keep=False), expected) tm.assert_series_equal(s.drop_duplicates(keep=False), s[~np.array(base)]) s.drop_duplicates(inplace=True) tm.assert_series_equal(s, original) def test_drop_duplicates_series_vs_dataframe(self): # GH 14192 df = pd.DataFrame({'a': [1, 1, 1, 'one', 'one'], 'b': [2, 2, np.nan, np.nan, np.nan], 'c': [3, 3, np.nan, np.nan, 'three'], 'd': [1, 2, 3, 4, 4], 'e': [datetime(2015, 1, 1), datetime(2015, 1, 1), datetime(2015, 2, 1), pd.NaT, pd.NaT] }) for column in df.columns: for keep in ['first', 'last', False]: dropped_frame = df[[column]].drop_duplicates(keep=keep) dropped_series = df[column].drop_duplicates(keep=keep) tm.assert_frame_equal(dropped_frame, dropped_series.to_frame()) def test_fillna(self): # # GH 11343 # though Index.fillna and Series.fillna has separate impl, # test here to confirm these works as the same for orig in self.objs: o = orig.copy() values = o.values # values will not be changed result = o.fillna(o.astype(object).values[0]) if isinstance(o, Index): tm.assert_index_equal(o, result) else: tm.assert_series_equal(o, result) # check shallow_copied assert o is not result for null_obj in [np.nan, None]: for orig in self.objs: o = orig.copy() klass = type(o) if not self._allow_na_ops(o): continue if 
needs_i8_conversion(o): values = o.astype(object).values fill_value = values[0] values[0:2] = pd.NaT else: values = o.values.copy() fill_value = o.values[0] values[0:2] = null_obj expected = [fill_value] * 2 + list(values[2:]) expected = klass(expected) o = klass(values) # check values has the same dtype as the original assert o.dtype == orig.dtype result = o.fillna(fill_value) if isinstance(o, Index): tm.assert_index_equal(result, expected) else: tm.assert_series_equal(result, expected) # check shallow_copied assert o is not result @pytest.mark.skipif(PYPY, reason="not relevant for PyPy") def test_memory_usage(self): for o in self.objs: res = o.memory_usage() res_deep = o.memory_usage(deep=True) if (is_object_dtype(o) or (isinstance(o, Series) and is_object_dtype(o.index))): # if there are objects, only deep will pick them up assert res_deep > res else: assert res == res_deep if isinstance(o, Series): assert ((o.memory_usage(index=False) + o.index.memory_usage()) == o.memory_usage(index=True)) # sys.getsizeof will call the .memory_usage with # deep=True, and add on some GC overhead diff = res_deep - sys.getsizeof(o) assert abs(diff) < 100 def test_searchsorted(self): # See gh-12238 for o in self.objs: index = np.searchsorted(o, max(o)) assert 0 <= index <= len(o) index = np.searchsorted(o, max(o), sorter=range(len(o))) assert 0 <= index <= len(o) def test_validate_bool_args(self): invalid_values = [1, "True", [1, 2, 3], 5.0] for value in invalid_values: with pytest.raises(ValueError): self.int_series.drop_duplicates(inplace=value) class TestTranspose(Ops): errmsg = "the 'axes' parameter is not supported" def test_transpose(self): for obj in self.objs: if isinstance(obj, Index): tm.assert_index_equal(obj.transpose(), obj) else: tm.assert_series_equal(obj.transpose(), obj) def test_transpose_non_default_axes(self): for obj in self.objs: tm.assert_raises_regex(ValueError, self.errmsg, obj.transpose, 1) tm.assert_raises_regex(ValueError, self.errmsg, obj.transpose, 
axes=1) def test_numpy_transpose(self): for obj in self.objs: if isinstance(obj, Index): tm.assert_index_equal(np.transpose(obj), obj) else: tm.assert_series_equal(np.transpose(obj), obj) tm.assert_raises_regex(ValueError, self.errmsg, np.transpose, obj, axes=1) class TestNoNewAttributesMixin(object): def test_mixin(self): class T(NoNewAttributesMixin): pass t = T() assert not hasattr(t, "__frozen") t.a = "test" assert t.a == "test" t._freeze() assert "__frozen" in dir(t) assert getattr(t, "__frozen") def f(): t.b = "test" pytest.raises(AttributeError, f) assert not hasattr(t, "b") class TestToIterable(object): # test that we convert an iterable to python types dtypes = [ ('int8', (int, long)), ('int16', (int, long)), ('int32', (int, long)), ('int64', (int, long)), ('uint8', (int, long)), ('uint16', (int, long)), ('uint32', (int, long)), ('uint64', (int, long)), ('float16', float), ('float32', float), ('float64', float), ('datetime64[ns]', Timestamp), ('datetime64[ns, US/Eastern]', Timestamp), ('timedelta64[ns]', Timedelta)] @pytest.mark.parametrize( 'dtype, rdtype', dtypes) @pytest.mark.parametrize( 'method', [ lambda x: x.tolist(), lambda x: list(x), lambda x: list(x.__iter__()), ], ids=['tolist', 'list', 'iter']) @pytest.mark.parametrize('typ', [Series, Index]) def test_iterable(self, typ, method, dtype, rdtype): # gh-10904 # gh-13258 # coerce iteration to underlying python / pandas types s = typ([1], dtype=dtype) result = method(s)[0] assert isinstance(result, rdtype) @pytest.mark.parametrize( 'dtype, rdtype, obj', [ ('object', object, 'a'), ('object', (int, long), 1), ('category', object, 'a'), ('category', (int, long), 1)]) @pytest.mark.parametrize( 'method', [ lambda x: x.tolist(), lambda x: list(x), lambda x: list(x.__iter__()), ], ids=['tolist', 'list', 'iter']) @pytest.mark.parametrize('typ', [Series, Index]) def test_iterable_object_and_category(self, typ, method, dtype, rdtype, obj): # gh-10904 # gh-13258 # coerce iteration to underlying python / pandas 
types s = typ([obj], dtype=dtype) result = method(s)[0] assert isinstance(result, rdtype) @pytest.mark.parametrize( 'dtype, rdtype', dtypes) def test_iterable_items(self, dtype, rdtype): # gh-13258 # test items / iteritems yields the correct boxed scalars # this only applies to series s = Series([1], dtype=dtype) _, result = list(s.items())[0] assert isinstance(result, rdtype) _, result = list(s.iteritems())[0] assert isinstance(result, rdtype) @pytest.mark.parametrize( 'dtype, rdtype', dtypes + [ ('object', (int, long)), ('category', (int, long))]) @pytest.mark.parametrize('typ', [Series, Index]) def test_iterable_map(self, typ, dtype, rdtype): # gh-13236 # coerce iteration to underlying python / pandas types s = typ([1], dtype=dtype) result = s.map(type)[0] if not isinstance(rdtype, tuple): rdtype = tuple([rdtype]) assert result in rdtype @pytest.mark.parametrize( 'method', [ lambda x: x.tolist(), lambda x: list(x), lambda x: list(x.__iter__()), ], ids=['tolist', 'list', 'iter']) def test_categorial_datetimelike(self, method): i = CategoricalIndex([Timestamp('1999-12-31'), Timestamp('2000-12-31')]) result = method(i)[0] assert isinstance(result, Timestamp) def test_iter_box(self): vals = [Timestamp('2011-01-01'), Timestamp('2011-01-02')] s = Series(vals) assert s.dtype == 'datetime64[ns]' for res, exp in zip(s, vals): assert isinstance(res, Timestamp) assert res.tz is None assert res == exp vals = [Timestamp('2011-01-01', tz='US/Eastern'), Timestamp('2011-01-02', tz='US/Eastern')] s = Series(vals) assert s.dtype == 'datetime64[ns, US/Eastern]' for res, exp in zip(s, vals): assert isinstance(res, Timestamp) assert res.tz == exp.tz assert res == exp # timedelta vals = [Timedelta('1 days'), Timedelta('2 days')] s = Series(vals) assert s.dtype == 'timedelta64[ns]' for res, exp in zip(s, vals): assert isinstance(res, Timedelta) assert res == exp # period (object dtype, not boxed) vals = [pd.Period('2011-01-01', freq='M'), pd.Period('2011-01-02', freq='M')] s = 
Series(vals) assert s.dtype == 'object' for res, exp in zip(s, vals): assert isinstance(res, pd.Period) assert res.freq == 'M' assert res == exp
zfrenchee/pandas
pandas/tests/test_base.py
Python
bsd-3-clause
43,476
0
# Copyright 2022 The Magenta Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for metrics.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from magenta.models.onsets_frames_transcription import infer_util import numpy as np import tensorflow.compat.v1 as tf tf.disable_v2_behavior() class InferUtilTest(tf.test.TestCase): def testProbsToPianorollViterbi(self): frame_probs = np.array([[0.2, 0.1], [0.5, 0.1], [0.5, 0.1], [0.8, 0.1]]) onset_probs = np.array([[0.1, 0.1], [0.1, 0.1], [0.9, 0.1], [0.1, 0.1]]) pianoroll = infer_util.probs_to_pianoroll_viterbi(frame_probs, onset_probs) np.testing.assert_array_equal( [[False, False], [False, False], [True, False], [True, False]], pianoroll) if __name__ == '__main__': tf.test.main()
magenta/magenta
magenta/models/onsets_frames_transcription/infer_util_test.py
Python
apache-2.0
1,356
0.001475
#!/usr/bin/python # Author: Jon Trulson <jtrulson@ics.com> # Copyright (c) 2016 Intel Corporation. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_bmx055 as sensorObj

def main():
    """Continuously print BMX055 accelerometer, gyroscope and magnetometer
    readings every 250 ms until interrupted with Ctrl-C."""
    # Instantiate a BMX055 instance using default i2c bus and address
    sensor = sensorObj.BMX055()

    ## Exit handlers ##
    # This function stops python from printing a stacktrace when you hit control-C
    def SIGINTHandler(signum, frame):
        raise SystemExit

    # This function lets you run code on exit
    def exitHandler():
        print("Exiting")
        sys.exit(0)

    # Register exit handlers
    atexit.register(exitHandler)
    signal.signal(signal.SIGINT, SIGINTHandler)

    # Out-parameters for the driver's get* calls; presumably SWIG-generated
    # C float pointers (read back via sensorObj.floatp_value) -- TODO confirm
    # against the pyupm_bmx055 bindings.
    x = sensorObj.new_floatp()
    y = sensorObj.new_floatp()
    z = sensorObj.new_floatp()

    # now output data every 250 milliseconds
    while (1):
        sensor.update()

        sensor.getAccelerometer(x, y, z)
        print("Accelerometer x:", sensorObj.floatp_value(x), end=' ')
        print(" y:", sensorObj.floatp_value(y), end=' ')
        print(" z:", sensorObj.floatp_value(z), end=' ')
        print(" g")

        sensor.getGyroscope(x, y, z)
        print("Gyroscope x:", sensorObj.floatp_value(x), end=' ')
        print(" y:", sensorObj.floatp_value(y), end=' ')
        print(" z:", sensorObj.floatp_value(z), end=' ')
        print(" degrees/s")

        sensor.getMagnetometer(x, y, z)
        print("Magnetometer x:", sensorObj.floatp_value(x), end=' ')
        print(" y:", sensorObj.floatp_value(y), end=' ')
        print(" z:", sensorObj.floatp_value(z), end=' ')
        print(" uT")
        print()

        time.sleep(.250)

if __name__ == '__main__':
    main()
whbruce/upm
examples/python/bmx055.py
Python
mit
2,781
0.001798
"""
Special purpose k-medoids algorithm.
"""
import numpy as np


def fit(sim_mat, D_len, cidx):
    """
    Refine cluster medoids with a fixed number of assignment/update passes.

    The similarity structure is sparse and contains mostly missing entries,
    which are treated as 0. The algorithm does not try to retain the initial
    number of clusters: empty clusters simply disappear.

    Parameters:
    sim_mat: dict of dicts - sparse symmetric similarity matrix;
        sim_mat[i][j] is the similarity between samples i and j
        (missing entries count as 0).
    D_len: int - total number of samples (candidate ids 0 .. D_len-1).
    cidx: list of int - initial medoid (cluster-center) indices.

    Returns:
    (inds, cidx) - cluster assignment for every instance and the medoid
        indices, taken from the lowest-energy of the three passes.
    """
    min_energy = np.inf
    # Defensive init; in practice pass 1 always beats np.inf and assigns these.
    inds_min, cidx_min = None, None
    for _ in range(3):  # fixed number of refinement passes
        # Assignment step: attach every known sample to the center with the
        # minimal stored similarity (missing entries default to 0).
        # NOTE(review): samples absent from sim_mat are skipped, so positions
        # in `inds` only line up with sample ids when every id in
        # range(D_len) is present in sim_mat -- TODO confirm with callers.
        inds = [np.argmin([sim_mat[idy].get(idx, 0) for idx in cidx])
                for idy in range(D_len) if idy in sim_mat]
        cidx = []
        energy = 0  # current energy (sum of per-cluster minima)
        for i in np.unique(inds):
            indsi = np.where(inds == i)[0]  # members of cluster i

            # Update step: choose the member minimizing its summed
            # similarity to the rest of the cluster.
            minind, min_value = 0, 0
            for index, idy in enumerate(indsi):
                if idy in sim_mat:
                    value = sum(sim_mat[idy].get(idx, 0) for idx in indsi)
                    if value < min_value:
                        minind, min_value = index, value
            energy += min_value
            cidx.append(indsi[minind])  # new center for cluster i
        # Keep the configuration with the lowest total energy seen so far.
        if energy < min_energy:
            min_energy, inds_min, cidx_min = energy, inds, cidx

    return inds_min, cidx_min  # cluster for every instance, medoid indices
romanorac/discomll
discomll/ensemble/core/k_medoids.py
Python
apache-2.0
1,504
0.001995
import os from pyjs import linker from pyjs import translator from pyjs import util from optparse import OptionParser import pyjs PLATFORM='spidermonkey' APP_TEMPLATE = """ var $wnd = new Object(); $wnd.document = new Object(); var $doc = $wnd.document; var $moduleName = "%(app_name)s"; var $pyjs = new Object(); $pyjs.__modules__ = {}; $pyjs.modules = {}; $pyjs.modules_hash = {}; $pyjs.available_modules = %(available_modules)s; $pyjs.loaded_modules = {}; $pyjs.options = new Object(); $pyjs.options.set_all = function (v) { $pyjs.options.arg_ignore = v; $pyjs.options.arg_count = v; $pyjs.options.arg_is_instance = v; $pyjs.options.arg_instance_type = v; $pyjs.options.arg_kwarg_dup = v; $pyjs.options.arg_kwarg_unexpected_keyword = v; $pyjs.options.arg_kwarg_multiple_values = v; } $pyjs.options.set_all(true); $pyjs.trackstack = []; $pyjs.track = {module:'__main__', lineno: 1}; $pyjs.trackstack.push($pyjs.track); $pyjs.__last_exception_stack__ = null; $pyjs.__last_exception__ = null; /* * prepare app system vars */ $pyjs.platform = 'spidermonkey'; $pyjs.appname = '%(app_name)s'; $pyjs.loadpath = './'; load(%(module_files)s); load(%(js_lib_files)s); /* late static js libs */ %(late_static_js_libs)s try { $pyjs.loaded_modules['pyjslib']('pyjslib'); $pyjs.loaded_modules['pyjslib'].___import___('%(app_name)s', '%(app_name)s', '__main__'); } catch(exception) { var fullMessage = exception.name + ': ' + exception.message; var uri = exception.fileName; //var stack = exception.stack; var line = exception.lineNumber; fullMessage += "\\n at " + uri + ": " + line; print (fullMessage ); //print (stack.toString() ); } """ class SpidermonkeyLinker(linker.BaseLinker): """Spidermonkey linker, which links together files by using the load function of the spidermonkey shell.""" # we derive from mozilla platform_parents = { PLATFORM:['mozilla', 'array_extras'] } def __init__(self, *args, **kwargs): kwargs['platforms'] = [PLATFORM] super(SpidermonkeyLinker, self).__init__(*args, **kwargs) def 
visit_start(self): super(SpidermonkeyLinker, self).visit_start() self.js_libs.append('_pyjs.js') self.merged_public = set() def merge_resources(self, dir_name): """find the absolute paths of js includes""" if not self.js_libs or dir_name in self.merged_public: return public_folder = os.path.join(dir_name, 'public') if not os.path.isdir(public_folder): return for i, js_lib in enumerate(self.js_libs): p = os.path.join(public_folder, js_lib) if os.path.isfile(p): self.js_libs[i] = p def visit_end(self): def static_code(libs, msg = None): code = [] for lib in libs: fname = lib if not os.path.isfile(fname): fname = os.path.join(self.output, lib) if not os.path.isfile(fname): raise RuntimeError('File not found %r' % lib) if fname[len_ouput_dir:] == self.output: name = fname[len_ouput_dir:] else: name = os.path.basename(lib) if not msg is None: code.append("/* start %s: %s */" % (msg, name)) f = file(fname) code.append(f.read()) if not msg is None: code.append("/* end %s */" % (name,)) self.remove_files[fname] = True fname = fname.split('.') if fname[-2] == '__%s__' % platform_name: del fname[-2] fname = '.'.join(fname) if os.path.isfile(fname): self.remove_files[fname] = True return "\n".join(code) done = self.done[PLATFORM] # locals - go into template via locals() module_files=str(done)[1:-1] js_lib_files=str(self.js_libs)[1:-1] early_static_js_libs=str(self.js_libs)[1:-1] late_static_js_libs = [] + self.late_static_js_libs late_static_js_libs = static_code(late_static_js_libs, "javascript lib") app_name = self.top_module available_modules = self.visited_modules[PLATFORM] out_file = open( os.path.join(self.output, self.top_module + '.js'), 'w') out_file.write(APP_TEMPLATE % locals()) out_file.close() def build_script(): usage = """ usage: %prog [options] module_name """ parser = OptionParser(usage = usage) translator.add_compile_options(parser) # override the default because we want print parser.set_defaults(print_statements=True) linker.add_linker_options(parser) 
options, args = parser.parse_args() if len(args) != 1: parser.error("incorrect number of arguments") top_module = args[0] for d in options.library_dirs: pyjs.path.append(os.path.abspath(d)) translator_arguments=dict( debug=options.debug, print_statements = options.print_statements, function_argument_checking=options.function_argument_checking, attribute_checking=options.attribute_checking, source_tracking=options.source_tracking, line_tracking=options.line_tracking, store_source=options.store_source ) l = SpidermonkeyLinker(top_module, output=options.output, platforms=[PLATFORM], path=pyjs.path, translator_arguments=translator_arguments) l()
andreyvit/pyjamas
pyjs/src/pyjs/sm.py
Python
apache-2.0
5,725
0.003493
import time from django import forms from django.forms.util import ErrorDict from django.conf import settings from django.contrib.contenttypes.models import ContentType from django.utils.crypto import salted_hmac, constant_time_compare from django.utils.encoding import force_text from django.utils.text import get_text_list from django.utils import timezone from django.utils.translation import ungettext, ugettext, ugettext_lazy as _ from opendebates_comments.models import Comment COMMENT_MAX_LENGTH = getattr(settings,'COMMENT_MAX_LENGTH', 3000) class CommentSecurityForm(forms.Form): """ Handles the security aspects (anti-spoofing) for comment forms. """ object_id = forms.CharField(widget=forms.HiddenInput) timestamp = forms.IntegerField(widget=forms.HiddenInput) security_hash = forms.CharField(min_length=40, max_length=40, widget=forms.HiddenInput) def __init__(self, target_object, data=None, initial=None): self.target_object = target_object if initial is None: initial = {} initial.update(self.generate_security_data()) super(CommentSecurityForm, self).__init__(data=data, initial=initial) def security_errors(self): """Return just those errors associated with security""" errors = ErrorDict() for f in ["honeypot", "timestamp", "security_hash"]: if f in self.errors: errors[f] = self.errors[f] return errors def clean_security_hash(self): """Check the security hash.""" security_hash_dict = { 'object_id' : self.data.get("object_id", ""), 'timestamp' : self.data.get("timestamp", ""), } expected_hash = self.generate_security_hash(**security_hash_dict) actual_hash = self.cleaned_data["security_hash"] if not constant_time_compare(expected_hash, actual_hash): raise forms.ValidationError("Security hash check failed.") return actual_hash def clean_timestamp(self): """Make sure the timestamp isn't too far (> 2 hours) in the past.""" ts = self.cleaned_data["timestamp"] if time.time() - ts > (2 * 60 * 60): raise forms.ValidationError("Timestamp check failed") return ts def 
generate_security_data(self): """Generate a dict of security data for "initial" data.""" timestamp = int(time.time()) security_dict = { 'object_id' : str(self.target_object.id), 'timestamp' : str(timestamp), 'security_hash' : self.initial_security_hash(timestamp), } return security_dict def initial_security_hash(self, timestamp): """ Generate the initial security hash from self.content_object and a (unix) timestamp. """ initial_security_dict = { 'object_id' : str(self.target_object.id), 'timestamp' : str(timestamp), } return self.generate_security_hash(**initial_security_dict) def generate_security_hash(self, object_id, timestamp): """ Generate a HMAC security hash from the provided info. """ info = (object_id, timestamp) key_salt = "django.contrib.forms.CommentSecurityForm" value = "-".join(info) return salted_hmac(key_salt, value).hexdigest() class CommentDetailsForm(CommentSecurityForm): """ Handles the specific details of the comment (name, comment, etc.). """ name = forms.CharField(label=_("Name"), max_length=50, widget=forms.HiddenInput) email = forms.EmailField(label=_("Email address"), widget=forms.HiddenInput) url = forms.URLField(label=_("URL"), required=False, widget=forms.HiddenInput) comment = forms.CharField(label=_('Comment'), widget=forms.Textarea, max_length=COMMENT_MAX_LENGTH) def get_comment_object(self): """ Return a new (unsaved) comment object based on the information in this form. Assumes that the form is already validated and will throw a ValueError if not. Does not set any of the fields that would come from a Request object (i.e. ``user`` or ``ip_address``). """ if not self.is_valid(): raise ValueError("get_comment_object may only be called on valid forms") CommentModel = self.get_comment_model() new = CommentModel(**self.get_comment_create_data()) new = self.check_for_duplicate_comment(new) return new def get_comment_model(self): """ Get the comment model to create with this form. 
Subclasses in custom comment apps should override this, get_comment_create_data, and perhaps check_for_duplicate_comment to provide custom comment models. """ return Comment def get_comment_create_data(self): """ Returns the dict of data to be used to create a comment. Subclasses in custom comment apps that override get_comment_model can override this method to add extra fields onto a custom comment model. """ return dict( object_id = force_text(self.target_object.id), comment = self.cleaned_data["comment"], submit_date = timezone.now(), is_public = True, is_removed = False, ) def check_for_duplicate_comment(self, new): """ Check that a submitted comment isn't a duplicate. This might be caused by someone posting a comment twice. If it is a dup, silently return the *previous* comment. """ possible_duplicates = self.get_comment_model()._default_manager.using( self.target_object._state.db ).filter( object_id = new.id, ) for old in possible_duplicates: if old.submit_date.date() == new.submit_date.date() and old.comment == new.comment: return old return new def clean_comment(self): """ If COMMENTS_ALLOW_PROFANITIES is False, check that the comment doesn't contain anything in PROFANITIES_LIST. """ comment = self.cleaned_data["comment"] if getattr(settings, 'COMMENTS_ALLOW_PROFANITIES', True) == False: bad_words = [w for w in settings.PROFANITIES_LIST if w in comment.lower()] if bad_words: raise forms.ValidationError(ungettext( "Watch your mouth! The word %s is not allowed here.", "Watch your mouth! 
The words %s are not allowed here.", len(bad_words)) % get_text_list( ['"%s%s%s"' % (i[0], '-'*(len(i)-2), i[-1]) for i in bad_words], ugettext('and'))) return comment class CommentForm(CommentDetailsForm): honeypot = forms.CharField(required=False, label=_('If you enter anything in this field '\ 'your comment will be treated as spam')) def clean_honeypot(self): """Check that nothing's been entered into the honeypot.""" value = self.cleaned_data["honeypot"] if value: raise forms.ValidationError(self.fields["honeypot"].label) return value
boldprogressives/django-opendebates
opendebates/opendebates_comments/forms.py
Python
apache-2.0
7,408
0.006479
''' This module should be run to recreate the files that we generate automatically (i.e.: modules that shouldn't be traced and cython .pyx) ''' from __future__ import print_function import os import struct def is_python_64bit(): return (struct.calcsize('P') == 8) root_dir = os.path.join(os.path.dirname(__file__), '..') def get_cython_contents(filename): if filename.endswith('.pyc'): filename = filename[:-1] state = 'regular' new_contents = [] with open(filename, 'r') as stream: for line in stream: strip = line.strip() if state == 'regular': if strip == '# IFDEF CYTHON': state = 'cython' new_contents.append('%s -- DONT EDIT THIS FILE (it is automatically generated)\n' % line.replace('\n', '').replace('\r', '')) continue new_contents.append(line) elif state == 'cython': if strip == '# ELSE': state = 'nocython' new_contents.append(line) continue elif strip == '# ENDIF': state = 'regular' new_contents.append(line) continue assert strip.startswith('# '), 'Line inside # IFDEF CYTHON must start with "# ".' new_contents.append(line.replace('# ', '', 1)) elif state == 'nocython': if strip == '# ENDIF': state = 'regular' new_contents.append(line) continue new_contents.append('# %s' % line) assert state == 'regular', 'Error: # IFDEF CYTHON found without # ENDIF' return ''.join(new_contents) def _generate_cython_from_files(target, modules): contents = ['''# Important: Autogenerated file. # DO NOT edit manually! # DO NOT edit manually! '''] for mod in modules: contents.append(get_cython_contents(mod.__file__)) with open(target, 'w') as stream: stream.write(''.join(contents)) def generate_dont_trace_files(): template = '''# Important: Autogenerated file. # DO NOT edit manually! # DO NOT edit manually! 
from _pydevd_bundle.pydevd_constants import IS_PY3K LIB_FILE = 1 PYDEV_FILE = 2 DONT_TRACE = { # commonly used things from the stdlib that we don't want to trace 'Queue.py':LIB_FILE, 'queue.py':LIB_FILE, 'socket.py':LIB_FILE, 'weakref.py':LIB_FILE, '_weakrefset.py':LIB_FILE, 'linecache.py':LIB_FILE, 'threading.py':LIB_FILE, #things from pydev that we don't want to trace '_pydev_execfile.py':PYDEV_FILE, %(pydev_files)s } if IS_PY3K: # if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716) DONT_TRACE['io.py'] = LIB_FILE # Don't trace common encodings too DONT_TRACE['cp1252.py'] = LIB_FILE DONT_TRACE['utf_8.py'] = LIB_FILE ''' pydev_files = [] for root, dirs, files in os.walk(root_dir): for d in [ '.git', '.settings', 'build', 'build_tools', 'dist', 'pydevd.egg-info', 'pydevd_attach_to_process', 'pydev_sitecustomize', 'stubs', 'tests', 'tests_mainloop', 'tests_python', 'tests_runfiles', 'test_pydevd_reload', 'third_party', '__pycache__', '_pydev_runfiles', 'pydev_ipython', ]: try: dirs.remove(d) except: pass for f in files: if f.endswith('.py'): if f not in ( '__init__.py', 'runfiles.py', 'pydev_coverage.py', 'pydev_pysrc.py', 'setup.py', 'setup_cython.py', 'interpreterInfo.py', ): pydev_files.append(" '%s': PYDEV_FILE," % (f,)) contents = template % (dict(pydev_files='\n'.join(sorted(pydev_files)))) assert 'pydevd.py' in contents assert 'pydevd_dont_trace.py' in contents with open(os.path.join(root_dir, '_pydevd_bundle', 'pydevd_dont_trace_files.py'), 'w') as stream: stream.write(contents) def remove_if_exists(f): try: if os.path.exists(f): os.remove(f) except: import traceback;traceback.print_exc() def generate_cython_module(): remove_if_exists(os.path.join(root_dir, '_pydevd_bundle', 'pydevd_cython.pyx')) target = os.path.join(root_dir, '_pydevd_bundle', 'pydevd_cython.pyx') curr = os.environ.get('PYDEVD_USE_CYTHON') try: os.environ['PYDEVD_USE_CYTHON'] = 'NO' from _pydevd_bundle import pydevd_additional_thread_info_regular from 
_pydevd_bundle import pydevd_frame, pydevd_trace_dispatch_regular _generate_cython_from_files(target, [pydevd_additional_thread_info_regular, pydevd_frame, pydevd_trace_dispatch_regular]) finally: if curr is None: del os.environ['PYDEVD_USE_CYTHON'] else: os.environ['PYDEVD_USE_CYTHON'] = curr if __name__ == '__main__': generate_dont_trace_files() generate_cython_module()
idea4bsd/idea4bsd
python/helpers/pydev/build_tools/generate_code.py
Python
apache-2.0
5,381
0.003531
# # Copyright (c) 2015 Juniper Networks, Inc. All rights reserved. # from gevent import monkey monkey.patch_all() from pysandesh.sandesh_base import sandesh_global from sandesh_common.vns.ttypes import Module from nodemgr.common.event_manager import EventManager, EventManagerTypeInfo class ConfigEventManager(EventManager): def __init__(self, config, unit_names): type_info = EventManagerTypeInfo( module_type=Module.CONFIG_NODE_MGR, object_table='ObjectConfigNode') super(ConfigEventManager, self).__init__(config, type_info, sandesh_global, unit_names)
eonpatapon/contrail-controller
src/nodemgr/config_nodemgr/event_manager.py
Python
apache-2.0
616
0.006494
############################################################################## # # OSIS stands for Open Student Information System. It's an application # designed to manage the core business of higher education institutions, # such as universities, faculties, institutes and professional schools. # The core business involves the administration of students, teachers, # courses, programs and so on. # # Copyright (C) 2015-2022 Université catholique de Louvain (http://www.uclouvain.be) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of this license - GNU General Public License - is available # at the root of the source code of this program. If not, # see http://www.gnu.org/licenses/. 
# ############################################################################## import contextlib import warnings from _decimal import Decimal from typing import Optional, List from django.db import IntegrityError from django.db.models import F, Case, When, IntegerField, QuerySet, Max, OuterRef, Subquery from django.db.models import Q from base.models.academic_year import AcademicYear from base.models.education_group_year import EducationGroupYear from base.models.enums.education_group_categories import Categories from education_group.ddd.domain.exception import TrainingNotFoundException from education_group.models.group import Group from education_group.models.group_year import GroupYear from osis_common.ddd import interface from osis_common.ddd.interface import RootEntity from program_management import formatter from program_management.ddd import command from program_management.ddd.business_types import * from program_management.ddd.domain import exception from program_management.ddd.domain import program_tree from program_management.ddd.domain import program_tree_version from program_management.ddd.domain.exception import ProgramTreeVersionNotFoundException from program_management.ddd.domain.program_tree_version import ProgramTreeVersionIdentity, STANDARD, NOT_A_TRANSITION from program_management.ddd.dtos import UniteEnseignementDTO, ContenuNoeudDTO, ProgrammeDeFormationDTO from program_management.ddd.repositories import program_tree as program_tree_repository from program_management.models.education_group_version import EducationGroupVersion class ProgramTreeVersionRepository(interface.AbstractRepository): @classmethod def save(cls, entity: RootEntity) -> None: raise NotImplementedError @classmethod def create( cls, program_tree_version: 'ProgramTreeVersion', **_ ) -> 'ProgramTreeVersionIdentity': warnings.warn("DEPRECATED : use .save() function instead", DeprecationWarning, stacklevel=2) offer_acronym = program_tree_version.entity_id.offer_acronym year = 
program_tree_version.entity_id.year try: education_group_year_id = EducationGroupYear.objects.filter( acronym=offer_acronym, academic_year__year=year, ).values_list( 'pk', flat=True )[0] except IndexError: raise TrainingNotFoundException(acronym=offer_acronym, year=year) group_year_id = GroupYear.objects.filter( partial_acronym=program_tree_version.program_tree_identity.code, academic_year__year=program_tree_version.program_tree_identity.year, ).values_list( 'pk', flat=True )[0] try: educ_group_version = EducationGroupVersion.objects.create( version_name=program_tree_version.version_name, title_fr=program_tree_version.title_fr, title_en=program_tree_version.title_en, offer_id=education_group_year_id, transition_name=program_tree_version.entity_id.transition_name, root_group_id=group_year_id, ) _update_start_year_and_end_year( educ_group_version, program_tree_version.start_year, program_tree_version.end_year_of_existence ) except IntegrityError as ie: raise exception.ProgramTreeAlreadyExistsException return program_tree_version.entity_id @classmethod def update(cls, program_tree_version: 'ProgramTreeVersion', **_) -> 'ProgramTreeVersionIdentity': warnings.warn("DEPRECATED : use .save() function instead", DeprecationWarning, stacklevel=2) obj = EducationGroupVersion.objects.get( offer__acronym=program_tree_version.entity_identity.offer_acronym, offer__academic_year__year=program_tree_version.entity_identity.year, version_name=program_tree_version.entity_identity.version_name, transition_name=program_tree_version.entity_identity.transition_name, ) obj.version_name = program_tree_version.version_name obj.title_fr = program_tree_version.title_fr obj.title_en = program_tree_version.title_en obj.save() _update_start_year_and_end_year( obj, program_tree_version.start_year, program_tree_version.end_year_of_existence ) return program_tree_version.entity_id @classmethod def get(cls, entity_id: 'ProgramTreeVersionIdentity') -> 'ProgramTreeVersion': qs = 
_get_common_queryset().filter( version_name=entity_id.version_name, offer__acronym=entity_id.offer_acronym, offer__academic_year__year=entity_id.year, transition_name=entity_id.transition_name, ) try: return _instanciate_tree_version(qs.get()) except EducationGroupVersion.DoesNotExist: raise exception.ProgramTreeVersionNotFoundException() @classmethod def get_last_in_past(cls, entity_id: 'ProgramTreeVersionIdentity') -> 'ProgramTreeVersion': qs = EducationGroupVersion.objects.filter( version_name=entity_id.version_name, offer__acronym=entity_id.offer_acronym, offer__academic_year__year__lt=entity_id.year, transition_name=entity_id.transition_name ).order_by( 'offer__academic_year' ).values_list( 'offer__academic_year__year', flat=True, ) if qs: last_past_year = qs.last() last_identity = ProgramTreeVersionIdentity( offer_acronym=entity_id.offer_acronym, year=last_past_year, version_name=entity_id.version_name, transition_name=entity_id.transition_name, ) return cls.get(entity_id=last_identity) @classmethod def search( cls, entity_ids: Optional[List['ProgramTreeVersionIdentity']] = None, version_name: str = None, offer_acronym: str = None, transition_name: str = None, code: str = None, year: int = None, **kwargs ) -> List['ProgramTreeVersion']: qs = _get_common_queryset() if "element_ids" in kwargs: qs = qs.filter(root_group__element__in=kwargs['element_ids']) if version_name is not None: qs = qs.filter(version_name=version_name) if offer_acronym is not None: qs = qs.filter(offer__acronym=offer_acronym) if transition_name is not None: qs = qs.filter(transition_name=transition_name) if year is not None: qs = qs.filter(offer__academic_year__year=year) if code is not None: qs = qs.filter(root_group__partial_acronym=code) results = [] for record_dict in qs: results.append(_instanciate_tree_version(record_dict)) return results @classmethod def delete( cls, entity_id: 'ProgramTreeVersionIdentity', delete_program_tree_service: interface.ApplicationService = None ) -> None: 
program_tree_version = cls.get(entity_id) EducationGroupVersion.objects.filter( version_name=entity_id.version_name, offer__acronym=entity_id.offer_acronym, offer__academic_year__year=entity_id.year, transition_name=entity_id.transition_name, ).delete() root_node = program_tree_version.get_tree().root_node cmd = command.DeleteProgramTreeCommand(code=root_node.code, year=root_node.year) delete_program_tree_service(cmd) @classmethod def search_all_versions_from_root_node(cls, root_node_identity: 'NodeIdentity') -> List['ProgramTreeVersion']: offer_ids = EducationGroupVersion.objects.filter( root_group__partial_acronym=root_node_identity.code, root_group__academic_year__year=root_node_identity.year ).values_list('offer_id', flat=True) return _search_versions_from_offer_ids(list(offer_ids)) @classmethod def search_all_versions_from_root_nodes(cls, node_identities: List['NodeIdentity']) -> List['ProgramTreeVersion']: offer_ids = _search_by_node_entities(list(node_identities)) return _search_versions_from_offer_ids(offer_ids) @classmethod def search_versions_from_trees(cls, trees: List['ProgramTree']) -> List['ProgramTreeVersion']: root_nodes_identities = [tree.root_node.entity_id for tree in trees] tree_versions = cls.search_all_versions_from_root_nodes(root_nodes_identities) result = [] for tree_version in tree_versions: with contextlib.suppress(StopIteration): tree_version.tree = next(tree for tree in trees if tree.entity_id == tree_version.program_tree_identity) result.append(tree_version) return result @classmethod def search_last_occurence(cls, from_year: int) -> List['ProgramTreeVersion']: subquery_max_existing_year_for_offer = EducationGroupVersion.objects.filter( offer__academic_year__year__gte=from_year, offer__education_group=OuterRef("offer__education_group"), version_name=OuterRef('version_name'), transition_name=OuterRef('transition_name') ).values( "offer__education_group" ).annotate( max_year=Max("offer__academic_year__year") ).order_by( 
"offer__education_group" ).values("max_year") qs = _get_common_queryset().filter( offer__academic_year__year=Subquery(subquery_max_existing_year_for_offer[:1]) ) results = [] for record_dict in qs: results.append(_instanciate_tree_version(record_dict)) return results @classmethod def get_dto(cls, identity: ProgramTreeVersionIdentity) -> Optional['ProgrammeDeFormationDTO']: pgm_tree_version = cls.get(identity) return build_dto(pgm_tree_version, identity) @classmethod def get_dto_from_year_and_code(cls, code: str, year: int) -> Optional['ProgrammeDeFormationDTO']: pgm_tree_version = cls.search(code=code, year=year) if pgm_tree_version: return build_dto(pgm_tree_version[0], pgm_tree_version[0].entity_identity) raise ProgramTreeVersionNotFoundException def _update_start_year_and_end_year( educ_group_version: EducationGroupVersion, start_year: int, end_year_of_existence: int ): # FIXME :: should add a field EducationgroupVersion.end_year # FIXME :: and should remove GroupYear.end_year # FIXME :: End_year is useful only for EducationGroupYear (training, minitraining) and programTreeVersions. # FIXME :: End year is not useful for Groups. For business, Group doesn't have a 'end date'. 
group = Group.objects.get( groupyear__educationgroupversion__pk=educ_group_version.pk ) end_year_id = None if end_year_of_existence: end_year_id = AcademicYear.objects.only('pk').get(year=end_year_of_existence).pk group.end_year_id = end_year_id group.start_year_id = AcademicYear.objects.only('pk').get(year=start_year).pk group.save() def _instanciate_tree_version(record_dict: dict) -> 'ProgramTreeVersion': identity = program_tree_version.ProgramTreeVersionIdentity( offer_acronym=record_dict['offer_acronym'], year=record_dict['offer_year'], version_name=record_dict['version_name'], transition_name=record_dict['transition_name'], ) return program_tree_version.ProgramTreeVersion( entity_identity=identity, entity_id=identity, program_tree_identity=program_tree.ProgramTreeIdentity(record_dict['code'], record_dict['offer_year']), program_tree_repository=program_tree_repository.ProgramTreeRepository(), start_year=record_dict['start_year'], title_fr=record_dict['version_title_fr'], title_en=record_dict['version_title_en'], end_year_of_existence=record_dict['end_year_of_existence'], ) def _search_by_node_entities(entity_ids: List['NodeIdentity']) -> List[int]: if bool(entity_ids): qs = EducationGroupVersion.objects.all().values_list('offer_id', flat=True) filter_search_from = _build_where_clause(entity_ids[0]) for identity in entity_ids[1:]: filter_search_from |= _build_where_clause(identity) qs = qs.filter(filter_search_from) return list(qs) return [] def _build_where_clause(node_identity: 'NodeIdentity') -> Q: return Q( Q( root_group__partial_acronym=node_identity.code, root_group__academic_year__year=node_identity.year ) ) def _search_versions_from_offer_ids(offer_ids: List[int]) -> List['ProgramTreeVersion']: qs = _get_common_queryset() qs = qs.filter( offer_id__in=offer_ids, ) results = [] for record_dict in qs: results.append(_instanciate_tree_version(record_dict)) return results def _get_common_queryset() -> QuerySet: return 
EducationGroupVersion.objects.all().order_by( 'version_name' ).annotate( code=F('root_group__partial_acronym'), offer_acronym=F('offer__acronym'), offer_year=F('offer__academic_year__year'), version_title_fr=F('title_fr'), version_title_en=F('title_en'), # FIXME :: should add a field EducationgroupVersion.end_year # FIXME :: and should remove GroupYear.end_year # FIXME :: End_year is useful only for EducationGroupYear (training, minitraining) and programTreeVersions. # FIXME :: End year is not useful for Groups. For business, Group doesn't have a 'end date'. end_year_of_existence=Case( When( Q( offer__education_group_type__category__in={ Categories.TRAINING.name, Categories.MINI_TRAINING.name } ) & Q( version_name=STANDARD ) & Q( transition_name=NOT_A_TRANSITION ), then=F('offer__education_group__end_year__year') ), default=F('root_group__group__end_year__year'), output_field=IntegerField(), ), start_year=Case( When( Q( offer__education_group_type__category__in={ Categories.TRAINING.name, Categories.MINI_TRAINING.name } ) & Q( version_name=STANDARD ) & Q( transition_name=NOT_A_TRANSITION ), then=F('offer__education_group__start_year__year') ), default=F('root_group__group__start_year__year'), output_field=IntegerField(), ), ).values( 'code', 'offer_acronym', 'offer_year', 'version_name', 'version_title_fr', 'version_title_en', 'transition_name', 'end_year_of_existence', 'start_year', ) def build_dto(pgm_tree_version: 'ProgramTreeVersion', identity: ProgramTreeVersionIdentity) \ -> 'ProgrammeDeFormationDTO': tree = pgm_tree_version.get_tree() contenu = _build_contenu(tree.root_node, ) return ProgrammeDeFormationDTO( racine=contenu, annee=identity.year, sigle=identity.offer_acronym, version=identity.version_name, intitule_formation="{}{}".format( tree.root_node.offer_title_fr, "{}".format("[ {} ]".format(pgm_tree_version.title_fr) if pgm_tree_version.title_fr else '') ), code=tree.entity_id.code, transition_name=identity.transition_name ) def _build_contenu(node: 
'Node', lien_parent: 'Link' = None) -> 'ContenuNoeudDTO': contenu_ordonne = [] for lien in node.children: if lien.child.is_learning_unit(): contenu_ordonne.append( UniteEnseignementDTO( bloc=lien.block, code=lien.child.code, intitule_complet=lien.child.title, quadrimestre=lien.child.quadrimester, quadrimestre_texte=lien.child.quadrimester.value if lien.child.quadrimester else "", credits_absolus=lien.child.credits, volume_annuel_pm=lien.child.volume_total_lecturing, volume_annuel_pp=lien.child.volume_total_practical, obligatoire=lien.is_mandatory if lien else False, session_derogation='', credits_relatifs=lien.relative_credits, ) ) else: groupement_contenu = _build_contenu(lien.child, lien_parent=lien) contenu_ordonne.append(groupement_contenu) return ContenuNoeudDTO( code=node.code, intitule=node.title, remarque=node.remark_fr, obligatoire=lien_parent.is_mandatory if lien_parent else False, credits=_get_credits(lien_parent), intitule_complet=get_verbose_title_group(node), contenu_ordonne=contenu_ordonne, ) def get_verbose_title_group(node: 'NodeGroupYear') -> str: if node.is_finality(): return format_complete_title_label(node, node.offer_partial_title_fr) if node.is_option(): return format_complete_title_label(node, node.offer_title_fr) else: return node.group_title_fr def format_complete_title_label(node, title_fr) -> str: version_complete_label = formatter.format_version_complete_name(node, "fr-be") return "{}{}".format(title_fr, version_complete_label) def _get_credits(link: 'Link') -> Optional[Decimal]: if link: return link.relative_credits or link.child.credits or 0 return None
uclouvain/osis
program_management/ddd/repositories/program_tree_version.py
Python
agpl-3.0
19,400
0.002165
#!/usr/bin/env python # Copyright 2009-2014 Eucalyptus Systems, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
nephomaniac/eucio
eucio/topology/userfacing/__init__.py
Python
apache-2.0
611
0.001637
# -*- coding: utf-8 -*- """ """ from __future__ import unicode_literals import logging import os import hashlib logger = logging.getLogger(__name__) _log = "pelican_comment_system: avatars: " try: from . identicon import identicon _identiconImported = True except ImportError as e: logger.warning(_log + "identicon deactivated: " + str(e)) _identiconImported = False # Global Variables _identicon_save_path = None _identicon_output_path = None _identicon_data = None _identicon_size = None _initialized = False _authors = None _missingAvatars = [] def _ready(): if not _initialized: logger.warning(_log + "Module not initialized. use init") if not _identicon_data: logger.debug(_log + "No identicon data set") return _identiconImported and _initialized and _identicon_data def init(pelican_output_path, identicon_output_path, identicon_data, identicon_size, authors): global _identicon_save_path global _identicon_output_path global _identicon_data global _identicon_size global _initialized global _authors _identicon_save_path = os.path.join(pelican_output_path, identicon_output_path) _identicon_output_path = identicon_output_path _identicon_data = identicon_data _identicon_size = identicon_size _authors = authors _initialized = True def _createIdenticonOutputFolder(): if not _ready(): return if not os.path.exists(_identicon_save_path): os.makedirs(_identicon_save_path) def getAvatarPath(comment_id, metadata): if not _ready(): return '' md5 = hashlib.md5() author = tuple() for data in _identicon_data: if data in metadata: string = str(metadata[data]) md5.update(string.encode('utf-8')) author += tuple([string]) else: logger.warning(_log + data + " is missing in comment: " + comment_id) if author in _authors: return _authors[author] global _missingAvatars code = md5.hexdigest() if not code in _missingAvatars: _missingAvatars.append(code) return os.path.join(_identicon_output_path, '%s.png' % code) def generateAndSaveMissingAvatars(): _createIdenticonOutputFolder() for code in 
_missingAvatars: avatar_path = '%s.png' % code avatar = identicon.render_identicon(int(code, 16), _identicon_size) avatar_save_path = os.path.join(_identicon_save_path, avatar_path) avatar.save(avatar_save_path, 'PNG')
znegva/pelican-plugins
pelican_comment_system/avatars.py
Python
agpl-3.0
2,305
0.023861
''' Created on Nov 10, 2014 @author: lauritz ''' from mock import Mock from fakelargefile.segmenttail import OverlapSearcher def test_index_iter_stop(): os = OverlapSearcher("asdf") segment = Mock() segment.start = 11 try: os.index_iter(segment, stop=10).next() except ValueError: assert True else: assert False
LauritzThaulow/fakelargefile
tests/test_segmenttail.py
Python
agpl-3.0
365
0
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Static() result.template = "object/static/structure/general/shared_palette_supply_01.iff" result.attribute_template_id = -1 result.stfName("obj_n","unknown_object") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
anhstudios/swganh
data/scripts/templates/object/static/structure/general/shared_palette_supply_01.py
Python
mit
455
0.048352
# -*- coding: utf-8 -*- # Copyright (C) Duncan Macleod (2013) # # This file is part of GWSumm. # # GWSumm is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # GWSumm is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GWSumm. If not, see <http://www.gnu.org/licenses/>. """A `Plot` is a representation of an image to be included in the HTML output a :doc:`tab </tabs>`. For simple purposes, a `Plot` is just a reference to an existing image file that can be imported into an HTML page via the ``<img>`` tag. For more complicated purposes, a number of data plot classes are provided to allow users to generate images on-the-fly. The available classes are: .. autosummary:: :toctree: api TimeSeriesDataPlot SpectrogramDataPlot SegmentDataPlot StateVectorDataPlot SpectrumDataPlot TimeSeriesHistogramPlot TriggerTimeSeriesDataPlot TriggerHistogramPlot TriggerRateDataPlot """ __author__ = 'Duncan Macleod <duncan.macleod@ligo.org>' from .registry import * from .utils import * from .core import * from .builtin import * from .segments import * from .triggers import * from .range import * from .noisebudget import * from .guardian import * from .sei import *
duncanmmacleod/gwsumm
gwsumm/plot/__init__.py
Python
gpl-3.0
1,636
0
"""Integration project URL Configuration""" from django.contrib import admin from django.urls import re_path from django.views.generic import TemplateView urlpatterns = [ re_path(r"^admin/", admin.site.urls), re_path( r"^$", TemplateView.as_view(template_name="home.html"), name="home" ), ]
jambonsw/django-improved-user
example_integration_project/config/urls.py
Python
bsd-2-clause
312
0
# -*- coding: utf8 -*- # Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from tencentcloud.common.abstract_model import AbstractModel class ArtifactReduction(AbstractModel): """去编码毛刺、伪影参数 """ def __init__(self): r""" :param Type: 去毛刺方式:weak,,strong :type Type: str :param Algorithm: 去毛刺算法,可选项: edaf, wdaf, 默认edaf。 注意:此参数已经弃用 :type Algorithm: str """ self.Type = None self.Algorithm = None def _deserialize(self, params): self.Type = params.get("Type") self.Algorithm = params.get("Algorithm") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class AudioEnhance(AbstractModel): """音频音效增强,只支持无背景音的音频 """ def __init__(self): r""" :param Type: 音效增强种类,可选项:normal :type Type: str """ self.Type = None def _deserialize(self, params): self.Type = params.get("Type") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class AudioInfo(AbstractModel): """音频参数信息 """ def __init__(self): r""" :param Bitrate: 音频码率,取值范围:0 和 [26, 256],单位:kbps。 注意:当取值为 0,表示音频码率和原始音频保持一致。 :type Bitrate: int :param Codec: 音频编码器,可选项:aac,mp3,ac3,flac,mp2。 :type Codec: str :param Channel: 声道数,可选项: 1:单声道, 2:双声道, 6:立体声。 :type Channel: int :param SampleRate: 采样率,单位:Hz。可选项:32000,44100,48000 :type SampleRate: int :param Denoise: 音频降噪信息 :type Denoise: :class:`tencentcloud.ie.v20200304.models.Denoise` :param EnableMuteAudio: 开启添加静音,可选项: 0:不开启, 1:开启, 默认不开启 :type EnableMuteAudio: int :param LoudnessInfo: 音频响度信息 :type LoudnessInfo: :class:`tencentcloud.ie.v20200304.models.LoudnessInfo` :param AudioEnhance: 音频音效增强 :type AudioEnhance: :class:`tencentcloud.ie.v20200304.models.AudioEnhance` :param RemoveReverb: 去除混音 :type RemoveReverb: :class:`tencentcloud.ie.v20200304.models.RemoveReverb` """ self.Bitrate = None self.Codec = None self.Channel = None self.SampleRate = None self.Denoise = None self.EnableMuteAudio = None self.LoudnessInfo = None self.AudioEnhance = None self.RemoveReverb = None def _deserialize(self, params): self.Bitrate = params.get("Bitrate") self.Codec = params.get("Codec") self.Channel = params.get("Channel") self.SampleRate = params.get("SampleRate") if params.get("Denoise") is not None: self.Denoise = Denoise() self.Denoise._deserialize(params.get("Denoise")) self.EnableMuteAudio = params.get("EnableMuteAudio") if params.get("LoudnessInfo") is not None: self.LoudnessInfo = LoudnessInfo() self.LoudnessInfo._deserialize(params.get("LoudnessInfo")) if params.get("AudioEnhance") is not None: self.AudioEnhance = AudioEnhance() self.AudioEnhance._deserialize(params.get("AudioEnhance")) if params.get("RemoveReverb") is not None: self.RemoveReverb = RemoveReverb() self.RemoveReverb._deserialize(params.get("RemoveReverb")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s 
fileds are useless." % ",".join(memeber_set)) class AudioInfoResultItem(AbstractModel): """任务结束后生成的文件音频信息 """ def __init__(self): r""" :param Stream: 音频流的流id。 :type Stream: int :param Sample: 音频采样率 。 注意:此字段可能返回 null,表示取不到有效值。 :type Sample: int :param Channel: 音频声道数。 注意:此字段可能返回 null,表示取不到有效值。 :type Channel: int :param Codec: 编码格式,如aac, mp3等。 注意:此字段可能返回 null,表示取不到有效值。 :type Codec: str :param Bitrate: 码率,单位:bps。 注意:此字段可能返回 null,表示取不到有效值。 :type Bitrate: int :param Duration: 音频时长,单位:ms。 注意:此字段可能返回 null,表示取不到有效值。 :type Duration: int """ self.Stream = None self.Sample = None self.Channel = None self.Codec = None self.Bitrate = None self.Duration = None def _deserialize(self, params): self.Stream = params.get("Stream") self.Sample = params.get("Sample") self.Channel = params.get("Channel") self.Codec = params.get("Codec") self.Bitrate = params.get("Bitrate") self.Duration = params.get("Duration") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CallbackInfo(AbstractModel): """任务结果回调地址信息 """ def __init__(self): r""" :param Url: 回调URL。 :type Url: str """ self.Url = None def _deserialize(self, params): self.Url = params.get("Url") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class ClassificationEditingInfo(AbstractModel): """视频分类识别任务参数信息 """ def __init__(self): r""" :param Switch: 是否开启视频分类识别。0为关闭,1为开启。其他非0非1值默认为0。 :type Switch: int :param CustomInfo: 额外定制化服务参数。参数为序列化的Json字符串,例如:{"k1":"v1"}。 :type CustomInfo: str """ self.Switch = None self.CustomInfo = None def _deserialize(self, params): self.Switch = params.get("Switch") self.CustomInfo = params.get("CustomInfo") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ClassificationTaskResult(AbstractModel): """视频分类识别结果信息 """ def __init__(self): r""" :param Status: 编辑任务状态。 1:执行中;2:成功;3:失败。 :type Status: int :param ErrCode: 编辑任务失败错误码。 0:成功;其他值:失败。 :type ErrCode: int :param ErrMsg: 编辑任务失败错误描述。 :type ErrMsg: str :param ItemSet: 视频分类识别结果集。 注意:此字段可能返回 null,表示取不到有效值。 :type ItemSet: list of ClassificationTaskResultItem """ self.Status = None self.ErrCode = None self.ErrMsg = None self.ItemSet = None def _deserialize(self, params): self.Status = params.get("Status") self.ErrCode = params.get("ErrCode") self.ErrMsg = params.get("ErrMsg") if params.get("ItemSet") is not None: self.ItemSet = [] for item in params.get("ItemSet"): obj = ClassificationTaskResultItem() obj._deserialize(item) self.ItemSet.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class ClassificationTaskResultItem(AbstractModel): """视频分类识别结果项 """ def __init__(self): r""" :param Classification: 分类名称。 :type Classification: str :param Confidence: 置信度,取值范围是 0 到 100。 :type Confidence: float """ self.Classification = None self.Confidence = None def _deserialize(self, params): self.Classification = params.get("Classification") self.Confidence = params.get("Confidence") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ColorEnhance(AbstractModel): """颜色增强参数 """ def __init__(self): r""" :param Type: 颜色增强类型,可选项: 1. tra; 2. weak; 3. normal; 4. strong; 注意:tra不支持自适应调整,处理速度更快;weak,normal,strong支持基于画面颜色自适应,处理速度更慢。 :type Type: str """ self.Type = None def _deserialize(self, params): self.Type = params.get("Type") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CosAuthMode(AbstractModel): """任务视频cos授权信息 """ def __init__(self): r""" :param Type: 授权类型,可选值: 0:bucket授权,需要将对应bucket授权给本服务帐号(3020447271和100012301793),否则会读写cos失败; 1:key托管,把cos的账号id和key托管于本服务,本服务会提供一个托管id; 3:临时key授权。 注意:目前智能编辑还不支持临时key授权;画质重生目前只支持bucket授权 :type Type: int :param HostedId: cos账号托管id,Type等于1时必选。 :type HostedId: str :param SecretId: cos身份识别id,Type等于3时必选。 :type SecretId: str :param SecretKey: cos身份秘钥,Type等于3时必选。 :type SecretKey: str :param Token: 临时授权 token,Type等于3时必选。 :type Token: str """ self.Type = None self.HostedId = None self.SecretId = None self.SecretKey = None self.Token = None def _deserialize(self, params): self.Type = params.get("Type") self.HostedId = params.get("HostedId") self.SecretId = params.get("SecretId") self.SecretKey = params.get("SecretKey") self.Token = params.get("Token") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CosInfo(AbstractModel): """任务视频cos信息 """ def __init__(self): r""" :param Region: cos 区域值。例如:ap-beijing。 :type Region: str :param Bucket: cos 存储桶,格式为BuketName-AppId。例如:test-123456。 :type Bucket: str :param Path: cos 路径。 对于写表示目录,例如:/test; 对于读表示文件路径,例如:/test/test.mp4。 :type Path: str :param CosAuthMode: cos 授权信息,不填默认为公有权限。 :type CosAuthMode: :class:`tencentcloud.ie.v20200304.models.CosAuthMode` """ self.Region = None self.Bucket = None self.Path = None self.CosAuthMode = None def _deserialize(self, params): self.Region = params.get("Region") self.Bucket = params.get("Bucket") self.Path = params.get("Path") if params.get("CosAuthMode") is not None: self.CosAuthMode = CosAuthMode() self.CosAuthMode._deserialize(params.get("CosAuthMode")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CoverEditingInfo(AbstractModel): """智能封面任务参数信息 """ def __init__(self): r""" :param Switch: 是否开启智能封面。0为关闭,1为开启。其他非0非1值默认为0。 :type Switch: int :param CustomInfo: 额外定制化服务参数。参数为序列化的Json字符串,例如:{"k1":"v1"}。 :type CustomInfo: str """ self.Switch = None self.CustomInfo = None def _deserialize(self, params): self.Switch = params.get("Switch") self.CustomInfo = params.get("CustomInfo") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CoverTaskResult(AbstractModel): """智能封面结果信息 """ def __init__(self): r""" :param Status: 编辑任务状态。 1:执行中;2:成功;3:失败。 :type Status: int :param ErrCode: 编辑任务失败错误码。 0:成功;其他值:失败。 :type ErrCode: int :param ErrMsg: 编辑任务失败错误描述。 :type ErrMsg: str :param ItemSet: 智能封面结果集。 注意:此字段可能返回 null,表示取不到有效值。 :type ItemSet: list of CoverTaskResultItem """ self.Status = None self.ErrCode = None self.ErrMsg = None self.ItemSet = None def _deserialize(self, params): self.Status = params.get("Status") self.ErrCode = params.get("ErrCode") self.ErrMsg = params.get("ErrMsg") if params.get("ItemSet") is not None: self.ItemSet = [] for item in params.get("ItemSet"): obj = CoverTaskResultItem() obj._deserialize(item) self.ItemSet.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CoverTaskResultItem(AbstractModel): """智能封面结果项 """ def __init__(self): r""" :param CoverUrl: 智能封面地址。 :type CoverUrl: str :param Confidence: 置信度,取值范围是 0 到 100。 :type Confidence: float """ self.CoverUrl = None self.Confidence = None def _deserialize(self, params): self.CoverUrl = params.get("CoverUrl") self.Confidence = params.get("Confidence") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateEditingTaskRequest(AbstractModel): """CreateEditingTask请求参数结构体 """ def __init__(self): r""" :param EditingInfo: 智能编辑任务参数。 :type EditingInfo: :class:`tencentcloud.ie.v20200304.models.EditingInfo` :param DownInfo: 视频源信息。 :type DownInfo: :class:`tencentcloud.ie.v20200304.models.DownInfo` :param SaveInfo: 结果存储信息。对于包含智能拆条、智能集锦或者智能封面的任务必选。 :type SaveInfo: :class:`tencentcloud.ie.v20200304.models.SaveInfo` :param CallbackInfo: 任务结果回调地址信息。 :type CallbackInfo: :class:`tencentcloud.ie.v20200304.models.CallbackInfo` """ self.EditingInfo = None self.DownInfo = None self.SaveInfo = None self.CallbackInfo = None def _deserialize(self, params): if params.get("EditingInfo") is not None: self.EditingInfo = EditingInfo() self.EditingInfo._deserialize(params.get("EditingInfo")) if params.get("DownInfo") is not None: self.DownInfo = DownInfo() self.DownInfo._deserialize(params.get("DownInfo")) if params.get("SaveInfo") is not None: self.SaveInfo = SaveInfo() self.SaveInfo._deserialize(params.get("SaveInfo")) if params.get("CallbackInfo") is not None: self.CallbackInfo = CallbackInfo() self.CallbackInfo._deserialize(params.get("CallbackInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreateEditingTaskResponse(AbstractModel): """CreateEditingTask返回参数结构体 """ def __init__(self): r""" :param TaskId: 编辑任务 ID,可以通过该 ID 查询任务状态。 :type TaskId: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TaskId = None self.RequestId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") self.RequestId = params.get("RequestId") class CreateMediaProcessTaskRequest(AbstractModel): """CreateMediaProcessTask请求参数结构体 """ def __init__(self): r""" :param MediaProcessInfo: 编辑处理任务参数。 :type MediaProcessInfo: :class:`tencentcloud.ie.v20200304.models.MediaProcessInfo` :param SourceInfoSet: 编辑处理任务输入源列表。 :type SourceInfoSet: list of MediaSourceInfo :param SaveInfoSet: 结果存储信息,对于涉及存储的请求必选。部子任务支持数组备份写,具体以对应任务文档为准。 :type SaveInfoSet: list of SaveInfo :param CallbackInfoSet: 任务结果回调地址信息。部子任务支持数组备份回调,具体以对应任务文档为准。 :type CallbackInfoSet: list of CallbackInfo """ self.MediaProcessInfo = None self.SourceInfoSet = None self.SaveInfoSet = None self.CallbackInfoSet = None def _deserialize(self, params): if params.get("MediaProcessInfo") is not None: self.MediaProcessInfo = MediaProcessInfo() self.MediaProcessInfo._deserialize(params.get("MediaProcessInfo")) if params.get("SourceInfoSet") is not None: self.SourceInfoSet = [] for item in params.get("SourceInfoSet"): obj = MediaSourceInfo() obj._deserialize(item) self.SourceInfoSet.append(obj) if params.get("SaveInfoSet") is not None: self.SaveInfoSet = [] for item in params.get("SaveInfoSet"): obj = SaveInfo() obj._deserialize(item) self.SaveInfoSet.append(obj) if params.get("CallbackInfoSet") is not None: self.CallbackInfoSet = [] for item in params.get("CallbackInfoSet"): obj = CallbackInfo() obj._deserialize(item) self.CallbackInfoSet.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreateMediaProcessTaskResponse(AbstractModel): """CreateMediaProcessTask返回参数结构体 """ def __init__(self): r""" :param TaskId: 编辑任务 ID,可以通过该 ID 查询任务状态和结果。 注意:此字段可能返回 null,表示取不到有效值。 :type TaskId: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TaskId = None self.RequestId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") self.RequestId = params.get("RequestId") class CreateMediaQualityRestorationTaskRequest(AbstractModel): """CreateMediaQualityRestorationTask请求参数结构体 """ def __init__(self): r""" :param DownInfo: 源文件地址。 :type DownInfo: :class:`tencentcloud.ie.v20200304.models.DownInfo` :param TransInfo: 画质重生任务参数信息。 :type TransInfo: list of SubTaskTranscodeInfo :param SaveInfo: 任务结束后文件存储信息。 :type SaveInfo: :class:`tencentcloud.ie.v20200304.models.SaveInfo` :param CallbackInfo: 任务结果回调地址信息。 :type CallbackInfo: :class:`tencentcloud.ie.v20200304.models.CallbackInfo` """ self.DownInfo = None self.TransInfo = None self.SaveInfo = None self.CallbackInfo = None def _deserialize(self, params): if params.get("DownInfo") is not None: self.DownInfo = DownInfo() self.DownInfo._deserialize(params.get("DownInfo")) if params.get("TransInfo") is not None: self.TransInfo = [] for item in params.get("TransInfo"): obj = SubTaskTranscodeInfo() obj._deserialize(item) self.TransInfo.append(obj) if params.get("SaveInfo") is not None: self.SaveInfo = SaveInfo() self.SaveInfo._deserialize(params.get("SaveInfo")) if params.get("CallbackInfo") is not None: self.CallbackInfo = CallbackInfo() self.CallbackInfo._deserialize(params.get("CallbackInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreateMediaQualityRestorationTaskResponse(AbstractModel): """CreateMediaQualityRestorationTask返回参数结构体 """ def __init__(self): r""" :param TaskId: 画质重生任务ID,可以通过该ID查询任务状态。 :type TaskId: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TaskId = None self.RequestId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") self.RequestId = params.get("RequestId") class CreateQualityControlTaskRequest(AbstractModel): """CreateQualityControlTask请求参数结构体 """ def __init__(self): r""" :param QualityControlInfo: 质检任务参数 :type QualityControlInfo: :class:`tencentcloud.ie.v20200304.models.QualityControlInfo` :param DownInfo: 视频源信息 :type DownInfo: :class:`tencentcloud.ie.v20200304.models.DownInfo` :param CallbackInfo: 任务结果回调地址信息 :type CallbackInfo: :class:`tencentcloud.ie.v20200304.models.CallbackInfo` """ self.QualityControlInfo = None self.DownInfo = None self.CallbackInfo = None def _deserialize(self, params): if params.get("QualityControlInfo") is not None: self.QualityControlInfo = QualityControlInfo() self.QualityControlInfo._deserialize(params.get("QualityControlInfo")) if params.get("DownInfo") is not None: self.DownInfo = DownInfo() self.DownInfo._deserialize(params.get("DownInfo")) if params.get("CallbackInfo") is not None: self.CallbackInfo = CallbackInfo() self.CallbackInfo._deserialize(params.get("CallbackInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreateQualityControlTaskResponse(AbstractModel): """CreateQualityControlTask返回参数结构体 """ def __init__(self): r""" :param TaskId: 质检任务 ID 注意:此字段可能返回 null,表示取不到有效值。 :type TaskId: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TaskId = None self.RequestId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") self.RequestId = params.get("RequestId") class DarInfo(AbstractModel): """视频Dar信息 """ def __init__(self): r""" :param FillMode: 填充模式,可选值: 1:留黑,保持视频宽高比不变,边缘剩余部分使用黑色填充。 2:拉伸,对每一帧进行拉伸,填满整个画面,可能导致转码后的视频被“压扁“或者“拉长“。 默认为2。 :type FillMode: int """ self.FillMode = None def _deserialize(self, params): self.FillMode = params.get("FillMode") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class Denoise(AbstractModel): """音频降噪 """ def __init__(self): r""" :param Type: 音频降噪强度,可选项: 1. weak 2.normal, 3.strong 默认为weak :type Type: str """ self.Type = None def _deserialize(self, params): self.Type = params.get("Type") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class Denoising(AbstractModel): """去噪参数 """ def __init__(self): r""" :param Type: 去噪方式,可选项: templ:时域降噪; spatial:空域降噪, fast-spatial:快速空域降噪。 注意:可选择组合方式: 1.type:"templ,spatial" ; 2.type:"templ,fast-spatial"。 :type Type: str :param TemplStrength: 时域去噪强度,可选值:0.0-1.0 。小于0.0的默认为0.0,大于1.0的默认为1.0。 :type TemplStrength: float :param SpatialStrength: 空域去噪强度,可选值:0.0-1.0 。小于0.0的默认为0.0,大于1.0的默认为1.0。 :type SpatialStrength: float """ self.Type = None self.TemplStrength = None self.SpatialStrength = None def _deserialize(self, params): self.Type = params.get("Type") self.TemplStrength = params.get("TemplStrength") self.SpatialStrength = params.get("SpatialStrength") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeEditingTaskResultRequest(AbstractModel): """DescribeEditingTaskResult请求参数结构体 """ def __init__(self): r""" :param TaskId: 编辑任务 ID。 :type TaskId: str """ self.TaskId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeEditingTaskResultResponse(AbstractModel): """DescribeEditingTaskResult返回参数结构体 """ def __init__(self): r""" :param TaskResult: 编辑任务结果信息。 :type TaskResult: :class:`tencentcloud.ie.v20200304.models.EditingTaskResult` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TaskResult = None self.RequestId = None def _deserialize(self, params): if params.get("TaskResult") is not None: self.TaskResult = EditingTaskResult() self.TaskResult._deserialize(params.get("TaskResult")) self.RequestId = params.get("RequestId") class DescribeMediaProcessTaskResultRequest(AbstractModel): """DescribeMediaProcessTaskResult请求参数结构体 """ def __init__(self): r""" :param TaskId: 编辑处理任务ID。 :type TaskId: str """ self.TaskId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeMediaProcessTaskResultResponse(AbstractModel): """DescribeMediaProcessTaskResult返回参数结构体 """ def __init__(self): r""" :param TaskResult: 任务处理结果。 注意:此字段可能返回 null,表示取不到有效值。 :type TaskResult: :class:`tencentcloud.ie.v20200304.models.MediaProcessTaskResult` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TaskResult = None self.RequestId = None def _deserialize(self, params): if params.get("TaskResult") is not None: self.TaskResult = MediaProcessTaskResult() self.TaskResult._deserialize(params.get("TaskResult")) self.RequestId = params.get("RequestId") class DescribeMediaQualityRestorationTaskRusultRequest(AbstractModel): """DescribeMediaQualityRestorationTaskRusult请求参数结构体 """ def __init__(self): r""" :param TaskId: 画质重生任务ID :type TaskId: str """ self.TaskId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeMediaQualityRestorationTaskRusultResponse(AbstractModel): """DescribeMediaQualityRestorationTaskRusult返回参数结构体 """ def __init__(self): r""" :param TaskResult: 画质重生任务结果信息 :type TaskResult: :class:`tencentcloud.ie.v20200304.models.MediaQualityRestorationTaskResult` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TaskResult = None self.RequestId = None def _deserialize(self, params): if params.get("TaskResult") is not None: self.TaskResult = MediaQualityRestorationTaskResult() self.TaskResult._deserialize(params.get("TaskResult")) self.RequestId = params.get("RequestId") class DescribeQualityControlTaskResultRequest(AbstractModel): """DescribeQualityControlTaskResult请求参数结构体 """ def __init__(self): r""" :param TaskId: 质检任务 ID :type TaskId: str """ self.TaskId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeQualityControlTaskResultResponse(AbstractModel): """DescribeQualityControlTaskResult返回参数结构体 """ def __init__(self): r""" :param TaskResult: 质检任务结果信息 :type TaskResult: :class:`tencentcloud.ie.v20200304.models.QualityControlInfoTaskResult` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TaskResult = None self.RequestId = None def _deserialize(self, params): if params.get("TaskResult") is not None: self.TaskResult = QualityControlInfoTaskResult() self.TaskResult._deserialize(params.get("TaskResult")) self.RequestId = params.get("RequestId") class DownInfo(AbstractModel): """视频源信息 """ def __init__(self): r""" :param Type: 下载类型,可选值: 0:UrlInfo; 1:CosInfo。 :type Type: int :param UrlInfo: Url形式下载信息,当Type等于0时必选。 :type UrlInfo: :class:`tencentcloud.ie.v20200304.models.UrlInfo` :param CosInfo: Cos形式下载信息,当Type等于1时必选。 :type CosInfo: :class:`tencentcloud.ie.v20200304.models.CosInfo` """ self.Type = None self.UrlInfo = None self.CosInfo = None def _deserialize(self, params): self.Type = params.get("Type") if params.get("UrlInfo") is not None: self.UrlInfo = UrlInfo() self.UrlInfo._deserialize(params.get("UrlInfo")) if params.get("CosInfo") is not None: self.CosInfo = CosInfo() self.CosInfo._deserialize(params.get("CosInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class EditInfo(AbstractModel): """画质重生子任务视频剪辑参数 """ def __init__(self): r""" :param StartTime: 剪辑开始时间,单位:ms。 :type StartTime: int :param EndTime: 剪辑结束时间,单位:ms :type EndTime: int """ self.StartTime = None self.EndTime = None def _deserialize(self, params): self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class EditingInfo(AbstractModel): """智能编辑任务参数信息 """ def __init__(self): r""" :param TagEditingInfo: 视频标签识别任务参数,不填则不开启。 :type TagEditingInfo: :class:`tencentcloud.ie.v20200304.models.TagEditingInfo` :param ClassificationEditingInfo: 视频分类识别任务参数,不填则不开启。 :type ClassificationEditingInfo: :class:`tencentcloud.ie.v20200304.models.ClassificationEditingInfo` :param StripEditingInfo: 智能拆条任务参数,不填则不开启。 :type StripEditingInfo: :class:`tencentcloud.ie.v20200304.models.StripEditingInfo` :param HighlightsEditingInfo: 智能集锦任务参数,不填则不开启。 :type HighlightsEditingInfo: :class:`tencentcloud.ie.v20200304.models.HighlightsEditingInfo` :param CoverEditingInfo: 智能封面任务参数,不填则不开启。 :type CoverEditingInfo: :class:`tencentcloud.ie.v20200304.models.CoverEditingInfo` :param OpeningEndingEditingInfo: 片头片尾识别任务参数,不填则不开启。 :type OpeningEndingEditingInfo: :class:`tencentcloud.ie.v20200304.models.OpeningEndingEditingInfo` """ self.TagEditingInfo = None self.ClassificationEditingInfo = None self.StripEditingInfo = None self.HighlightsEditingInfo = None self.CoverEditingInfo = None self.OpeningEndingEditingInfo = None def _deserialize(self, params): if params.get("TagEditingInfo") is not None: self.TagEditingInfo = TagEditingInfo() self.TagEditingInfo._deserialize(params.get("TagEditingInfo")) if params.get("ClassificationEditingInfo") is not None: self.ClassificationEditingInfo = ClassificationEditingInfo() 
self.ClassificationEditingInfo._deserialize(params.get("ClassificationEditingInfo")) if params.get("StripEditingInfo") is not None: self.StripEditingInfo = StripEditingInfo() self.StripEditingInfo._deserialize(params.get("StripEditingInfo")) if params.get("HighlightsEditingInfo") is not None: self.HighlightsEditingInfo = HighlightsEditingInfo() self.HighlightsEditingInfo._deserialize(params.get("HighlightsEditingInfo")) if params.get("CoverEditingInfo") is not None: self.CoverEditingInfo = CoverEditingInfo() self.CoverEditingInfo._deserialize(params.get("CoverEditingInfo")) if params.get("OpeningEndingEditingInfo") is not None: self.OpeningEndingEditingInfo = OpeningEndingEditingInfo() self.OpeningEndingEditingInfo._deserialize(params.get("OpeningEndingEditingInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class EditingTaskResult(AbstractModel): """智能识别任务结果信息 """ def __init__(self): r""" :param TaskId: 编辑任务 ID。 :type TaskId: str :param Status: 编辑任务状态。 1:执行中;2:已完成。 :type Status: int :param TagTaskResult: 视频标签识别结果。 注意:此字段可能返回 null,表示取不到有效值。 :type TagTaskResult: :class:`tencentcloud.ie.v20200304.models.TagTaskResult` :param ClassificationTaskResult: 视频分类识别结果。 注意:此字段可能返回 null,表示取不到有效值。 :type ClassificationTaskResult: :class:`tencentcloud.ie.v20200304.models.ClassificationTaskResult` :param StripTaskResult: 智能拆条结果。 注意:此字段可能返回 null,表示取不到有效值。 :type StripTaskResult: :class:`tencentcloud.ie.v20200304.models.StripTaskResult` :param HighlightsTaskResult: 智能集锦结果。 注意:此字段可能返回 null,表示取不到有效值。 :type HighlightsTaskResult: :class:`tencentcloud.ie.v20200304.models.HighlightsTaskResult` :param CoverTaskResult: 智能封面结果。 注意:此字段可能返回 null,表示取不到有效值。 :type CoverTaskResult: :class:`tencentcloud.ie.v20200304.models.CoverTaskResult` :param OpeningEndingTaskResult: 片头片尾识别结果。 注意:此字段可能返回 null,表示取不到有效值。 :type 
OpeningEndingTaskResult: :class:`tencentcloud.ie.v20200304.models.OpeningEndingTaskResult` """ self.TaskId = None self.Status = None self.TagTaskResult = None self.ClassificationTaskResult = None self.StripTaskResult = None self.HighlightsTaskResult = None self.CoverTaskResult = None self.OpeningEndingTaskResult = None def _deserialize(self, params): self.TaskId = params.get("TaskId") self.Status = params.get("Status") if params.get("TagTaskResult") is not None: self.TagTaskResult = TagTaskResult() self.TagTaskResult._deserialize(params.get("TagTaskResult")) if params.get("ClassificationTaskResult") is not None: self.ClassificationTaskResult = ClassificationTaskResult() self.ClassificationTaskResult._deserialize(params.get("ClassificationTaskResult")) if params.get("StripTaskResult") is not None: self.StripTaskResult = StripTaskResult() self.StripTaskResult._deserialize(params.get("StripTaskResult")) if params.get("HighlightsTaskResult") is not None: self.HighlightsTaskResult = HighlightsTaskResult() self.HighlightsTaskResult._deserialize(params.get("HighlightsTaskResult")) if params.get("CoverTaskResult") is not None: self.CoverTaskResult = CoverTaskResult() self.CoverTaskResult._deserialize(params.get("CoverTaskResult")) if params.get("OpeningEndingTaskResult") is not None: self.OpeningEndingTaskResult = OpeningEndingTaskResult() self.OpeningEndingTaskResult._deserialize(params.get("OpeningEndingTaskResult")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class FaceProtect(AbstractModel): """人脸保护参数 """ def __init__(self): r""" :param FaceUsmRatio: 人脸区域增强强度,可选项:0.0-1.0。小于0.0的默认为0.0,大于1.0的默认为1.0。 :type FaceUsmRatio: float """ self.FaceUsmRatio = None def _deserialize(self, params): self.FaceUsmRatio = params.get("FaceUsmRatio") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class FileInfo(AbstractModel): """画质重生处理后文件的详细信息 """ def __init__(self): r""" :param FileSize: 任务结束后生成的文件大小。 注意:此字段可能返回 null,表示取不到有效值。 :type FileSize: int :param FileType: 任务结束后生成的文件格式,例如:mp4,flv等等。 注意:此字段可能返回 null,表示取不到有效值。 :type FileType: str :param Bitrate: 任务结束后生成的文件整体码率,单位:bps。 注意:此字段可能返回 null,表示取不到有效值。 :type Bitrate: int :param Duration: 任务结束后生成的文件时长,单位:ms。 注意:此字段可能返回 null,表示取不到有效值。 :type Duration: int :param VideoInfoResult: 任务结束后生成的文件视频信息。 注意:此字段可能返回 null,表示取不到有效值。 :type VideoInfoResult: list of VideoInfoResultItem :param AudioInfoResult: 任务结束后生成的文件音频信息。 注意:此字段可能返回 null,表示取不到有效值。 :type AudioInfoResult: list of AudioInfoResultItem """ self.FileSize = None self.FileType = None self.Bitrate = None self.Duration = None self.VideoInfoResult = None self.AudioInfoResult = None def _deserialize(self, params): self.FileSize = params.get("FileSize") self.FileType = params.get("FileType") self.Bitrate = params.get("Bitrate") self.Duration = params.get("Duration") if params.get("VideoInfoResult") is not None: self.VideoInfoResult = [] for item in params.get("VideoInfoResult"): obj = VideoInfoResultItem() obj._deserialize(item) self.VideoInfoResult.append(obj) if params.get("AudioInfoResult") is not None: self.AudioInfoResult = [] for item in params.get("AudioInfoResult"): obj = AudioInfoResultItem() obj._deserialize(item) self.AudioInfoResult.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: 
memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class FrameTagItem(AbstractModel): """帧标签 """ def __init__(self): r""" :param StartPts: 标签起始时间戳PTS(ms) :type StartPts: int :param EndPts: 语句结束时间戳PTS(ms) :type EndPts: int :param Period: 字符串形式的起始结束时间 :type Period: str :param TagItems: 标签数组 :type TagItems: list of TagItem """ self.StartPts = None self.EndPts = None self.Period = None self.TagItems = None def _deserialize(self, params): self.StartPts = params.get("StartPts") self.EndPts = params.get("EndPts") self.Period = params.get("Period") if params.get("TagItems") is not None: self.TagItems = [] for item in params.get("TagItems"): obj = TagItem() obj._deserialize(item) self.TagItems.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class FrameTagRec(AbstractModel): """帧标签任务参数 """ def __init__(self): r""" :param TagType: 标签类型: "Common": 通用类型 "Game":游戏类型 :type TagType: str :param GameExtendType: 游戏具体类型: "HonorOfKings_AnchorViews":王者荣耀主播视角 "HonorOfKings_GameViews":王者荣耀比赛视角 "LOL_AnchorViews":英雄联盟主播视角 "LOL_GameViews":英雄联盟比赛视角 "PUBG_AnchorViews":和平精英主播视角 "PUBG_GameViews":和平精英比赛视角 :type GameExtendType: str """ self.TagType = None self.GameExtendType = None def _deserialize(self, params): self.TagType = params.get("TagType") self.GameExtendType = params.get("GameExtendType") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class FrameTagResult(AbstractModel): """帧标签结果 """ def __init__(self): r""" :param FrameTagItems: 帧标签结果数组 :type FrameTagItems: list of FrameTagItem """ self.FrameTagItems = None def _deserialize(self, params): if params.get("FrameTagItems") is not None: self.FrameTagItems = [] for item in params.get("FrameTagItems"): obj = FrameTagItem() obj._deserialize(item) self.FrameTagItems.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class HiddenMarkInfo(AbstractModel): """数字水印 """ def __init__(self): r""" :param Path: 数字水印路径,,如果不从Cos拉取水印,则必填 :type Path: str :param Frequency: 数字水印频率,可选值:[1,256],默认值为30 :type Frequency: int :param Strength: 数字水印强度,可选值:[32,128],默认值为64 :type Strength: int :param CosInfo: 数字水印的Cos 信息,从Cos上拉取图片水印时必填。 :type CosInfo: :class:`tencentcloud.ie.v20200304.models.CosInfo` """ self.Path = None self.Frequency = None self.Strength = None self.CosInfo = None def _deserialize(self, params): self.Path = params.get("Path") self.Frequency = params.get("Frequency") self.Strength = params.get("Strength") if params.get("CosInfo") is not None: self.CosInfo = CosInfo() self.CosInfo._deserialize(params.get("CosInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class HighlightsEditingInfo(AbstractModel): """智能集锦任务参数信息 """ def __init__(self): r""" :param Switch: 是否开启智能集锦。0为关闭,1为开启。其他非0非1值默认为0。 :type Switch: int :param CustomInfo: 额外定制化服务参数。参数为序列化的Json字符串,例如:{"k1":"v1"}。 :type CustomInfo: str """ self.Switch = None self.CustomInfo = None def _deserialize(self, params): self.Switch = params.get("Switch") self.CustomInfo = params.get("CustomInfo") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class HighlightsTaskResult(AbstractModel): """智能集锦结果信息 """ def __init__(self): r""" :param Status: 编辑任务状态。 1:执行中;2:成功;3:失败。 :type Status: int :param ErrCode: 编辑任务失败错误码。 0:成功;其他值:失败。 :type ErrCode: int :param ErrMsg: 编辑任务失败错误描述。 :type ErrMsg: str :param ItemSet: 智能集锦结果集。 注意:此字段可能返回 null,表示取不到有效值。 :type ItemSet: list of HighlightsTaskResultItem """ self.Status = None self.ErrCode = None self.ErrMsg = None self.ItemSet = None def _deserialize(self, params): self.Status = params.get("Status") self.ErrCode = params.get("ErrCode") self.ErrMsg = params.get("ErrMsg") if params.get("ItemSet") is not None: self.ItemSet = [] for item in params.get("ItemSet"): obj = HighlightsTaskResultItem() obj._deserialize(item) self.ItemSet.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class HighlightsTaskResultItem(AbstractModel): """智能集锦结果项 """ def __init__(self): r""" :param HighlightUrl: 智能集锦地址。 :type HighlightUrl: str :param CovImgUrl: 智能集锦封面地址。 :type CovImgUrl: str :param Confidence: 置信度,取值范围是 0 到 100。 :type Confidence: float :param Duration: 智能集锦持续时间,单位:秒。 :type Duration: float :param SegmentSet: 智能集锦子片段结果集,集锦片段由这些子片段拼接生成。 :type SegmentSet: list of HighlightsTaskResultItemSegment """ self.HighlightUrl = None self.CovImgUrl = None self.Confidence = None self.Duration = None self.SegmentSet = None def _deserialize(self, params): self.HighlightUrl = params.get("HighlightUrl") self.CovImgUrl = params.get("CovImgUrl") self.Confidence = params.get("Confidence") self.Duration = params.get("Duration") if params.get("SegmentSet") is not None: self.SegmentSet = [] for item in params.get("SegmentSet"): obj = HighlightsTaskResultItemSegment() obj._deserialize(item) self.SegmentSet.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class HighlightsTaskResultItemSegment(AbstractModel): """智能集锦结果片段 """ def __init__(self): r""" :param Confidence: 置信度,取值范围是 0 到 100。 :type Confidence: float :param StartTimeOffset: 集锦片段起始的偏移时间,单位:秒。 :type StartTimeOffset: float :param EndTimeOffset: 集锦片段终止的偏移时间,单位:秒。 :type EndTimeOffset: float """ self.Confidence = None self.StartTimeOffset = None self.EndTimeOffset = None def _deserialize(self, params): self.Confidence = params.get("Confidence") self.StartTimeOffset = params.get("StartTimeOffset") self.EndTimeOffset = params.get("EndTimeOffset") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class IntervalTime(AbstractModel): """周期时间点信息。 """ def __init__(self): r""" :param Interval: 间隔周期,单位ms :type Interval: int :param StartTime: 开始时间点,单位ms :type StartTime: int """ self.Interval = None self.StartTime = None def _deserialize(self, params): self.Interval = params.get("Interval") self.StartTime = params.get("StartTime") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class LoudnessInfo(AbstractModel): """音频响度信息 """ def __init__(self): r""" :param Loudness: 音频整体响度 :type Loudness: float :param LoudnessRange: 音频响度范围 :type LoudnessRange: float """ self.Loudness = None self.LoudnessRange = None def _deserialize(self, params): self.Loudness = params.get("Loudness") self.LoudnessRange = params.get("LoudnessRange") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class LowLightEnhance(AbstractModel): """低光照增强参数 """ def __init__(self): r""" :param Type: 低光照增强类型,可选项:normal。 :type Type: str """ self.Type = None def _deserialize(self, params): self.Type = params.get("Type") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MediaCuttingInfo(AbstractModel): """编辑处理/剪切任务信息 """ def __init__(self): r""" :param TimeInfo: 截取时间信息。 :type TimeInfo: :class:`tencentcloud.ie.v20200304.models.MediaCuttingTimeInfo` :param TargetInfo: 输出结果信息。 :type TargetInfo: :class:`tencentcloud.ie.v20200304.models.MediaTargetInfo` :param OutForm: 截取结果形式信息。 :type OutForm: :class:`tencentcloud.ie.v20200304.models.MediaCuttingOutForm` :param ResultListSaveType: 列表文件形式,存储到用户存储服务中,可选值: UseSaveInfo:默认,结果列表和结果存储同一位置; NoListFile:不存储结果列表。 :type ResultListSaveType: str """ self.TimeInfo = None self.TargetInfo = None self.OutForm = None self.ResultListSaveType = None def _deserialize(self, params): if params.get("TimeInfo") is not None: self.TimeInfo = MediaCuttingTimeInfo() self.TimeInfo._deserialize(params.get("TimeInfo")) if params.get("TargetInfo") is not None: self.TargetInfo = MediaTargetInfo() self.TargetInfo._deserialize(params.get("TargetInfo")) if params.get("OutForm") is not None: self.OutForm = MediaCuttingOutForm() self.OutForm._deserialize(params.get("OutForm")) self.ResultListSaveType = params.get("ResultListSaveType") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MediaCuttingOutForm(AbstractModel): """编辑处理/剪切任务/输出形式信息 """ def __init__(self): r""" :param Type: 输出类型,可选值: Static:静态图; Dynamic:动态图; Sprite:雪碧图; Video:视频。 注1:不同类型时,对应的 TargetInfo.Format 格式支持如下: Static:jpg、png; Dynamic:gif; Sprite:jpg、png; Video:mp4。 注2:当 Type=Sprite时,TargetInfo指定的尺寸表示小图的大小,最终结果尺寸以输出为准。 :type Type: str :param FillType: 背景填充方式,可选值: White:白色填充; Black:黑色填充; Stretch:拉伸; Gaussian:高斯模糊; 默认White。 :type FillType: str :param SpriteRowCount: Type=Sprite时有效,表示雪碧图行数,范围为 [1,200],默认100。 :type SpriteRowCount: int :param SpriteColumnCount: Type=Sprite时有效,表示雪碧图列数,范围为 [1,200],默认100。 :type SpriteColumnCount: int """ self.Type = None self.FillType = None self.SpriteRowCount = None self.SpriteColumnCount = None def _deserialize(self, params): self.Type = params.get("Type") self.FillType = params.get("FillType") self.SpriteRowCount = params.get("SpriteRowCount") self.SpriteColumnCount = params.get("SpriteColumnCount") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MediaCuttingTaskResult(AbstractModel): """编辑处理/剪切任务/处理结果 """ def __init__(self): r""" :param ListFile: 如果ResultListType不为NoListFile时,结果(TaskResultFile)列表文件的存储位置。 注意:此字段可能返回 null,表示取不到有效值。 :type ListFile: :class:`tencentcloud.ie.v20200304.models.TaskResultFile` :param ResultCount: 结果个数。 注意:此字段可能返回 null,表示取不到有效值。 :type ResultCount: int :param FirstFile: 第一个结果文件。 注意:此字段可能返回 null,表示取不到有效值。 :type FirstFile: :class:`tencentcloud.ie.v20200304.models.TaskResultFile` :param LastFile: 最后一个结果文件。 注意:此字段可能返回 null,表示取不到有效值。 :type LastFile: :class:`tencentcloud.ie.v20200304.models.TaskResultFile` """ self.ListFile = None self.ResultCount = None self.FirstFile = None self.LastFile = None def _deserialize(self, params): if params.get("ListFile") is not None: self.ListFile = TaskResultFile() self.ListFile._deserialize(params.get("ListFile")) self.ResultCount = params.get("ResultCount") if params.get("FirstFile") is not None: self.FirstFile = TaskResultFile() self.FirstFile._deserialize(params.get("FirstFile")) if params.get("LastFile") is not None: self.LastFile = TaskResultFile() self.LastFile._deserialize(params.get("LastFile")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MediaCuttingTimeInfo(AbstractModel): """编辑处理/剪切任务/时间信息 """ def __init__(self): r""" :param Type: 时间类型,可选值: PointSet:时间点集合; IntervalPoint:周期采样点; SectionSet:时间片段集合。 :type Type: str :param PointSet: 截取时间点集合,单位毫秒,Type=PointSet时必选。 :type PointSet: list of int :param IntervalPoint: 周期采样点信息,Type=IntervalPoint时必选。 :type IntervalPoint: :class:`tencentcloud.ie.v20200304.models.IntervalTime` :param SectionSet: 时间区间集合信息,Type=SectionSet时必选。 :type SectionSet: list of SectionTime """ self.Type = None self.PointSet = None self.IntervalPoint = None self.SectionSet = None def _deserialize(self, params): self.Type = params.get("Type") self.PointSet = params.get("PointSet") if params.get("IntervalPoint") is not None: self.IntervalPoint = IntervalTime() self.IntervalPoint._deserialize(params.get("IntervalPoint")) if params.get("SectionSet") is not None: self.SectionSet = [] for item in params.get("SectionSet"): obj = SectionTime() obj._deserialize(item) self.SectionSet.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MediaJoiningInfo(AbstractModel): """编辑处理/拼接任务信息 """ def __init__(self): r""" :param TargetInfo: 输出目标信息,拼接只采用FileName和Format,用于指定目标文件名和格式。 其中Format只支持mp4. :type TargetInfo: :class:`tencentcloud.ie.v20200304.models.MediaTargetInfo` """ self.TargetInfo = None def _deserialize(self, params): if params.get("TargetInfo") is not None: self.TargetInfo = MediaTargetInfo() self.TargetInfo._deserialize(params.get("TargetInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MediaJoiningTaskResult(AbstractModel): """编辑处理/拼接任务/处理结果 """ def __init__(self): r""" :param File: 拼接结果文件。 注意:此字段可能返回 null,表示取不到有效值。 :type File: :class:`tencentcloud.ie.v20200304.models.TaskResultFile` """ self.File = None def _deserialize(self, params): if params.get("File") is not None: self.File = TaskResultFile() self.File._deserialize(params.get("File")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MediaProcessInfo(AbstractModel): """编辑处理/任务信息 """ def __init__(self): r""" :param Type: 编辑处理任务类型,可选值: MediaEditing:媒体编辑(待上线); MediaCutting:媒体剪切; MediaJoining:媒体拼接。 MediaRecognition: 媒体识别。 :type Type: str :param MediaCuttingInfo: 视频剪切任务参数,Type=MediaCutting时必选。 :type MediaCuttingInfo: :class:`tencentcloud.ie.v20200304.models.MediaCuttingInfo` :param MediaJoiningInfo: 视频拼接任务参数,Type=MediaJoining时必选。 :type MediaJoiningInfo: :class:`tencentcloud.ie.v20200304.models.MediaJoiningInfo` :param MediaRecognitionInfo: 媒体识别任务参数,Type=MediaRecognition时必选 :type MediaRecognitionInfo: :class:`tencentcloud.ie.v20200304.models.MediaRecognitionInfo` """ self.Type = None self.MediaCuttingInfo = None self.MediaJoiningInfo = None self.MediaRecognitionInfo = None def _deserialize(self, params): self.Type = params.get("Type") if params.get("MediaCuttingInfo") is not None: self.MediaCuttingInfo = MediaCuttingInfo() self.MediaCuttingInfo._deserialize(params.get("MediaCuttingInfo")) if params.get("MediaJoiningInfo") is not None: self.MediaJoiningInfo = MediaJoiningInfo() self.MediaJoiningInfo._deserialize(params.get("MediaJoiningInfo")) if params.get("MediaRecognitionInfo") is not None: self.MediaRecognitionInfo = MediaRecognitionInfo() self.MediaRecognitionInfo._deserialize(params.get("MediaRecognitionInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in 
memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MediaProcessTaskResult(AbstractModel): """编辑处理/任务处理结果 """ def __init__(self): r""" :param TaskId: 编辑处理任务ID。 注意:此字段可能返回 null,表示取不到有效值。 :type TaskId: str :param Type: 编辑处理任务类型,取值: MediaEditing:视频编辑(待上线); MediaCutting:视频剪切; MediaJoining:视频拼接。 MediaRecognition:媒体识别; 注意:此字段可能返回 null,表示取不到有效值。 :type Type: str :param Progress: 处理进度,范围:[0,100] 注意:此字段可能返回 null,表示取不到有效值。 :type Progress: int :param Status: 任务状态: 1100:等待中; 1200:执行中; 2000:成功; 5000:失败。 注意:此字段可能返回 null,表示取不到有效值。 :type Status: int :param ErrCode: 任务错误码。 注意:此字段可能返回 null,表示取不到有效值。 :type ErrCode: int :param ErrMsg: 任务错误信息。 注意:此字段可能返回 null,表示取不到有效值。 :type ErrMsg: str :param MediaCuttingTaskResult: 剪切任务处理结果,当Type=MediaCutting时才有效。 注意:此字段可能返回 null,表示取不到有效值。 :type MediaCuttingTaskResult: :class:`tencentcloud.ie.v20200304.models.MediaCuttingTaskResult` :param MediaJoiningTaskResult: 拼接任务处理结果,当Type=MediaJoining时才有效。 注意:此字段可能返回 null,表示取不到有效值。 :type MediaJoiningTaskResult: :class:`tencentcloud.ie.v20200304.models.MediaJoiningTaskResult` :param MediaRecognitionTaskResult: 媒体识别任务处理结果,当Type=MediaRecognition时才有效。 注意:此字段可能返回 null,表示取不到有效值。 :type MediaRecognitionTaskResult: :class:`tencentcloud.ie.v20200304.models.MediaRecognitionTaskResult` """ self.TaskId = None self.Type = None self.Progress = None self.Status = None self.ErrCode = None self.ErrMsg = None self.MediaCuttingTaskResult = None self.MediaJoiningTaskResult = None self.MediaRecognitionTaskResult = None def _deserialize(self, params): self.TaskId = params.get("TaskId") self.Type = params.get("Type") self.Progress = params.get("Progress") self.Status = params.get("Status") self.ErrCode = params.get("ErrCode") self.ErrMsg = params.get("ErrMsg") if params.get("MediaCuttingTaskResult") is not None: self.MediaCuttingTaskResult = MediaCuttingTaskResult() self.MediaCuttingTaskResult._deserialize(params.get("MediaCuttingTaskResult")) if 
params.get("MediaJoiningTaskResult") is not None: self.MediaJoiningTaskResult = MediaJoiningTaskResult() self.MediaJoiningTaskResult._deserialize(params.get("MediaJoiningTaskResult")) if params.get("MediaRecognitionTaskResult") is not None: self.MediaRecognitionTaskResult = MediaRecognitionTaskResult() self.MediaRecognitionTaskResult._deserialize(params.get("MediaRecognitionTaskResult")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MediaQualityRestorationTaskResult(AbstractModel): """画质重生任务结果 """ def __init__(self): r""" :param TaskId: 画质重生任务ID :type TaskId: str :param SubTaskResult: 画质重生处理后文件的详细信息。 :type SubTaskResult: list of SubTaskResultItem """ self.TaskId = None self.SubTaskResult = None def _deserialize(self, params): self.TaskId = params.get("TaskId") if params.get("SubTaskResult") is not None: self.SubTaskResult = [] for item in params.get("SubTaskResult"): obj = SubTaskResultItem() obj._deserialize(item) self.SubTaskResult.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MediaRecognitionInfo(AbstractModel): """媒体识别任务参数 """ def __init__(self): r""" :param FrameTagRec: 帧标签识别 :type FrameTagRec: :class:`tencentcloud.ie.v20200304.models.FrameTagRec` :param SubtitleRec: 语音字幕识别 :type SubtitleRec: :class:`tencentcloud.ie.v20200304.models.SubtitleRec` """ self.FrameTagRec = None self.SubtitleRec = None def _deserialize(self, params): if params.get("FrameTagRec") is not None: self.FrameTagRec = FrameTagRec() self.FrameTagRec._deserialize(params.get("FrameTagRec")) if params.get("SubtitleRec") is not None: self.SubtitleRec = SubtitleRec() self.SubtitleRec._deserialize(params.get("SubtitleRec")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MediaRecognitionTaskResult(AbstractModel): """媒体识别任务处理结果 """ def __init__(self): r""" :param FrameTagResults: 帧标签识别结果 注意:此字段可能返回 null,表示取不到有效值。 :type FrameTagResults: :class:`tencentcloud.ie.v20200304.models.FrameTagResult` :param SubtitleResults: 语音字幕识别结果 注意:此字段可能返回 null,表示取不到有效值。 :type SubtitleResults: :class:`tencentcloud.ie.v20200304.models.SubtitleResult` """ self.FrameTagResults = None self.SubtitleResults = None def _deserialize(self, params): if params.get("FrameTagResults") is not None: self.FrameTagResults = FrameTagResult() self.FrameTagResults._deserialize(params.get("FrameTagResults")) if params.get("SubtitleResults") is not None: self.SubtitleResults = SubtitleResult() self.SubtitleResults._deserialize(params.get("SubtitleResults")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MediaResultInfo(AbstractModel): """结果文件媒体信息 """ def __init__(self): r""" :param Duration: 媒体时长,单位:毫秒 注意:此字段可能返回 null,表示取不到有效值。 :type Duration: int :param ResultVideoInfoSet: 视频流信息 注意:此字段可能返回 null,表示取不到有效值。 :type ResultVideoInfoSet: list of ResultVideoInfo :param ResultAudioInfoSet: 音频流信息 注意:此字段可能返回 null,表示取不到有效值。 :type ResultAudioInfoSet: list of ResultAudioInfo """ self.Duration = None self.ResultVideoInfoSet = None self.ResultAudioInfoSet = None def _deserialize(self, params): self.Duration = params.get("Duration") if params.get("ResultVideoInfoSet") is not None: self.ResultVideoInfoSet = [] for item in params.get("ResultVideoInfoSet"): obj = ResultVideoInfo() obj._deserialize(item) self.ResultVideoInfoSet.append(obj) if params.get("ResultAudioInfoSet") is not None: self.ResultAudioInfoSet = [] for item in params.get("ResultAudioInfoSet"): obj = ResultAudioInfo() obj._deserialize(item) self.ResultAudioInfoSet.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MediaSourceInfo(AbstractModel): """编辑处理的媒体源 """ def __init__(self): r""" :param DownInfo: 媒体源资源下载信息。 :type DownInfo: :class:`tencentcloud.ie.v20200304.models.DownInfo` :param Id: 媒体源ID标记,用于多个输入源时,请内媒体源的定位,对于多输入的任务,一般要求必选。 ID只能包含字母、数字、下划线、中划线,长读不能超过128。 :type Id: str :param Type: 媒体源类型,具体类型如下: Video:视频 Image:图片 Audio:音频 :type Type: str """ self.DownInfo = None self.Id = None self.Type = None def _deserialize(self, params): if params.get("DownInfo") is not None: self.DownInfo = DownInfo() self.DownInfo._deserialize(params.get("DownInfo")) self.Id = params.get("Id") self.Type = params.get("Type") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MediaTargetInfo(AbstractModel): """目标媒体信息。 """ def __init__(self): r""" :param FileName: 目标文件名,不能带特殊字符(如/等),无需后缀名,最长200字符。 注1:部分子服务支持占位符,形式为: {parameter} 预设parameter有: index:序号; :type FileName: str :param Format: 媒体封装格式,最长5字符,具体格式支持根据子任务确定。 :type Format: str :param TargetVideoInfo: 视频流信息。 :type TargetVideoInfo: :class:`tencentcloud.ie.v20200304.models.TargetVideoInfo` :param ResultListSaveType: 【不再使用】 对于多输出任务,部分子服务推荐结果信息以列表文件形式,存储到用户存储服务中,可选值: UseSaveInfo:默认,结果列表和结果存储同一位置; NoListFile:不存储结果列表。 :type ResultListSaveType: str """ self.FileName = None self.Format = None self.TargetVideoInfo = None self.ResultListSaveType = None def _deserialize(self, params): self.FileName = params.get("FileName") self.Format = params.get("Format") if params.get("TargetVideoInfo") is not None: self.TargetVideoInfo = TargetVideoInfo() self.TargetVideoInfo._deserialize(params.get("TargetVideoInfo")) self.ResultListSaveType = params.get("ResultListSaveType") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MuxInfo(AbstractModel): """流封装信息 """ def __init__(self): r""" :param DeleteStream: 删除流,可选项:video,audio。 :type DeleteStream: str :param FlvFlags: Flv 参数,目前支持add_keyframe_index :type FlvFlags: str """ self.DeleteStream = None self.FlvFlags = None def _deserialize(self, params): self.DeleteStream = params.get("DeleteStream") self.FlvFlags = params.get("FlvFlags") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class OpeningEndingEditingInfo(AbstractModel): """片头片尾识别任务参数信息 """ def __init__(self): r""" :param Switch: 是否开启片头片尾识别。0为关闭,1为开启。其他非0非1值默认为0。 :type Switch: int :param CustomInfo: 额外定制化服务参数。参数为序列化的Json字符串,例如:{"k1":"v1"}。 :type CustomInfo: str """ self.Switch = None self.CustomInfo = None def _deserialize(self, params): self.Switch = params.get("Switch") self.CustomInfo = params.get("CustomInfo") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class OpeningEndingTaskResult(AbstractModel): """片头片尾识别结果信息 """ def __init__(self): r""" :param Status: 编辑任务状态。 1:执行中;2:成功;3:失败。 :type Status: int :param ErrCode: 编辑任务失败错误码。 0:成功;其他值:失败。 :type ErrCode: int :param ErrMsg: 编辑任务失败错误描述。 :type ErrMsg: str :param Item: 片头片尾识别结果项。 注意:此字段可能返回 null,表示取不到有效值。 :type Item: :class:`tencentcloud.ie.v20200304.models.OpeningEndingTaskResultItem` """ self.Status = None self.ErrCode = None self.ErrMsg = None self.Item = None def _deserialize(self, params): self.Status = params.get("Status") self.ErrCode = params.get("ErrCode") self.ErrMsg = params.get("ErrMsg") if params.get("Item") is not None: self.Item = OpeningEndingTaskResultItem() self.Item._deserialize(params.get("Item")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class OpeningEndingTaskResultItem(AbstractModel): """片头片尾识别结果项 """ def __init__(self): r""" :param OpeningTimeOffset: 视频片头的结束时间点,单位:秒。 :type OpeningTimeOffset: float :param OpeningConfidence: 片头识别置信度,取值范围是 0 到 100。 :type OpeningConfidence: float :param EndingTimeOffset: 视频片尾的开始时间点,单位:秒。 :type EndingTimeOffset: float :param EndingConfidence: 片尾识别置信度,取值范围是 0 到 100。 :type EndingConfidence: float """ self.OpeningTimeOffset = None self.OpeningConfidence = None self.EndingTimeOffset = None self.EndingConfidence = None def _deserialize(self, params): self.OpeningTimeOffset = params.get("OpeningTimeOffset") self.OpeningConfidence = params.get("OpeningConfidence") self.EndingTimeOffset = params.get("EndingTimeOffset") self.EndingConfidence = params.get("EndingConfidence") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class PicMarkInfoItem(AbstractModel): """图片水印信息 """ def __init__(self): r""" :param PosX: 图片水印的X坐标。 :type PosX: int :param PosY: 图片水印的Y坐标 。 :type PosY: int :param Path: 图片水印路径,,如果不从Cos拉取水印,则必填 :type Path: str :param CosInfo: 图片水印的Cos 信息,从Cos上拉取图片水印时必填。 :type CosInfo: :class:`tencentcloud.ie.v20200304.models.CosInfo` :param Width: 图片水印宽度,不填为图片原始宽度。 :type Width: int :param Height: 图片水印高度,不填为图片原始高度。 :type Height: int :param StartTime: 添加图片水印的开始时间,单位:ms。 :type StartTime: int :param EndTime: 添加图片水印的结束时间,单位:ms。 :type EndTime: int """ self.PosX = None self.PosY = None self.Path = None self.CosInfo = None self.Width = None self.Height = None self.StartTime = None self.EndTime = None def _deserialize(self, params): self.PosX = params.get("PosX") self.PosY = params.get("PosY") self.Path = params.get("Path") if params.get("CosInfo") is not None: self.CosInfo = CosInfo() self.CosInfo._deserialize(params.get("CosInfo")) self.Width = params.get("Width") self.Height = params.get("Height") 
self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class QualityControlInfo(AbstractModel): """媒体质检任务参数信息 """ def __init__(self): r""" :param Interval: 对流进行截图的间隔ms,默认1000ms :type Interval: int :param VideoShot: 是否保存截图 :type VideoShot: bool :param Jitter: 是否检测抖动重影 :type Jitter: bool :param Blur: 是否检测模糊 :type Blur: bool :param AbnormalLighting: 是否检测低光照、过曝 :type AbnormalLighting: bool :param CrashScreen: 是否检测花屏 :type CrashScreen: bool :param BlackWhiteEdge: 是否检测黑边、白边、黑屏、白屏、绿屏 :type BlackWhiteEdge: bool :param Noise: 是否检测噪点 :type Noise: bool :param Mosaic: 是否检测马赛克 :type Mosaic: bool :param QRCode: 是否检测二维码,包括小程序码、条形码 :type QRCode: bool :param QualityEvaluation: 是否开启画面质量评价 :type QualityEvaluation: bool :param QualityEvalScore: 画面质量评价过滤阈值,结果只返回低于阈值的时间段,默认60 :type QualityEvalScore: int :param Voice: 是否检测视频音频,包含静音、低音、爆音 :type Voice: bool """ self.Interval = None self.VideoShot = None self.Jitter = None self.Blur = None self.AbnormalLighting = None self.CrashScreen = None self.BlackWhiteEdge = None self.Noise = None self.Mosaic = None self.QRCode = None self.QualityEvaluation = None self.QualityEvalScore = None self.Voice = None def _deserialize(self, params): self.Interval = params.get("Interval") self.VideoShot = params.get("VideoShot") self.Jitter = params.get("Jitter") self.Blur = params.get("Blur") self.AbnormalLighting = params.get("AbnormalLighting") self.CrashScreen = params.get("CrashScreen") self.BlackWhiteEdge = params.get("BlackWhiteEdge") self.Noise = params.get("Noise") self.Mosaic = params.get("Mosaic") self.QRCode = params.get("QRCode") self.QualityEvaluation = params.get("QualityEvaluation") self.QualityEvalScore = params.get("QualityEvalScore") self.Voice = params.get("Voice") memeber_set = set(params.keys()) for name, value 
in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class QualityControlInfoTaskResult(AbstractModel): """媒体质检结果信息 """ def __init__(self): r""" :param TaskId: 质检任务 ID :type TaskId: str :param Status: 质检任务状态。 1:执行中;2:成功;3:失败 :type Status: int :param Progress: 表示处理进度百分比 :type Progress: int :param UsedTime: 处理时长(s) :type UsedTime: int :param Duration: 计费时长(s) :type Duration: int :param NoAudio: 为true时表示视频无音频轨 注意:此字段可能返回 null,表示取不到有效值。 :type NoAudio: bool :param NoVideo: 为true时表示视频无视频轨 注意:此字段可能返回 null,表示取不到有效值。 :type NoVideo: bool :param QualityEvaluationScore: 视频无参考质量打分,百分制 注意:此字段可能返回 null,表示取不到有效值。 :type QualityEvaluationScore: int :param QualityEvaluationResults: 视频画面无参考评分低于阈值的时间段 注意:此字段可能返回 null,表示取不到有效值。 :type QualityEvaluationResults: list of QualityControlResultItems :param JitterResults: 视频画面抖动时间段 注意:此字段可能返回 null,表示取不到有效值。 :type JitterResults: list of QualityControlResultItems :param BlurResults: 视频画面模糊时间段 注意:此字段可能返回 null,表示取不到有效值。 :type BlurResults: list of QualityControlResultItems :param AbnormalLightingResults: 视频画面低光、过曝时间段 注意:此字段可能返回 null,表示取不到有效值。 :type AbnormalLightingResults: list of QualityControlResultItems :param CrashScreenResults: 视频画面花屏时间段 注意:此字段可能返回 null,表示取不到有效值。 :type CrashScreenResults: list of QualityControlResultItems :param BlackWhiteEdgeResults: 视频画面黑边、白边、黑屏、白屏、纯色屏时间段 注意:此字段可能返回 null,表示取不到有效值。 :type BlackWhiteEdgeResults: list of QualityControlResultItems :param NoiseResults: 视频画面有噪点时间段 注意:此字段可能返回 null,表示取不到有效值。 :type NoiseResults: list of QualityControlResultItems :param MosaicResults: 视频画面有马赛克时间段 注意:此字段可能返回 null,表示取不到有效值。 :type MosaicResults: list of QualityControlResultItems :param QRCodeResults: 视频画面有二维码的时间段,包括小程序码、条形码 注意:此字段可能返回 null,表示取不到有效值。 :type QRCodeResults: list of QualityControlResultItems :param VoiceResults: 视频音频异常时间段,包括静音、低音、爆音 注意:此字段可能返回 null,表示取不到有效值。 :type VoiceResults: list of QualityControlResultItems :param 
ErrCode: 任务错误码 注意:此字段可能返回 null,表示取不到有效值。 :type ErrCode: int :param ErrMsg: 任务错误信息 注意:此字段可能返回 null,表示取不到有效值。 :type ErrMsg: str """ self.TaskId = None self.Status = None self.Progress = None self.UsedTime = None self.Duration = None self.NoAudio = None self.NoVideo = None self.QualityEvaluationScore = None self.QualityEvaluationResults = None self.JitterResults = None self.BlurResults = None self.AbnormalLightingResults = None self.CrashScreenResults = None self.BlackWhiteEdgeResults = None self.NoiseResults = None self.MosaicResults = None self.QRCodeResults = None self.VoiceResults = None self.ErrCode = None self.ErrMsg = None def _deserialize(self, params): self.TaskId = params.get("TaskId") self.Status = params.get("Status") self.Progress = params.get("Progress") self.UsedTime = params.get("UsedTime") self.Duration = params.get("Duration") self.NoAudio = params.get("NoAudio") self.NoVideo = params.get("NoVideo") self.QualityEvaluationScore = params.get("QualityEvaluationScore") if params.get("QualityEvaluationResults") is not None: self.QualityEvaluationResults = [] for item in params.get("QualityEvaluationResults"): obj = QualityControlResultItems() obj._deserialize(item) self.QualityEvaluationResults.append(obj) if params.get("JitterResults") is not None: self.JitterResults = [] for item in params.get("JitterResults"): obj = QualityControlResultItems() obj._deserialize(item) self.JitterResults.append(obj) if params.get("BlurResults") is not None: self.BlurResults = [] for item in params.get("BlurResults"): obj = QualityControlResultItems() obj._deserialize(item) self.BlurResults.append(obj) if params.get("AbnormalLightingResults") is not None: self.AbnormalLightingResults = [] for item in params.get("AbnormalLightingResults"): obj = QualityControlResultItems() obj._deserialize(item) self.AbnormalLightingResults.append(obj) if params.get("CrashScreenResults") is not None: self.CrashScreenResults = [] for item in params.get("CrashScreenResults"): obj = 
QualityControlResultItems() obj._deserialize(item) self.CrashScreenResults.append(obj) if params.get("BlackWhiteEdgeResults") is not None: self.BlackWhiteEdgeResults = [] for item in params.get("BlackWhiteEdgeResults"): obj = QualityControlResultItems() obj._deserialize(item) self.BlackWhiteEdgeResults.append(obj) if params.get("NoiseResults") is not None: self.NoiseResults = [] for item in params.get("NoiseResults"): obj = QualityControlResultItems() obj._deserialize(item) self.NoiseResults.append(obj) if params.get("MosaicResults") is not None: self.MosaicResults = [] for item in params.get("MosaicResults"): obj = QualityControlResultItems() obj._deserialize(item) self.MosaicResults.append(obj) if params.get("QRCodeResults") is not None: self.QRCodeResults = [] for item in params.get("QRCodeResults"): obj = QualityControlResultItems() obj._deserialize(item) self.QRCodeResults.append(obj) if params.get("VoiceResults") is not None: self.VoiceResults = [] for item in params.get("VoiceResults"): obj = QualityControlResultItems() obj._deserialize(item) self.VoiceResults.append(obj) self.ErrCode = params.get("ErrCode") self.ErrMsg = params.get("ErrMsg") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class QualityControlItem(AbstractModel): """质检结果项 """ def __init__(self): r""" :param Confidence: 置信度,取值范围是 0 到 100 注意:此字段可能返回 null,表示取不到有效值。 :type Confidence: int :param StartTimeOffset: 出现的起始时间戳,秒 :type StartTimeOffset: float :param EndTimeOffset: 出现的结束时间戳,秒 :type EndTimeOffset: float :param AreaCoordsSet: 区域坐标(px),即左上角坐标、右下角坐标 注意:此字段可能返回 null,表示取不到有效值。 :type AreaCoordsSet: list of int non-negative """ self.Confidence = None self.StartTimeOffset = None self.EndTimeOffset = None self.AreaCoordsSet = None def _deserialize(self, params): self.Confidence = params.get("Confidence") self.StartTimeOffset = params.get("StartTimeOffset") self.EndTimeOffset = params.get("EndTimeOffset") self.AreaCoordsSet = params.get("AreaCoordsSet") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class QualityControlResultItems(AbstractModel): """质检结果项数组 """ def __init__(self): r""" :param Id: 异常类型 注意:此字段可能返回 null,表示取不到有效值。 :type Id: str :param QualityControlItems: 质检结果项 :type QualityControlItems: list of QualityControlItem """ self.Id = None self.QualityControlItems = None def _deserialize(self, params): self.Id = params.get("Id") if params.get("QualityControlItems") is not None: self.QualityControlItems = [] for item in params.get("QualityControlItems"): obj = QualityControlItem() obj._deserialize(item) self.QualityControlItems.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class RemoveReverb(AbstractModel): """音频去除混响 """ def __init__(self): r""" :param Type: 去混响类型,可选项:normal :type Type: str """ self.Type = None def _deserialize(self, params): self.Type = params.get("Type") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ResultAudioInfo(AbstractModel): """结果媒体文件的视频流信息 """ def __init__(self): r""" :param StreamId: 流在媒体文件中的流ID 注意:此字段可能返回 null,表示取不到有效值。 :type StreamId: int :param Duration: 流的时长,单位:毫秒 注意:此字段可能返回 null,表示取不到有效值。 :type Duration: int """ self.StreamId = None self.Duration = None def _deserialize(self, params): self.StreamId = params.get("StreamId") self.Duration = params.get("Duration") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ResultVideoInfo(AbstractModel): """结果媒体文件的视频流信息 """ def __init__(self): r""" :param StreamId: 流在媒体文件中的流ID 注意:此字段可能返回 null,表示取不到有效值。 :type StreamId: int :param Duration: 流的时长,单位:毫秒 注意:此字段可能返回 null,表示取不到有效值。 :type Duration: int :param Width: 画面宽度 注意:此字段可能返回 null,表示取不到有效值。 :type Width: int :param Height: 画面高度 注意:此字段可能返回 null,表示取不到有效值。 :type Height: int :param Fps: 视频帧率 注意:此字段可能返回 null,表示取不到有效值。 :type Fps: int """ self.StreamId = None self.Duration = None self.Width = None self.Height = None self.Fps = None def _deserialize(self, params): self.StreamId = params.get("StreamId") self.Duration = params.get("Duration") self.Width = params.get("Width") self.Height = params.get("Height") self.Fps = params.get("Fps") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class SaveInfo(AbstractModel): """任务存储信息 """ def __init__(self): r""" :param Type: 存储类型,可选值: 1:CosInfo。 :type Type: int :param CosInfo: Cos形式存储信息,当Type等于1时必选。 :type CosInfo: :class:`tencentcloud.ie.v20200304.models.CosInfo` """ self.Type = None self.CosInfo = None def _deserialize(self, params): self.Type = params.get("Type") if params.get("CosInfo") is not None: self.CosInfo = CosInfo() self.CosInfo._deserialize(params.get("CosInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ScratchRepair(AbstractModel): """去划痕参数 """ def __init__(self): r""" :param Type: 去划痕方式,取值:normal。 :type Type: str :param Ratio: 去划痕强度, 可选项:0.0-1.0。小于0.0的默认为0.0,大于1.0的默认为1.0。 :type Ratio: float """ self.Type = None self.Ratio = None def _deserialize(self, params): self.Type = params.get("Type") self.Ratio = params.get("Ratio") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class SectionTime(AbstractModel): """时间区间。 """ def __init__(self): r""" :param StartTime: 开始时间点,单位ms :type StartTime: int :param Duration: 时间区间时长,单位ms :type Duration: int """ self.StartTime = None self.Duration = None def _deserialize(self, params): self.StartTime = params.get("StartTime") self.Duration = params.get("Duration") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class SegmentInfo(AbstractModel): """输出文件切片信息 """ def __init__(self): r""" :param FragmentTime: 每个切片平均时长,默认10s。 :type FragmentTime: int :param SegmentType: 切片类型,可选项:hls,不填时默认hls。 :type SegmentType: str :param FragmentName: 切片文件名字。注意: 1.不填切片文件名时,默认按照按照如下格式命名:m3u8文件名{order}。 2.若填了切片文件名字,则会按照如下格式命名:用户指定文件名{order}。 :type FragmentName: str """ self.FragmentTime = None self.SegmentType = None self.FragmentName = None def _deserialize(self, params): self.FragmentTime = params.get("FragmentTime") self.SegmentType = params.get("SegmentType") self.FragmentName = params.get("FragmentName") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class Sharp(AbstractModel): """细节增强参数 """ def __init__(self): r""" :param Type: 细节增强方式,取值:normal。 :type Type: str :param Ratio: 细节增强强度,可选项:0.0-1.0。小于0.0的默认为0.0,大于1.0的默认为1.0。 :type Ratio: float """ self.Type = None self.Ratio = None def _deserialize(self, params): self.Type = params.get("Type") self.Ratio = params.get("Ratio") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class StopMediaProcessTaskRequest(AbstractModel): """StopMediaProcessTask请求参数结构体 """ def __init__(self): r""" :param TaskId: 编辑处理任务ID。 :type TaskId: str """ self.TaskId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class StopMediaProcessTaskResponse(AbstractModel): """StopMediaProcessTask返回参数结构体 """ def __init__(self): r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.RequestId = None def _deserialize(self, params): self.RequestId = params.get("RequestId") class StopMediaQualityRestorationTaskRequest(AbstractModel): """StopMediaQualityRestorationTask请求参数结构体 """ def __init__(self): r""" :param TaskId: 要删除的画质重生任务ID。 :type TaskId: str """ self.TaskId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class StopMediaQualityRestorationTaskResponse(AbstractModel): """StopMediaQualityRestorationTask返回参数结构体 """ def __init__(self): r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.RequestId = None def _deserialize(self, params): self.RequestId = params.get("RequestId") class StripEditingInfo(AbstractModel): """智能拆条任务参数信息 """ def __init__(self): r""" :param Switch: 是否开启智能拆条。0为关闭,1为开启。其他非0非1值默认为0。 :type Switch: int :param CustomInfo: 额外定制化服务参数。参数为序列化的Json字符串,例如:{"k1":"v1"}。 :type CustomInfo: str """ self.Switch = None self.CustomInfo = None def _deserialize(self, params): self.Switch = params.get("Switch") self.CustomInfo = params.get("CustomInfo") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class StripTaskResult(AbstractModel): """智能拆条结果信息 """ def __init__(self): r""" :param Status: 编辑任务状态。 1:执行中;2:成功;3:失败。 :type Status: int :param ErrCode: 编辑任务失败错误码。 0:成功;其他值:失败。 :type ErrCode: int :param ErrMsg: 编辑任务失败错误描述。 :type ErrMsg: str :param ItemSet: 智能拆条结果集。 注意:此字段可能返回 null,表示取不到有效值。 :type ItemSet: list of StripTaskResultItem """ self.Status = None self.ErrCode = None self.ErrMsg = None self.ItemSet = None def _deserialize(self, params): self.Status = params.get("Status") self.ErrCode = params.get("ErrCode") self.ErrMsg = params.get("ErrMsg") if params.get("ItemSet") is not None: self.ItemSet = [] for item in params.get("ItemSet"): obj = StripTaskResultItem() obj._deserialize(item) self.ItemSet.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class StripTaskResultItem(AbstractModel): """智能拆条结果项 """ def __init__(self): r""" :param SegmentUrl: 视频拆条片段地址。 :type SegmentUrl: str :param CovImgUrl: 拆条封面图片地址。 :type CovImgUrl: str :param Confidence: 置信度,取值范围是 0 到 100。 :type Confidence: float :param StartTimeOffset: 拆条片段起始的偏移时间,单位:秒。 :type StartTimeOffset: float :param EndTimeOffset: 拆条片段终止的偏移时间,单位:秒。 :type EndTimeOffset: float """ self.SegmentUrl = None self.CovImgUrl = None self.Confidence = None self.StartTimeOffset = None self.EndTimeOffset = None def _deserialize(self, params): self.SegmentUrl = params.get("SegmentUrl") self.CovImgUrl = params.get("CovImgUrl") self.Confidence = params.get("Confidence") self.StartTimeOffset = params.get("StartTimeOffset") self.EndTimeOffset = params.get("EndTimeOffset") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class SubTaskResultItem(AbstractModel): """画质重生子任务结果 """ def __init__(self): r""" :param TaskName: 子任务名称。 注意:此字段可能返回 null,表示取不到有效值。 :type TaskName: str :param StatusCode: 子任务状态。 0:成功; 1:执行中; 其他值:失败。 :type StatusCode: int :param StatusMsg: 子任务状态描述。 :type StatusMsg: str :param ProgressRate: 子任务进度。 注意:此字段可能返回 null,表示取不到有效值。 :type ProgressRate: int :param DownloadUrl: 画质重生处理后文件的下载地址。 注意:此字段可能返回 null,表示取不到有效值。 :type DownloadUrl: str :param Md5: 画质重生处理后文件的MD5。 注意:此字段可能返回 null,表示取不到有效值。 :type Md5: str :param FileInfo: 画质重生处理后文件的详细信息。 注意:此字段可能返回 null,表示取不到有效值。 :type FileInfo: :class:`tencentcloud.ie.v20200304.models.FileInfo` """ self.TaskName = None self.StatusCode = None self.StatusMsg = None self.ProgressRate = None self.DownloadUrl = None self.Md5 = None self.FileInfo = None def _deserialize(self, params): self.TaskName = params.get("TaskName") self.StatusCode = params.get("StatusCode") self.StatusMsg = params.get("StatusMsg") self.ProgressRate = params.get("ProgressRate") self.DownloadUrl = params.get("DownloadUrl") self.Md5 = params.get("Md5") if params.get("FileInfo") is not None: self.FileInfo = FileInfo() self.FileInfo._deserialize(params.get("FileInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class SubTaskTranscodeInfo(AbstractModel): """画质重生子任务参数信息 """ def __init__(self): r""" :param TaskName: 子任务名称。 :type TaskName: str :param TargetInfo: 目标文件信息。 :type TargetInfo: :class:`tencentcloud.ie.v20200304.models.TargetInfo` :param EditInfo: 视频剪辑信息。注意:如果填写了EditInfo,则VideoInfo和AudioInfo必填 :type EditInfo: :class:`tencentcloud.ie.v20200304.models.EditInfo` :param VideoInfo: 视频转码信息,不填保持和源文件一致。 :type VideoInfo: :class:`tencentcloud.ie.v20200304.models.VideoInfo` :param AudioInfo: 音频转码信息,不填保持和源文件一致。 :type AudioInfo: :class:`tencentcloud.ie.v20200304.models.AudioInfo` :param MuxInfo: 指定封装信息。 :type MuxInfo: :class:`tencentcloud.ie.v20200304.models.MuxInfo` """ self.TaskName = None self.TargetInfo = None self.EditInfo = None self.VideoInfo = None self.AudioInfo = None self.MuxInfo = None def _deserialize(self, params): self.TaskName = params.get("TaskName") if params.get("TargetInfo") is not None: self.TargetInfo = TargetInfo() self.TargetInfo._deserialize(params.get("TargetInfo")) if params.get("EditInfo") is not None: self.EditInfo = EditInfo() self.EditInfo._deserialize(params.get("EditInfo")) if params.get("VideoInfo") is not None: self.VideoInfo = VideoInfo() self.VideoInfo._deserialize(params.get("VideoInfo")) if params.get("AudioInfo") is not None: self.AudioInfo = AudioInfo() self.AudioInfo._deserialize(params.get("AudioInfo")) if params.get("MuxInfo") is not None: self.MuxInfo = MuxInfo() self.MuxInfo._deserialize(params.get("MuxInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class SubtitleItem(AbstractModel): """语音字幕识别项 """ def __init__(self): r""" :param Id: 语音识别结果 :type Id: str :param Zh: 中文翻译结果 注意:此字段可能返回 null,表示取不到有效值。 :type Zh: str :param En: 英文翻译结果 注意:此字段可能返回 null,表示取不到有效值。 :type En: str :param StartPts: 语句起始时间戳PTS(ms) :type StartPts: int :param EndPts: 语句结束时间戳PTS(ms) :type EndPts: int :param Period: 字符串形式的起始结束时间 :type Period: str :param Confidence: 结果的置信度(百分制) :type Confidence: int :param EndFlag: 当前语句是否结束 :type EndFlag: bool :param PuncEndTs: 语句分割时间戳 注意:此字段可能返回 null,表示取不到有效值。 :type PuncEndTs: str """ self.Id = None self.Zh = None self.En = None self.StartPts = None self.EndPts = None self.Period = None self.Confidence = None self.EndFlag = None self.PuncEndTs = None def _deserialize(self, params): self.Id = params.get("Id") self.Zh = params.get("Zh") self.En = params.get("En") self.StartPts = params.get("StartPts") self.EndPts = params.get("EndPts") self.Period = params.get("Period") self.Confidence = params.get("Confidence") self.EndFlag = params.get("EndFlag") self.PuncEndTs = params.get("PuncEndTs") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class SubtitleRec(AbstractModel): """语音字幕任务参数 """ def __init__(self): r""" :param AsrDst: 语音识别: zh:中文 en:英文 :type AsrDst: str :param TransDst: 翻译识别: zh:中文 en:英文 :type TransDst: str """ self.AsrDst = None self.TransDst = None def _deserialize(self, params): self.AsrDst = params.get("AsrDst") self.TransDst = params.get("TransDst") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class SubtitleResult(AbstractModel): """语音字幕识别结果 """ def __init__(self): r""" :param SubtitleItems: 语音字幕数组 :type SubtitleItems: list of SubtitleItem """ self.SubtitleItems = None def _deserialize(self, params): if params.get("SubtitleItems") is not None: self.SubtitleItems = [] for item in params.get("SubtitleItems"): obj = SubtitleItem() obj._deserialize(item) self.SubtitleItems.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TagEditingInfo(AbstractModel): """视频标签识别任务参数信息 """ def __init__(self): r""" :param Switch: 是否开启视频标签识别。0为关闭,1为开启。其他非0非1值默认为0。 :type Switch: int :param CustomInfo: 额外定制化服务参数。参数为序列化的Json字符串,例如:{"k1":"v1"}。 :type CustomInfo: str """ self.Switch = None self.CustomInfo = None def _deserialize(self, params): self.Switch = params.get("Switch") self.CustomInfo = params.get("CustomInfo") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TagItem(AbstractModel): """标签项 """ def __init__(self): r""" :param Id: 标签内容 :type Id: str :param Confidence: 结果的置信度(百分制) :type Confidence: int :param Categorys: 分级数组 注意:此字段可能返回 null,表示取不到有效值。 :type Categorys: list of str :param Ext: 标签备注 注意:此字段可能返回 null,表示取不到有效值。 :type Ext: str """ self.Id = None self.Confidence = None self.Categorys = None self.Ext = None def _deserialize(self, params): self.Id = params.get("Id") self.Confidence = params.get("Confidence") self.Categorys = params.get("Categorys") self.Ext = params.get("Ext") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class TagTaskResult(AbstractModel): """视频标签识别结果信息 """ def __init__(self): r""" :param Status: 编辑任务状态。 1:执行中;2:成功;3:失败。 :type Status: int :param ErrCode: 编辑任务失败错误码。 0:成功;其他值:失败。 :type ErrCode: int :param ErrMsg: 编辑任务失败错误描述。 :type ErrMsg: str :param ItemSet: 视频标签识别结果集。 注意:此字段可能返回 null,表示取不到有效值。 :type ItemSet: list of TagTaskResultItem """ self.Status = None self.ErrCode = None self.ErrMsg = None self.ItemSet = None def _deserialize(self, params): self.Status = params.get("Status") self.ErrCode = params.get("ErrCode") self.ErrMsg = params.get("ErrMsg") if params.get("ItemSet") is not None: self.ItemSet = [] for item in params.get("ItemSet"): obj = TagTaskResultItem() obj._deserialize(item) self.ItemSet.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TagTaskResultItem(AbstractModel): """视频标签识别结果项 """ def __init__(self): r""" :param Tag: 标签名称。 :type Tag: str :param Confidence: 置信度,取值范围是 0 到 100。 :type Confidence: float """ self.Tag = None self.Confidence = None def _deserialize(self, params): self.Tag = params.get("Tag") self.Confidence = params.get("Confidence") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class TargetInfo(AbstractModel): """输出文件信息 """ def __init__(self): r""" :param FileName: 目标文件名 :type FileName: str :param SegmentInfo: 目标文件切片信息 :type SegmentInfo: :class:`tencentcloud.ie.v20200304.models.SegmentInfo` """ self.FileName = None self.SegmentInfo = None def _deserialize(self, params): self.FileName = params.get("FileName") if params.get("SegmentInfo") is not None: self.SegmentInfo = SegmentInfo() self.SegmentInfo._deserialize(params.get("SegmentInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TargetVideoInfo(AbstractModel): """目标视频信息。 """ def __init__(self): r""" :param Width: 视频宽度,单位像素 :type Width: int :param Height: 视频高度,单位像素 :type Height: int :param FrameRate: 视频帧率,范围在1到120之间 :type FrameRate: int """ self.Width = None self.Height = None self.FrameRate = None def _deserialize(self, params): self.Width = params.get("Width") self.Height = params.get("Height") self.FrameRate = params.get("FrameRate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class TaskResultFile(AbstractModel): """任务结果文件信息 """ def __init__(self): r""" :param Url: 文件链接。 注意:此字段可能返回 null,表示取不到有效值。 :type Url: str :param FileSize: 文件大小,部分任务支持,单位:字节 注意:此字段可能返回 null,表示取不到有效值。 :type FileSize: int :param MediaInfo: 媒体信息,对于媒体文件,部分任务支持返回 注意:此字段可能返回 null,表示取不到有效值。 :type MediaInfo: :class:`tencentcloud.ie.v20200304.models.MediaResultInfo` """ self.Url = None self.FileSize = None self.MediaInfo = None def _deserialize(self, params): self.Url = params.get("Url") self.FileSize = params.get("FileSize") if params.get("MediaInfo") is not None: self.MediaInfo = MediaResultInfo() self.MediaInfo._deserialize(params.get("MediaInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TextMarkInfoItem(AbstractModel): """画质重生子任务文字水印信息 """ def __init__(self): r""" :param Text: 文字内容。 :type Text: str :param PosX: 文字水印X坐标。 :type PosX: int :param PosY: 文字水印Y坐标。 :type PosY: int :param FontSize: 文字大小 :type FontSize: int :param FontFile: 字体,可选项:hei,song,simkai,arial;默认hei(黑体)。 :type FontFile: str :param FontColor: 字体颜色,颜色见附录,不填默认black。 :type FontColor: str :param FontAlpha: 文字透明度,可选值0-1。0:不透明,1:全透明。默认为0 :type FontAlpha: float """ self.Text = None self.PosX = None self.PosY = None self.FontSize = None self.FontFile = None self.FontColor = None self.FontAlpha = None def _deserialize(self, params): self.Text = params.get("Text") self.PosX = params.get("PosX") self.PosY = params.get("PosY") self.FontSize = params.get("FontSize") self.FontFile = params.get("FontFile") self.FontColor = params.get("FontColor") self.FontAlpha = params.get("FontAlpha") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class UrlInfo(AbstractModel): """任务视频Url形式下载信息。 """ def __init__(self): r""" :param Url: 视频 URL。 注意:编辑理解仅支持mp4、flv等格式的点播文件,不支持hls; :type Url: str :param Format: 视频地址格式,可选值: 0:音视频 ; 1:直播流。 默认为0。其他非0非1值默认为0。画质重生任务只支持0。 :type Format: int :param Host: 【不再支持】指定请求资源时,HTTP头部host的值。 :type Host: str """ self.Url = None self.Format = None self.Host = None def _deserialize(self, params): self.Url = params.get("Url") self.Format = params.get("Format") self.Host = params.get("Host") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class VideoEnhance(AbstractModel): """画质增强参数信息 """ def __init__(self): r""" :param ArtifactReduction: 去编码毛刺、伪影参数。 :type ArtifactReduction: :class:`tencentcloud.ie.v20200304.models.ArtifactReduction` :param Denoising: 去噪声参数。 :type Denoising: :class:`tencentcloud.ie.v20200304.models.Denoising` :param ColorEnhance: 颜色增强参数。 :type ColorEnhance: :class:`tencentcloud.ie.v20200304.models.ColorEnhance` :param Sharp: 细节增强参数。 :type Sharp: :class:`tencentcloud.ie.v20200304.models.Sharp` :param WdSuperResolution: 超分参数,可选项:2,目前仅支持2倍超分。 注意:此参数已经弃用,超分可以使用VideoSuperResolution参数 :type WdSuperResolution: int :param FaceProtect: 人脸保护信息。 :type FaceProtect: :class:`tencentcloud.ie.v20200304.models.FaceProtect` :param WdFps: 插帧,取值范围:[0, 60],单位:Hz。 注意:当取值为 0,表示帧率和原始视频保持一致。 :type WdFps: int :param ScratchRepair: 去划痕参数 :type ScratchRepair: :class:`tencentcloud.ie.v20200304.models.ScratchRepair` :param LowLightEnhance: 低光照增强参数 :type LowLightEnhance: :class:`tencentcloud.ie.v20200304.models.LowLightEnhance` :param VideoSuperResolution: 视频超分参数 :type VideoSuperResolution: :class:`tencentcloud.ie.v20200304.models.VideoSuperResolution` :param VideoRepair: 视频画质修复参数 :type VideoRepair: :class:`tencentcloud.ie.v20200304.models.VideoRepair` """ self.ArtifactReduction = None self.Denoising = None 
self.ColorEnhance = None self.Sharp = None self.WdSuperResolution = None self.FaceProtect = None self.WdFps = None self.ScratchRepair = None self.LowLightEnhance = None self.VideoSuperResolution = None self.VideoRepair = None def _deserialize(self, params): if params.get("ArtifactReduction") is not None: self.ArtifactReduction = ArtifactReduction() self.ArtifactReduction._deserialize(params.get("ArtifactReduction")) if params.get("Denoising") is not None: self.Denoising = Denoising() self.Denoising._deserialize(params.get("Denoising")) if params.get("ColorEnhance") is not None: self.ColorEnhance = ColorEnhance() self.ColorEnhance._deserialize(params.get("ColorEnhance")) if params.get("Sharp") is not None: self.Sharp = Sharp() self.Sharp._deserialize(params.get("Sharp")) self.WdSuperResolution = params.get("WdSuperResolution") if params.get("FaceProtect") is not None: self.FaceProtect = FaceProtect() self.FaceProtect._deserialize(params.get("FaceProtect")) self.WdFps = params.get("WdFps") if params.get("ScratchRepair") is not None: self.ScratchRepair = ScratchRepair() self.ScratchRepair._deserialize(params.get("ScratchRepair")) if params.get("LowLightEnhance") is not None: self.LowLightEnhance = LowLightEnhance() self.LowLightEnhance._deserialize(params.get("LowLightEnhance")) if params.get("VideoSuperResolution") is not None: self.VideoSuperResolution = VideoSuperResolution() self.VideoSuperResolution._deserialize(params.get("VideoSuperResolution")) if params.get("VideoRepair") is not None: self.VideoRepair = VideoRepair() self.VideoRepair._deserialize(params.get("VideoRepair")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class VideoInfo(AbstractModel): """视频转码信息 """ def __init__(self): r""" :param Fps: 视频帧率,取值范围:[0, 60],单位:Hz。 注意:当取值为 0,表示帧率和原始视频保持一致。 :type Fps: int :param Width: 宽度,取值范围:0 和 [128, 4096] 注意: 当 Width、Height 均为 0,则分辨率同源; 当 Width 为 0,Height 非 0,则 Width 按比例缩放; 当 Width 非 0,Height 为 0,则 Height 按比例缩放; 当 Width、Height 均非 0,则分辨率按用户指定。 :type Width: int :param Height: 高度,取值范围:0 和 [128, 4096] 注意: 当 Width、Height 均为 0,则分辨率同源; 当 Width 为 0,Height 非 0,则 Width 按比例缩放; 当 Width 非 0,Height 为 0,则 Height 按比例缩放; 当 Width、Height 均非 0,则分辨率按用户指定。 :type Height: int :param LongSide: 长边分辨率,取值范围:0 和 [128, 4096] 注意: 当 LongSide、ShortSide 均为 0,则分辨率按照Width,Height; 当 LongSide 为 0,ShortSide 非 0,则 LongSide 按比例缩放; 当 LongSide非 0,ShortSide为 0,则 ShortSide 按比例缩放; 当 LongSide、ShortSide 均非 0,则分辨率按用户指定。 长短边优先级高于Weight,Height,设置长短边则忽略宽高。 :type LongSide: int :param ShortSide: 短边分辨率,取值范围:0 和 [128, 4096] 注意: 当 LongSide、ShortSide 均为 0,则分辨率按照Width,Height; 当 LongSide 为 0,ShortSide 非 0,则 LongSide 按比例缩放; 当 LongSide非 0,ShortSide为 0,则 ShortSide 按比例缩放; 当 LongSide、ShortSide 均非 0,则分辨率按用户指定。 长短边优先级高于Weight,Height,设置长短边则忽略宽高。 :type ShortSide: int :param Bitrate: 视频流的码率,取值范围:0 和 [128, 35000],单位:kbps。当取值为 0,表示视频码率和原始视频保持一致。 :type Bitrate: int :param Gop: 固定I帧之间,视频帧数量,取值范围: [25, 2500],如果不填,使用编码默认最优序列。 :type Gop: int :param VideoCodec: 编码器支持选项,可选值: h264, h265, av1。 不填默认h264。 :type VideoCodec: str :param PicMarkInfo: 图片水印。 :type PicMarkInfo: list of PicMarkInfoItem :param DarInfo: 填充方式,当视频流配置宽高参数与原始视频的宽高比不一致时,对转码的处理方式,即为“填充”。 :type DarInfo: :class:`tencentcloud.ie.v20200304.models.DarInfo` :param Hdr: 支持hdr,可选项: hdr10, hlg。 此时,VideoCodec会强制设置为h265, 编码位深为10 :type Hdr: str :param VideoEnhance: 画质增强参数信息。 :type VideoEnhance: :class:`tencentcloud.ie.v20200304.models.VideoEnhance` :param HiddenMarkInfo: 数字水印参数信息。 :type HiddenMarkInfo: :class:`tencentcloud.ie.v20200304.models.HiddenMarkInfo` :param TextMarkInfo: 文本水印参数信息。 :type TextMarkInfo: list of TextMarkInfoItem """ self.Fps = None self.Width = None self.Height = 
None self.LongSide = None self.ShortSide = None self.Bitrate = None self.Gop = None self.VideoCodec = None self.PicMarkInfo = None self.DarInfo = None self.Hdr = None self.VideoEnhance = None self.HiddenMarkInfo = None self.TextMarkInfo = None def _deserialize(self, params): self.Fps = params.get("Fps") self.Width = params.get("Width") self.Height = params.get("Height") self.LongSide = params.get("LongSide") self.ShortSide = params.get("ShortSide") self.Bitrate = params.get("Bitrate") self.Gop = params.get("Gop") self.VideoCodec = params.get("VideoCodec") if params.get("PicMarkInfo") is not None: self.PicMarkInfo = [] for item in params.get("PicMarkInfo"): obj = PicMarkInfoItem() obj._deserialize(item) self.PicMarkInfo.append(obj) if params.get("DarInfo") is not None: self.DarInfo = DarInfo() self.DarInfo._deserialize(params.get("DarInfo")) self.Hdr = params.get("Hdr") if params.get("VideoEnhance") is not None: self.VideoEnhance = VideoEnhance() self.VideoEnhance._deserialize(params.get("VideoEnhance")) if params.get("HiddenMarkInfo") is not None: self.HiddenMarkInfo = HiddenMarkInfo() self.HiddenMarkInfo._deserialize(params.get("HiddenMarkInfo")) if params.get("TextMarkInfo") is not None: self.TextMarkInfo = [] for item in params.get("TextMarkInfo"): obj = TextMarkInfoItem() obj._deserialize(item) self.TextMarkInfo.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class VideoInfoResultItem(AbstractModel): """任务结束后生成的文件视频信息 """ def __init__(self): r""" :param Stream: 视频流的流id。 :type Stream: int :param Width: 视频宽度。 注意:此字段可能返回 null,表示取不到有效值。 :type Width: int :param Height: 视频高度。 注意:此字段可能返回 null,表示取不到有效值。 :type Height: int :param Bitrate: 视频码率,单位:bps。 注意:此字段可能返回 null,表示取不到有效值。 :type Bitrate: int :param Fps: 视频帧率,用分数格式表示,如:25/1, 99/32等等。 注意:此字段可能返回 null,表示取不到有效值。 :type Fps: str :param Codec: 编码格式,如h264,h265等等 。 注意:此字段可能返回 null,表示取不到有效值。 :type Codec: str :param Rotate: 播放旋转角度,可选值0-360。 注意:此字段可能返回 null,表示取不到有效值。 :type Rotate: int :param Duration: 视频时长,单位:ms 。 注意:此字段可能返回 null,表示取不到有效值。 :type Duration: int :param PixFormat: 颜色空间,如yuv420p,yuv444p等等。 注意:此字段可能返回 null,表示取不到有效值。 :type PixFormat: str """ self.Stream = None self.Width = None self.Height = None self.Bitrate = None self.Fps = None self.Codec = None self.Rotate = None self.Duration = None self.PixFormat = None def _deserialize(self, params): self.Stream = params.get("Stream") self.Width = params.get("Width") self.Height = params.get("Height") self.Bitrate = params.get("Bitrate") self.Fps = params.get("Fps") self.Codec = params.get("Codec") self.Rotate = params.get("Rotate") self.Duration = params.get("Duration") self.PixFormat = params.get("PixFormat") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class VideoRepair(AbstractModel): """综合画质修复,包括:去噪,去毛刺,细节增强,主观画质提升。 """ def __init__(self): r""" :param Type: 画质修复类型,可选值:weak,normal,strong; 默认值: weak :type Type: str """ self.Type = None def _deserialize(self, params): self.Type = params.get("Type") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class VideoSuperResolution(AbstractModel): """视频超分 """ def __init__(self): r""" :param Type: 超分视频类型:可选值:lq,hq lq: 针对低清晰度有较多噪声视频的超分; hq: 针对高清晰度视频超分; 默认取值:lq。 :type Type: str :param Size: 超分倍数,可选值:2。 注意:当前只支持两倍超分。 :type Size: int """ self.Type = None self.Size = None def _deserialize(self, params): self.Type = params.get("Type") self.Size = params.get("Size") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set))
tzpBingo/github-trending
codespace/python/tencentcloud/ie/v20200304/models.py
Python
mit
146,031
0.003101
# -*- coding: utf-8 -*-
"""Builders for MongoDB update documents.

``Update`` accumulates modifications under the standard MongoDB update
operators ($set, $inc, $push, ...) and iterates as ``(operator, fields)``
pairs, so ``dict(update)`` yields a document suitable for a driver's
``update`` call.

Fix: ``Update.__iter__`` and ``Update.drop_field`` previously used the
Python-2-only ``dict.iteritems()`` while ``Update.__contains__`` already
used ``dict.items()``; both now use ``items()``, which works on Python 2
and Python 3 alike.
"""
import collections


class InvalidOperatorError(ValueError):
    """Raised when an unsupported MongoDB update operator is used."""
    pass


class DuplicateFieldError(ValueError):
    """Raised when a field is assigned twice under the same operator."""
    pass


class FieldDict(dict):
    """A field-name -> value mapping that forbids reassignment.

    Setting a key that is already present raises ``DuplicateFieldError``,
    guarding against silently overwriting part of a pending update.
    """

    def __setitem__(self, k, v):
        if k in self:
            raise DuplicateFieldError('Field "{0}" already set.'.format(k))
        super(FieldDict, self).__setitem__(k, v)

    def update(self, E=None, **F):
        # dict.update() would bypass __setitem__'s duplicate check, so it
        # is deliberately disabled.
        raise NotImplementedError()


class OperatorDict(collections.defaultdict):
    """An operator-name -> ``FieldDict`` mapping.

    Only operators listed in :attr:`OPERATORS` may be used as keys;
    missing keys are auto-created as empty ``FieldDict`` instances.
    """

    OPERATORS = ('$inc', '$rename', '$set', '$unset', '$push', '$pushAll',
                 '$addToSet', '$pop', '$pull', '$pullAll')

    def __init__(self):
        super(OperatorDict, self).__init__(FieldDict)

    def __setitem__(self, k, v):
        if k not in self.OPERATORS:
            raise InvalidOperatorError('"{0}" is not a valid operator'.format(k))
        super(OperatorDict, self).__setitem__(k, v)

    def update(self, E=None, **F):
        # Disabled for the same reason as FieldDict.update().
        raise NotImplementedError()


class Update(object):
    """Incrementally build a MongoDB update document."""

    def __init__(self):
        self._ops = OperatorDict()

    def __iter__(self):
        """Iterate over ``(operator, field-dict)`` pairs."""
        # items() (not Python-2-only iteritems()) for 2/3 compatibility,
        # consistent with __contains__ below.
        return iter(self._ops.items())

    def __contains__(self, item):
        """Return True if *item* is a field touched by any operator."""
        return any(item in fields for _, fields in self._ops.items())

    def clear(self):
        """Discard all pending modifications."""
        self._ops.clear()

    def drop_field(self, field):
        """Remove *field* from every operator, pruning emptied operators."""
        empty_keys = []
        for k, updates in self._ops.items():
            updates.pop(field, None)
            if not updates:
                empty_keys.append(k)
        # Delete outside the loop so the dict is not mutated mid-iteration.
        for k in empty_keys:
            del self._ops[k]

    def set(self, field, value):
        """Set *field* to *value* ($set)."""
        self._ops['$set'][field] = value

    def inc(self, field, increment):
        """Increment *field* by *increment* ($inc).

        >>> update = Update()
        >>> update.inc('foo', 'bar')
        >>> dict(update)
        {'$inc': {'foo': 'bar'}}
        """
        self._ops['$inc'][field] = increment

    def rename(self, old, new):
        """Rename field *old* to *new* ($rename).

        >>> update = Update()
        >>> update.rename('old', 'new')
        >>> dict(update)
        {'$rename': {'old': 'new'}}
        """
        self._ops['$rename'][old] = new

    def unset(self, name):
        """Delete field *name* from the document ($unset)."""
        self._ops['$unset'][name] = 1

    def push(self, name, value):
        """Append *value* to array field *name* ($push)."""
        self._ops['$push'][name] = value

    def pushAll(self, name, values):
        """Append each of *values* to array field *name* ($pushAll)."""
        self._ops['$pushAll'][name] = values

    def addToSet(self, name, value):
        """Add *value* to array *name* unless already present ($addToSet)."""
        self._ops['$addToSet'][name] = value

    def pop(self, name, first=False):
        """Remove the last (or, if *first* is true, the first) element
        of array field *name* ($pop)."""
        v = (-1 if first else 1)
        self._ops['$pop'][name] = v

    def pull(self, name, value):
        """Remove all occurrences of *value* from array *name* ($pull)."""
        self._ops['$pull'][name] = value

    def pullAll(self, name, values):
        """Remove all occurrences of each of *values* from array *name*
        ($pullAll)."""
        self._ops['$pullAll'][name] = values
voxelbrain/dibble
dibble/update.py
Python
bsd-3-clause
2,635
0.000759
# -*- python -*- # Package : omniidl # template.py Created on: 2000/01/18 # Author : David Scott (djs) # # Copyright (C) 2003-2008 Apasphere Ltd # Copyright (C) 1999 AT&T Laboratories Cambridge # # This file is part of omniidl. # # omniidl is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # Description: # # C++ templates for the .hh file # $Id: template.py 5867 2009-05-06 16:16:18Z dgrisby $ # $Log$ # Revision 1.8.2.20 2008/12/29 18:44:38 dgrisby # Globally scope array functions to avoid ambiguities. # # Revision 1.8.2.19 2008/12/03 10:53:58 dgrisby # Tweaks leading to Python 3 support; other minor clean-ups. # # Revision 1.8.2.18 2007/09/19 14:16:07 dgrisby # Avoid namespace clashes if IDL defines modules named CORBA. # # Revision 1.8.2.17 2007/05/11 09:52:27 dgrisby # New -Wbguard_prefix option. Thanks Austin Bingham. # # Revision 1.8.2.16 2006/10/23 15:36:25 dgrisby # Undefine USE_stub_in_nt_dll at the end of header if it was not defined # at the start. # # Revision 1.8.2.15 2006/09/04 11:40:06 dgrisby # Remove crazy switch code in enum marshalling. # # Revision 1.8.2.14 2006/01/10 12:24:03 dgrisby # Merge from omni4_0_develop pre 4.0.7 release. # # Revision 1.8.2.13 2005/11/14 11:02:16 dgrisby # Local interface fixes. # # Revision 1.8.2.12 2005/11/09 12:22:17 dgrisby # Local interfaces support. 
# # Revision 1.8.2.11 2005/08/16 13:51:21 dgrisby # Problems with valuetype / abstract interface C++ mapping. # # Revision 1.8.2.10 2005/07/22 17:18:37 dgrisby # Another merge from omni4_0_develop. # # Revision 1.8.2.9 2005/01/06 23:10:06 dgrisby # Big merge from omni4_0_develop. # # Revision 1.8.2.8 2005/01/06 16:35:18 dgrisby # Narrowing for abstract interfaces. # # Revision 1.8.2.7 2004/10/13 17:58:24 dgrisby # Abstract interfaces support; values support interfaces; value bug fixes. # # Revision 1.8.2.6 2004/07/31 23:46:27 dgrisby # Correct constness of exception Any insertion operator. # # Revision 1.8.2.5 2004/07/23 10:29:59 dgrisby # Completely new, much simpler Any implementation. # # Revision 1.8.2.4 2004/07/04 23:53:39 dgrisby # More ValueType TypeCode and Any support. # # Revision 1.8.2.3 2004/02/16 10:10:32 dgrisby # More valuetype, including value boxes. C++ mapping updates. # # Revision 1.8.2.2 2003/10/23 11:25:55 dgrisby # More valuetype support. # # Revision 1.8.2.1 2003/03/23 21:02:36 dgrisby # Start of omniORB 4.1.x development branch. # # Revision 1.5.2.19 2001/11/12 13:46:07 dpg1 # _unchecked_narrow, improved _narrow. # # Revision 1.5.2.18 2001/11/08 16:33:51 dpg1 # Local servant POA shortcut policy. # # Revision 1.5.2.17 2001/10/29 17:42:41 dpg1 # Support forward-declared structs/unions, ORB::create_recursive_tc(). # # Revision 1.5.2.16 2001/10/18 12:45:28 dpg1 # IDL compiler tweaks. # # Revision 1.5.2.15 2001/10/17 16:44:05 dpg1 # Update DynAny to CORBA 2.5 spec, const Any exception extraction. # # Revision 1.5.2.14 2001/09/19 17:29:04 dpg1 # Cosmetic changes. # # Revision 1.5.2.13 2001/08/17 13:45:56 dpg1 # C++ mapping fixes. # # Revision 1.5.2.12 2001/08/15 10:26:10 dpg1 # New object table behaviour, correct POA semantics. # # Revision 1.5.2.11 2001/08/03 17:41:17 sll # System exception minor code overhaul. When a system exeception is raised, # a meaning minor code is provided. 
# # Revision 1.5.2.10 2001/07/31 19:25:11 sll # Array _var should be separated into fixed and variable size ones. # # Revision 1.5.2.9 2001/06/18 20:30:51 sll # Only define 1 conversion operator from T_var to T* if the compiler is # gcc. Previously, this is only done for gcc 2.7.2. It seems that gcc 3.0 # requires this to be the case. This is the default for all versions of # gcc. # # Revision 1.5.2.8 2001/05/29 17:03:50 dpg1 # In process identity. # # Revision 1.5.2.7 2001/04/19 09:30:12 sll # Big checkin with the brand new internal APIs. # Scoped where appropriate with the omni namespace. # # Revision 1.5.2.6 2001/03/13 10:32:09 dpg1 # Fixed point support. # # Revision 1.5.2.5 2000/11/20 14:43:25 sll # Added support for wchar and wstring. # # Revision 1.5.2.4 2000/11/09 12:27:55 dpg1 # Huge merge from omni3_develop, plus full long long from omni3_1_develop. # # Revision 1.5.2.3 2000/11/03 19:20:41 sll # Replaced old marshal operators with a unified operator for cdrStream. # # Revision 1.5.2.2 2000/10/12 15:37:51 sll # Updated from omni3_1_develop. # # Revision 1.6.2.2 2000/08/21 11:35:18 djs # Lots of tidying # # Revision 1.6.2.1 2000/08/02 10:52:02 dpg1 # New omni3_1_develop branch, merged from omni3_develop. # # Revision 1.6 2000/07/13 15:26:00 dpg1 # Merge from omni3_develop for 3.0 release. # # Revision 1.3.2.15 2000/07/26 15:29:11 djs # Missing typedef and forward when generating BOA skeletons # # Revision 1.3.2.14 2000/07/24 09:35:20 dpg1 # Adding the missing constructor meant that there was no longer a # default constructor. # # Revision 1.3.2.13 2000/07/24 10:17:31 djs # Added missing BOA skeleton constructor # # Revision 1.3.2.12 2000/07/04 12:57:55 djs # Fixed Any insertion/extraction operators for unions and exceptions # # Revision 1.3.2.11 2000/06/26 16:24:00 djs # Better handling of #include'd files (via new commandline options) # Refactoring of configuration state mechanism. 
# # Revision 1.3.2.10 2000/06/19 18:19:50 djs # Implemented union discriminant setting function _d(_value) with checks for # illegal uses (setting to a label corresponding to a non-current member and # setting before initialisation) # # Revision 1.3.2.9 2000/06/05 13:03:57 djs # Removed union member name clash (x & pd_x, pd__default, pd__d) # Removed name clash when a sequence is called "pd_seq" # Nested union within union fix # Actually generates BOA non-flattened tie templates # # Revision 1.3.2.8 2000/05/31 18:02:58 djs # Better output indenting (and preprocessor directives now correctly output at # the beginning of lines) # # Revision 1.3.2.7 2000/05/30 15:59:25 djs # Removed inheritance ambiguity in generated BOA _sk_ and POA_ classes # # Revision 1.3.2.6 2000/05/18 15:57:33 djs # Added missing T* data constructor for bounded sequence types # # Revision 1.3.2.5 2000/03/20 11:50:20 djs # Removed excess buffering- output templates have code attached which is # lazily evaluated when required. # # Revision 1.3.2.4 2000/03/10 12:01:03 djr # Re-fixed omniidl (make exception _NP_duplicate() public). # # Revision 1.3.2.3 2000/03/09 15:22:42 djs # Changing the protection status of an exception method, mirroring a change # in omniidl3 # # Revision 1.3.2.2 2000/03/07 18:07:33 djr # Fixed user-exceptions when can't catch by base class. # # Revision 1.3.2.1 2000/03/03 14:29:17 djr # Improvement to BOA skeletons (less generated code). # # Revision 1.3 2000/02/01 09:26:45 djs # Tracking fixes in old compiler: powerpc-aix scoped identifier workarounds # # Revision 1.2 2000/01/19 11:23:29 djs # Moved most C++ code to template file # # Revision 1.1 2000/01/18 18:05:53 djs # Extracted most C++ from header/defs and put in a template file. # General refactoring. # """C++ templates for the .hh file""" ## ## File header ## header = """\ // This file is generated by @program@- @library@. Do not edit. 
#ifndef @guard_prefix@__@guard@_hh__ #define @guard_prefix@__@guard@_hh__ """ footer = """\ #endif """ ## ## Main file ## main = """\ #ifndef __CORBA_H_EXTERNAL_GUARD__ #include <omniORB4/CORBA.h> #endif #ifndef USE_stub_in_nt_dll # define USE_stub_in_nt_dll_NOT_DEFINED_@guard@ #endif #ifndef USE_core_stub_in_nt_dll # define USE_core_stub_in_nt_dll_NOT_DEFINED_@guard@ #endif #ifndef USE_dyn_stub_in_nt_dll # define USE_dyn_stub_in_nt_dll_NOT_DEFINED_@guard@ #endif @sub_include_pre@ @cxx_direct_include@ @includes@ @sub_include_post@ #ifdef USE_stub_in_nt_dll # ifndef USE_core_stub_in_nt_dll # define USE_core_stub_in_nt_dll # endif # ifndef USE_dyn_stub_in_nt_dll # define USE_dyn_stub_in_nt_dll # endif #endif #ifdef _core_attr # error "A local CPP macro _core_attr has already been defined." #else # ifdef USE_core_stub_in_nt_dll # define _core_attr _OMNIORB_NTDLL_IMPORT # else # define _core_attr # endif #endif #ifdef _dyn_attr # error "A local CPP macro _dyn_attr has already been defined." #else # ifdef USE_dyn_stub_in_nt_dll # define _dyn_attr _OMNIORB_NTDLL_IMPORT # else # define _dyn_attr # endif #endif @forward_declarations@ @string_tcParser_declarations@ @defs@ @poa@ @obv@ @other_tie@ #undef _core_attr #undef _dyn_attr @operators@ @marshalling@ #ifdef USE_stub_in_nt_dll_NOT_DEFINED_@guard@ # undef USE_stub_in_nt_dll # undef USE_stub_in_nt_dll_NOT_DEFINED_@guard@ #endif #ifdef USE_core_stub_in_nt_dll_NOT_DEFINED_@guard@ # undef USE_core_stub_in_nt_dll # undef USE_core_stub_in_nt_dll_NOT_DEFINED_@guard@ #endif #ifdef USE_dyn_stub_in_nt_dll_NOT_DEFINED_@guard@ # undef USE_dyn_stub_in_nt_dll # undef USE_dyn_stub_in_nt_dll_NOT_DEFINED_@guard@ #endif #endif // __@guard@_hh__ """ sub_include_pre = """\ #ifdef INCLUDED_stub_in_nt_dll # ifdef USE_stub_in_nt_dll # error "cannot use both INCLUDED_stub_in_nt_dll and USE_stub_in_nt_dll." 
# else # define USE_stub_in_nt_dll # endif # define INCLUDED_stub_in_nt_dll_DEFINED_@guard@ # undef INCLUDED_stub_in_nt_dll #endif """ sub_include_post = """\ #ifdef INCLUDED_stub_in_nt_dll_DEFINED_@guard@ # undef USE_stub_in_nt_dll # define INCLUDED_stub_in_nt_dll # undef INCLUDED_stub_in_nt_dll_DEFINED_@guard@ #endif """ main_include = """\ #ifndef @guard_prefix@__@guardname@_EXTERNAL_GUARD__ #define @guard_prefix@__@guardname@_EXTERNAL_GUARD__ #include @filename@ #endif""" ## ## Modules ## # name => C++ form of the module identifier module_begin = """\ _CORBA_MODULE @name@ _CORBA_MODULE_BEG """ module_end = """\ _CORBA_MODULE_END """ POA_module_begin = """\ _CORBA_MODULE @POA_prefix@@name@ _CORBA_MODULE_BEG """ POA_module_end = """\ _CORBA_MODULE_END """ OBV_module_begin = """\ _CORBA_MODULE @OBV_prefix@@name@ _CORBA_MODULE_BEG """ OBV_module_end = """\ _CORBA_MODULE_END """ POA_interface = """\ class @POA_name@ : public virtual @impl_scopedID@, @inherits@ { public: virtual ~@POA_name@(); inline ::@scopedID@_ptr _this() { return (::@scopedID@_ptr) _do_this(::@scopedID@::_PD_repoId); } }; """ ## ## Interfaces ## interface_Helper = """\ #ifndef __@guard@__ #define __@guard@__ class @name@; class _objref_@name@; class _impl_@name@; @class_sk_name@ typedef _objref_@name@* @name@_ptr; typedef @name@_ptr @name@Ref; class @name@_Helper { public: typedef @name@_ptr _ptr_type; static _ptr_type _nil(); static _CORBA_Boolean is_nil(_ptr_type); static void release(_ptr_type); static void duplicate(_ptr_type); static void marshalObjRef(_ptr_type, cdrStream&); static _ptr_type unmarshalObjRef(cdrStream&); }; typedef _CORBA_ObjRef_Var<_objref_@name@, @name@_Helper> @name@_var; typedef _CORBA_ObjRef_OUT_arg<_objref_@name@,@name@_Helper > @name@_out; #endif """ interface_type = """\ // interface @name@ class @name@ { public: // Declarations for this interface type. 
typedef @name@_ptr _ptr_type; typedef @name@_var _var_type; static _ptr_type _duplicate(_ptr_type); static _ptr_type _narrow(::CORBA::Object_ptr); static _ptr_type _unchecked_narrow(::CORBA::Object_ptr); @abstract_narrows@ static _ptr_type _nil(); static inline void _marshalObjRef(_ptr_type, cdrStream&); static inline _ptr_type _unmarshalObjRef(cdrStream& s) { omniObjRef* o = omniObjRef::_unMarshal(_PD_repoId,s); if (o) return (_ptr_type) o->_ptrToObjRef(_PD_repoId); else return _nil(); } static _core_attr const char* _PD_repoId; // Other IDL defined within this scope. @Other_IDL@ }; """ interface_abstract_narrows = """\ static _ptr_type _narrow(::CORBA::AbstractBase_ptr); static _ptr_type _unchecked_narrow(::CORBA::AbstractBase_ptr); """ ## ## Abstract Interfaces ## abstract_interface_Helper = """\ #ifndef __@guard@__ #define __@guard@__ class @name@; class _objref_@name@; typedef @name@* @name@_ptr; typedef @name@_ptr @name@Ref; class @name@_Helper { public: typedef @name@_ptr _ptr_type; static _ptr_type _nil(); static _CORBA_Boolean is_nil(_ptr_type); static void release(_ptr_type); static void duplicate(_ptr_type); static void marshalObjRef(_ptr_type, cdrStream&); static _ptr_type unmarshalObjRef(cdrStream&); }; typedef _CORBA_ObjRef_Var<@name@, @name@_Helper> @name@_var; typedef _CORBA_ObjRef_OUT_arg<@name@,@name@_Helper > @name@_out; #endif """ abstract_interface_type = """\ // abstract interface @name@ class @name@ : @inherits@ { public: // Declarations for this interface type. 
typedef @name@_ptr _ptr_type; typedef @name@_var _var_type; static _ptr_type _duplicate(_ptr_type); static _ptr_type _narrow(::CORBA::AbstractBase_ptr); static _ptr_type _unchecked_narrow(::CORBA::AbstractBase_ptr); static _ptr_type _nil(); static inline void _marshalObjRef(_ptr_type, cdrStream&); static inline _ptr_type _unmarshalObjRef(cdrStream& s) { _CORBA_Boolean b = s.unmarshalBoolean(); if (b) { omniObjRef* o = omniObjRef::_unMarshal(_PD_repoId,s); if (o) return (_ptr_type) o->_ptrToObjRef(_PD_repoId); else return _nil(); } else { ::CORBA::ValueBase* v = ::CORBA::ValueBase::_NP_unmarshal(s); if (v) return (_ptr_type) v->_ptrToValue(_PD_repoId); else return 0; } } static _core_attr const char* _PD_repoId; // Other IDL defined within this scope. @Other_IDL@ // Operations declared in this abstract interface @operations@ }; """ ## ## Local Interfaces ## local_interface_Helper = """\ #ifndef __@guard@__ #define __@guard@__ class @name@; typedef @name@* @name@_ptr; typedef @name@_ptr @name@Ref; class @name@_Helper { public: typedef @name@_ptr _ptr_type; static _ptr_type _nil(); static _CORBA_Boolean is_nil(_ptr_type); static void release(_ptr_type); static void duplicate(_ptr_type); static void marshalObjRef(_ptr_type, cdrStream&); static _ptr_type unmarshalObjRef(cdrStream&); }; typedef _CORBA_ObjRef_Var<@name@, @name@_Helper> @name@_var; typedef _CORBA_ObjRef_OUT_arg<@name@,@name@_Helper > @name@_out; #endif """ local_interface_type = """\ // local interface @name@ class @name@ : @inherits@ { public: // Declarations for this interface type. 
typedef @name@_ptr _ptr_type; typedef @name@_var _var_type; static _ptr_type _duplicate(_ptr_type); static _ptr_type _narrow(::CORBA::Object_ptr); static _ptr_type _unchecked_narrow(::CORBA::Object_ptr); @abstract_narrows@ static _ptr_type _nil(); static inline void _marshalObjRef(_ptr_type, cdrStream& s) { OMNIORB_THROW(MARSHAL, _OMNI_NS(MARSHAL_LocalObject), (::CORBA::CompletionStatus)s.completion()); } static inline _ptr_type _unmarshalObjRef(cdrStream& s) { OMNIORB_THROW(MARSHAL, _OMNI_NS(MARSHAL_LocalObject), (::CORBA::CompletionStatus)s.completion()); #ifdef NEED_DUMMY_RETURN return 0; #endif } static _core_attr const char* _PD_repoId; // Other IDL defined within this scope. @Other_IDL@ // Operations declared in this local interface @operations@ private: virtual void* _ptrToObjRef(const char*); protected: @name@(); virtual ~@name@(); }; class _nil_@name@ : @nil_inherits@ public virtual @name@ { public: @nil_operations@ inline _nil_@name@() { _PR_setobj(0); } protected: virtual ~_nil_@name@(); }; """ ## ## Object reference ## interface_objref = """\ class _objref_@name@ : @inherits@ { public: @operations@ inline _objref_@name@() @init_shortcut@ { _PR_setobj(0); } // nil _objref_@name@(omniIOR*, omniIdentity*); protected: virtual ~_objref_@name@(); @shortcut@ private: virtual void* _ptrToObjRef(const char*); _objref_@name@(const _objref_@name@&); _objref_@name@& operator = (const _objref_@name@&); // not implemented friend class @name@; }; """ interface_shortcut = """\ virtual void _enableShortcut(omniServant*, const _CORBA_Boolean*); _impl_@name@* _shortcut; const _CORBA_Boolean* _invalid;\ """ ## ## Proxy Object Factory ## interface_pof = """\ class _pof_@name@ : public _OMNI_NS(proxyObjectFactory) { public: inline _pof_@name@() : _OMNI_NS(proxyObjectFactory)(@name@::_PD_repoId) {} virtual ~_pof_@name@(); virtual omniObjRef* newObjRef(omniIOR*,omniIdentity*); virtual _CORBA_Boolean is_a(const char*) const; }; """ ## ## Interface Impl class ## interface_impl = 
"""\ class _impl_@name@ : @inherits@ { public: virtual ~_impl_@name@(); @operations@ public: // Really protected, workaround for xlC virtual _CORBA_Boolean _dispatch(omniCallHandle&); private: virtual void* _ptrToInterface(const char*); virtual const char* _mostDerivedRepoId(); @abstract@ }; """ interface_impl_abstract = """\ virtual void _interface_is_abstract() = 0;""" interface_impl_not_abstract = """\ virtual void _interface_is_abstract();""" ## ## Old BOA skeleton class ## interface_sk = """\ class _sk_@name@ : public virtual _impl_@name@, @inherits@ { public: _sk_@name@() {} _sk_@name@(const omniOrbBoaKey&); virtual ~_sk_@name@(); inline @name@::_ptr_type _this() { return (@name@::_ptr_type) omniOrbBoaServant::_this(@name@::_PD_repoId); } }; """ ## ## Objref marshal function ## interface_marshal_forward = """\ inline void @name@::_marshalObjRef(::@name@_ptr obj, cdrStream& s) { omniObjRef::_marshal(obj->_PR_getobj(),s); } """ abstract_interface_marshal_forward = """\ inline void @name@::_marshalObjRef(::@name@_ptr obj, cdrStream& s) { if (obj) { ::CORBA::ValueBase* v = obj->_NP_to_value(); if (v) { s.marshalBoolean(0); ::CORBA::ValueBase::_NP_marshal(v,s); return; } ::CORBA::Object_ptr o = obj->_NP_to_object(); if (o) { s.marshalBoolean(1); omniObjRef::_marshal(o->_PR_getobj(),s); return; } } s.marshalBoolean(0); ::CORBA::ValueBase::_NP_marshal(0, s); } """ ## ## Typedefs ## typedef_simple_to_array = """\ typedef @base@ @derived@; typedef @base@_slice @derived@_slice; typedef @base@_copyHelper @derived@_copyHelper; typedef @base@_var @derived@_var; typedef @base@_out @derived@_out; typedef @base@_forany @derived@_forany; @inline_qualifier@ @derived@_slice* @derived@_alloc() { return @base@_alloc(); } @inline_qualifier@ @derived@_slice* @derived@_dup(const @derived@_slice* p) { return @base@_dup(p); } @inline_qualifier@ void @derived@_copy( @derived@_slice* _to, const @derived@_slice* _from ) { @base@_copy(_to, _from); } @inline_qualifier@ void @derived@_free( 
@derived@_slice* p) { @base@_free(p); } """ typedef_simple_string = """\ typedef char* @name@; typedef ::CORBA::String_var @name@_var; typedef ::CORBA::String_out @name@_out; """ typedef_simple_wstring = """\ typedef ::CORBA::WChar* @name@; typedef ::CORBA::WString_var @name@_var; typedef ::CORBA::WString_out @name@_out; """ typedef_simple_typecode = """\ typedef ::CORBA::TypeCode_ptr @name@_ptr; typedef ::CORBA::TypeCode_var @name@_var; """ typedef_simple_any = """\ typedef ::CORBA::Any @name@; typedef ::CORBA::Any_var @name@_var; typedef ::CORBA::Any_out @name@_out; """ typedef_simple_fixed = """\ typedef _omni_Fixed<@digits@,@scale@> @name@; typedef @name@& @name@_out; """ typedef_simple_basic = """\ typedef @base@ @derived@; typedef @base@_out @derived@_out; """ typedef_simple_constructed = """\ typedef @base@ @name@; typedef @base@_var @name@_var; typedef @base@_out @name@_out; """ typedef_simple_objref = """\ typedef @base@ @name@; typedef @base@_ptr @name@_ptr; typedef @base@Ref @name@Ref; @impl_base@ typedef @base@_Helper @name@_Helper; @objref_base@ typedef @base@_var @name@_var; typedef @base@_out @name@_out; """ typedef_enum_oper_friend = """\ // Need to declare <<= for elem type, as GCC expands templates early #if defined(__GNUG__) && __GNUG__ == 2 && __GNUC_MINOR__ == 7 @friend@ inline void operator >>= (@element@, cdrStream&); @friend@ inline void operator <<= (@element@&, cdrStream&); #endif """ # Arrays typedef_array = """\ typedef @type@ @name@@dims@; typedef @type@ @name@_slice@taildims@; @inline_qualifier@ @name@_slice* @name@_alloc() { return new @name@_slice[@firstdim@]; } @inline_qualifier@ @name@_slice* @name@_dup(const @name@_slice* _s) { if (!_s) return 0; @name@_slice* _data = @name@_alloc(); if (_data) { @dup_loop@ } return _data; } @inline_qualifier@ void @name@_copy(@name@_slice* _to, const @name@_slice* _from){ @copy_loop@ } @inline_qualifier@ void @name@_free(@name@_slice* _s) { delete [] _s; } """ typedef_array_copyHelper = """\ 
class @name@_copyHelper { public: static inline @name@_slice* alloc() { return ::@fqname@_alloc(); } static inline @name@_slice* dup(const @name@_slice* p) { return ::@fqname@_dup(p); } static inline void free(@name@_slice* p) { ::@fqname@_free(p); } }; typedef _CORBA_Array_@var_or_fix@_Var<@name@_copyHelper,@name@_slice> @name@_var; typedef _CORBA_Array_@var_or_fix@_Forany<@name@_copyHelper,@name@_slice> @name@_forany; """ typedef_array_fix_out_type = """\ typedef @name@_slice* @name@_out; """ typedef_array_variable_out_type = """\ typedef _CORBA_Array_Variable_OUT_arg<@name@_slice,@name@_var > @name@_out; """ ## ## Sequences ## sequence_type = """\ class @name@_var; class @name@ : public @derived@ { public: typedef @name@_var _var_type; inline @name@() {} inline @name@(const @name@& _s) : @derived@(_s) {} @bounds@ inline @name@& operator = (const @name@& _s) { @derived@::operator=(_s); return *this; } }; """ sequence_forward_type = """\ class @name@_var; class @name@ : public @derived@ { public: typedef @name@_var _var_type; inline @name@() {} @name@(const @name@& _s); @name@& operator=(const @name@& _s); @bounds@ virtual ~@name@(); @element@& operator[] (_CORBA_ULong _index); const @element@& operator[] (_CORBA_ULong _index) const; static @element@* allocbuf(_CORBA_ULong _nelems); static void freebuf(@element@* _b); void operator>>= (cdrStream &_s) const; void operator<<= (cdrStream &_s); protected: void NP_copybuffer(_CORBA_ULong _newmax); void NP_freebuf(); }; """ sequence_unbounded_ctors = """\ inline @name@(_CORBA_ULong _max) : @derived@(_max) {} inline @name@(_CORBA_ULong _max, _CORBA_ULong _len, @element@* _val, _CORBA_Boolean _rel=0) : @derived@(_max, _len, _val, _rel) {} """ sequence_bounded_ctors = """\ inline @name@(_CORBA_ULong _len, @element@* _val, _CORBA_Boolean _rel=0) : @derived@(_len, _val, _rel) {} """ sequence_var_array_subscript = """\ inline @element@_slice* operator [] (_CORBA_ULong _s) { return (@element@_slice*) 
((_pd_seq->NP_data())[_s]); } """ sequence_var_subscript = """\ inline @element@ operator [] (_CORBA_ULong _s) { return (*_pd_seq)[_s]; } """ sequence_var = """\ class @name@_out; class @name@_var { public: inline @name@_var() : _pd_seq(0) {} inline @name@_var(@name@* _s) : _pd_seq(_s) {} inline @name@_var(const @name@_var& _s) { if( _s._pd_seq ) _pd_seq = new @name@(*_s._pd_seq); else _pd_seq = 0; } inline ~@name@_var() { if( _pd_seq ) delete _pd_seq; } inline @name@_var& operator = (@name@* _s) { if( _pd_seq ) delete _pd_seq; _pd_seq = _s; return *this; } inline @name@_var& operator = (const @name@_var& _s) { if( _s._pd_seq ) { if( !_pd_seq ) _pd_seq = new @name@; *_pd_seq = *_s._pd_seq; } else if( _pd_seq ) { delete _pd_seq; _pd_seq = 0; } return *this; } @subscript_operator@ inline @name@* operator -> () { return _pd_seq; } inline const @name@* operator -> () const { return _pd_seq; } #if defined(__GNUG__) inline operator @name@& () const { return *_pd_seq; } #else inline operator const @name@& () const { return *_pd_seq; } inline operator @name@& () { return *_pd_seq; } #endif inline const @name@& in() const { return *_pd_seq; } inline @name@& inout() { return *_pd_seq; } inline @name@*& out() { if( _pd_seq ) { delete _pd_seq; _pd_seq = 0; } return _pd_seq; } inline @name@* _retn() { @name@* tmp = _pd_seq; _pd_seq = 0; return tmp; } friend class @name@_out; private: @name@* _pd_seq; }; """ sequence_out_array_subscript = """\ inline @element@_slice* operator [] (_CORBA_ULong _i) { return (@element@_slice*) ((_data->NP_data())[_i]); } """ sequence_out_subscript = """\ inline @element@ operator [] (_CORBA_ULong _i) { return (*_data)[_i]; } """ sequence_out = """\ class @name@_out { public: inline @name@_out(@name@*& _s) : _data(_s) { _data = 0; } inline @name@_out(@name@_var& _s) : _data(_s._pd_seq) { _s = (@name@*) 0; } inline @name@_out(const @name@_out& _s) : _data(_s._data) {} inline @name@_out& operator = (const @name@_out& _s) { _data = _s._data; return 
*this; } inline @name@_out& operator = (@name@* _s) { _data = _s; return *this; } inline operator @name@*&() { return _data; } inline @name@*& ptr() { return _data; } inline @name@* operator->() { return _data; } @subscript_operator@ @name@*& _data; private: @name@_out(); @name@_out& operator=(const @name@_var&); }; """ ## ## Structs ## struct = """\ struct @name@ { typedef _CORBA_ConstrType_@fix_or_var@_Var<@name@> _var_type; @Other_IDL@ @members@ void operator>>= (cdrStream &) const; void operator<<= (cdrStream &); }; typedef @name@::_var_type @name@_var; """ struct_fix_out_type = """\ typedef @name@& @name@_out; """ struct_variable_out_type = """\ typedef _CORBA_ConstrType_Variable_OUT_arg< @name@,@name@_var > @name@_out; """ struct_array_declarator = """\ typedef @memtype@ @prefix@_@cxx_id@@dims@; typedef @memtype@ _@cxx_id@_slice@tail_dims@; """ struct_nonarray_sequence = """\ typedef @memtype@ _@cxx_id@_seq; _@cxx_id@_seq @cxx_id@; """ struct_normal_member = """\ @memtype@ @cxx_id@@dims@; """ struct_forward = """\ struct @name@; """ ## ## Exceptions ## exception = """\ class @name@ : public ::CORBA::UserException { public: @Other_IDL@ @members@ inline @name@() { pd_insertToAnyFn = insertToAnyFn; pd_insertToAnyFnNCP = insertToAnyFnNCP; } @name@(const @name@&); @constructor@ @name@& operator=(const @name@&); virtual ~@name@(); virtual void _raise() const; static @name@* _downcast(::CORBA::Exception*); static const @name@* _downcast(const ::CORBA::Exception*); static inline @name@* _narrow(::CORBA::Exception* _e) { return _downcast(_e); } @inline@void operator>>=(cdrStream&) const @body@ @inline@void operator<<=(cdrStream&) @body@ static _core_attr insertExceptionToAny insertToAnyFn; static _core_attr insertExceptionToAnyNCP insertToAnyFnNCP; virtual ::CORBA::Exception* _NP_duplicate() const; static _core_attr const char* _PD_repoId; static _core_attr const char* _PD_typeId; private: virtual const char* _NP_typeId() const; virtual const char* _NP_repoId(int*) 
const; virtual void _NP_marshal(cdrStream&) const; }; """ exception_array_declarator = """\ typedef @memtype@ @private_prefix@_@cxx_id@@dims@; typedef @memtype@ _@cxx_id@_slice@tail_dims@; """ exception_member = """\ @memtype@ @cxx_id@@dims@; """ ## ## Unions ## union_ctor_nonexhaustive = """\ if ((_pd__default = _value._pd__default)) { @default@ } else { switch(_value._pd__d) { @cases@ } } _pd__d = _value._pd__d; """ union_ctor_exhaustive = """\ switch(_value._pd__d) { @cases@ } _pd__d = _value._pd__d;""" union_ctor_case = """\ case @discrimvalue@: @name@(_value._pd_@name@); break; """ union_ctor_bool_default = """\ #ifndef HAS_Cplusplus_Bool default: break; #endif """ union_ctor_default = """\ default: break; """ union = """\ class @unionname@ { public: typedef _CORBA_ConstrType_@fixed@_Var<@unionname@> _var_type; @Other_IDL@ @unionname@(): _pd__initialised(0) { @default_constructor@ } @unionname@(const @unionname@& _value) { _pd__initialised = _value._pd__initialised; @copy_constructor@ } ~@unionname@() {} @unionname@& operator=(const @unionname@& _value) { _pd__initialised = _value._pd__initialised; @copy_constructor@ return *this; } @discrimtype@ _d() const { return _pd__d;} void _d(@discrimtype@ _value){ @_d_body@ } @implicit_default@ @members@ void operator>>= (cdrStream&) const; void operator<<= (cdrStream&); private: @discrimtype@ _pd__d; _CORBA_Boolean _pd__default; _CORBA_Boolean _pd__initialised; @union@ @outsideUnion@ }; typedef @unionname@::_var_type @unionname@_var; """ union_fix_out_type = """\ typedef @unionname@& @unionname@_out; """ union_variable_out_type = """\ typedef _CORBA_ConstrType_Variable_OUT_arg< @unionname@,@unionname@_var > @unionname@_out; """ union_union = """\ union { @members@ }; """ union_d_fn_body = """\ // illegal to set discriminator before making a member active if (!_pd__initialised) OMNIORB_THROW(BAD_PARAM,_OMNI_NS(BAD_PARAM_InvalidUnionDiscValue),::CORBA::COMPLETED_NO); if (_value == _pd__d) return; // no change @switch@ 
fail: OMNIORB_THROW(BAD_PARAM,_OMNI_NS(BAD_PARAM_InvalidUnionDiscValue),::CORBA::COMPLETED_NO); """ union_constructor_implicit = """\ _default(); """ union_constructor_default = """\ _pd__default = 1; _pd__d = @default@; """ union_implicit_default = """\ void _default() { _pd__initialised = 1; _pd__d = @arbitraryDefault@; _pd__default = 1; } """ union_proxy_float = """ #ifdef USING_PROXY_FLOAT @type@ _pd_@name@@dims@; #endif """ union_noproxy_float = """ #ifndef USING_PROXY_FLOAT @type@ _pd_@name@@dims@; #endif """ union_array_declarator = """\ typedef @memtype@ @prefix@_@name@@dims@; typedef @memtype@ _@name@_slice@tail_dims@; """ union_array = """\ const @memtype@_slice *@name@ () const { return _pd_@name@; } void @name@ (const @const_type@ _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; @loop@ } """ union_any = """\ const @type@ &@name@ () const { return _pd_@name@; } @type@ &@name@ () { return _pd_@name@; } void @name@ (const @type@& _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = _value; } """ union_typecode = """\ ::CORBA::TypeCode_ptr @name@ () const { return _pd_@name@._ptr; } void @name@(::CORBA::TypeCode_ptr _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = ::CORBA::TypeCode::_duplicate(_value); } void @name@(const ::CORBA::TypeCode_member& _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = _value; } void @name@(const ::CORBA::TypeCode_var& _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = _value; } """ union_basic = """\ @type@ @name@ () const { return _pd_@name@; } void @name@ (@type@ _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = _value; } """ union_string = """\ const char * @name@ () const { return (const char*) _pd_@name@; } void @name@(char* _value) { _pd__initialised = 1; 
_pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = _value; } void @name@(const char* _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = _value; } void @name@(const ::CORBA::String_var& _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = _value; } void @name@(const ::CORBA::String_member& _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = _value; } """ union_wstring = """\ const ::CORBA::WChar * @name@ () const { return (const ::CORBA::WChar*) _pd_@name@; } void @name@(::CORBA::WChar* _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = _value; } void @name@(const ::CORBA::WChar* _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = _value; } void @name@(const ::CORBA::WString_var& _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = _value; } void @name@(const ::CORBA::WString_member& _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = _value; } """ union_objref = """\ @ptr_name@ @member@ () const { return _pd_@member@._ptr; } void @member@(@ptr_name@ _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; @Helper_name@::duplicate(_value); _pd_@member@ = _value; } void @member@(const @memtype@& _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@member@ = _value; } void @member@(const @var_name@& _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@member@ = _value; } """ union_constructed = """\ const @type@ &@name@ () const { return _pd_@name@; } @type@ &@name@ () { return _pd_@name@; } void @name@ (const @type@& _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@name@ = _value; } """ 
union_sequence = """\ typedef @sequence_template@ _@member@_seq; const _@member@_seq& @member@ () const { return _pd_@member@; } _@member@_seq& @member@ () { return _pd_@member@; } void @member@ (const _@member@_seq& _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; _pd_@member@ = _value; } """ union_value = """\ @type@* @member@() const { return _pd_@member@.in(); } void @member@(@type@* _value) { _pd__initialised = 1; _pd__d = @discrimvalue@; _pd__default = @isDefault@; ::CORBA::add_ref(_value); _pd_@member@ = _value; } """ union_member = """\ @type@ _pd_@name@@dims@; """ union_forward = """\ class @name@; """ ## ## Enum ## enum = """\ enum @name@ { @memberlist@ /*, __max_@name@=0xffffffff */ }; typedef @name@& @name@_out; """ ## ## Const ## const_inclass_isinteger = """\ static _core_attr const @type@ @name@ _init_in_cldecl_( = @val@ ); """ const_inclass_notinteger = """\ static _core_attr const @type@ @name@; """ const_outsideclass_isinteger = """\ _CORBA_@where@_VARINT const @type@ @name@ _init_in_decl_( = @val@ ); """ const_outsideclass_notinteger = """\ _CORBA_@where@_VAR _core_attr const @type@ @name@; """ ## ## Typecode_ptr ## typecode = """\ @qualifier@ _dyn_attr const ::CORBA::TypeCode_ptr _tc_@name@; """ ## ## Operators ## any_struct = """\ extern void operator<<=(::CORBA::Any& _a, const @fqname@& _s); extern void operator<<=(::CORBA::Any& _a, @fqname@* _sp); extern _CORBA_Boolean operator>>=(const ::CORBA::Any& _a, @fqname@*& _sp); extern _CORBA_Boolean operator>>=(const ::CORBA::Any& _a, const @fqname@*& _sp); """ any_exception = """\ void operator<<=(::CORBA::Any& _a, const @fqname@& _s); void operator<<=(::CORBA::Any& _a, const @fqname@* _sp); _CORBA_Boolean operator>>=(const ::CORBA::Any& _a, const @fqname@*& _sp); """ any_union = """\ void operator<<=(::CORBA::Any& _a, const @fqname@& _s); void operator<<=(::CORBA::Any& _a, @fqname@* _sp); _CORBA_Boolean operator>>=(const ::CORBA::Any& _a, const @fqname@*& _sp); 
_CORBA_Boolean operator>>=(const ::CORBA::Any& _a, @fqname@*& _sp); """ any_enum = """\ void operator<<=(::CORBA::Any& _a, @name@ _s); _CORBA_Boolean operator>>=(const ::CORBA::Any& _a, @name@& _s); """ any_interface = """\ void operator<<=(::CORBA::Any& _a, @fqname@_ptr _s); void operator<<=(::CORBA::Any& _a, @fqname@_ptr* _s); _CORBA_Boolean operator>>=(const ::CORBA::Any& _a, @fqname@_ptr& _s); """ any_array_declarator = """\ void operator<<=(::CORBA::Any& _a, const @fqname@_forany& _s); _CORBA_Boolean operator>>=(const ::CORBA::Any& _a, @fqname@_forany& _s); """ any_sequence = """\ void operator<<=(::CORBA::Any& _a, const @fqname@& _s); void operator<<=(::CORBA::Any& _a, @fqname@* _sp); _CORBA_Boolean operator>>=(const ::CORBA::Any& _a, @fqname@*& _sp); _CORBA_Boolean operator>>=(const ::CORBA::Any& _a, const @fqname@*& _sp); """ any_value = """\ void operator<<=(::CORBA::Any& _a, @fqname@* _s); void operator<<=(::CORBA::Any& _a, @fqname@** _s); _CORBA_Boolean operator>>=(const ::CORBA::Any& _a, @fqname@*& _s); """ enum_operators = """\ inline void operator >>=(@name@ _e, cdrStream& s) { ::operator>>=((::CORBA::ULong)_e, s); } inline void operator <<= (@name@& _e, cdrStream& s) { ::CORBA::ULong @private_prefix@_e; ::operator<<=(@private_prefix@_e,s); if (@private_prefix@_e <= @last_item@) { _e = (@name@) @private_prefix@_e; } else { OMNIORB_THROW(MARSHAL,_OMNI_NS(MARSHAL_InvalidEnumValue), (::CORBA::CompletionStatus)s.completion()); } } """ ## ## tie template ## tie_template = """\ template <class _omniT> class @tie_name@ : public virtual @inherits@ { public: @tie_name@(_omniT& t) : pd_obj(&t), pd_poa(0), pd_rel(0) {} @tie_name@(_omniT& t, ::PortableServer::POA_ptr p) : pd_obj(&t), pd_poa(p), pd_rel(0) {} @tie_name@(_omniT* t, _CORBA_Boolean r=1) : pd_obj(t), pd_poa(0), pd_rel(r) {} @tie_name@(_omniT* t, ::PortableServer::POA_ptr p,_CORBA_Boolean r=1) : pd_obj(t), pd_poa(p), pd_rel(r) {} ~@tie_name@() { if( pd_poa ) ::CORBA::release(pd_poa); if( pd_rel ) delete 
pd_obj; } _omniT* _tied_object() { return pd_obj; } void _tied_object(_omniT& t) { if( pd_rel ) delete pd_obj; pd_obj = &t; pd_rel = 0; } void _tied_object(_omniT* t, _CORBA_Boolean r=1) { if( pd_rel ) delete pd_obj; pd_obj = t; pd_rel = r; } _CORBA_Boolean _is_owner() { return pd_rel; } void _is_owner(_CORBA_Boolean io) { pd_rel = io; } ::PortableServer::POA_ptr _default_POA() { if( !pd_poa ) return ::PortableServer::POA::_the_root_poa(); else return ::PortableServer::POA::_duplicate(pd_poa); } @callables@ private: _omniT* pd_obj; ::PortableServer::POA_ptr pd_poa; _CORBA_Boolean pd_rel; }; """ tie_template_old = """\ template <class _omniT, _CORBA_Boolean release> class @tie_name@ : public virtual @inherits@ { public: @tie_name@(_omniT& t) : pd_obj(&t), pd_rel(release) {} @tie_name@(_omniT* t) : pd_obj(t), pd_rel(release) {} ~@tie_name@() { if( pd_rel ) delete pd_obj; } @callables@ private: _omniT* pd_obj; _CORBA_Boolean pd_rel; }; """ ## ## tc_string ## tcstring = """\ #if !defined(___tc_string_@n@__) && !defined(DISABLE_Unnamed_Bounded_String_TC) #define ___tc_string_@n@__ _CORBA_GLOBAL_VAR _dyn_attr const ::CORBA::TypeCode_ptr _tc_string_@n@; #endif """ ## ## tc_wstring ## tcwstring = """\ #if !defined(___tc_wstring_@n@__) && !defined(DISABLE_Unnamed_Bounded_WString_TC) #define ___tc_wstring_@n@__ _CORBA_GLOBAL_VAR _dyn_attr const ::CORBA::TypeCode_ptr _tc_wstring_@n@; #endif """
ogata-lab/rtmsdk-mac
x86_64/lib/python2.7/site-packages/omniidl_be/cxx/header/template.py
Python
lgpl-2.1
39,776
0.001282
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Multiple documentation build configuration file, created by # sphinx-quickstart on Thu Apr 14 09:34:49 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', # 'sphinx.ext.doctest', # 'sphinx.ext.todo', # 'sphinx.ext.coverage', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Multiple' copyright = '2016, Wevolver' author = 'Wevolver' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0' # The full version, including alpha/beta/rc tags. 
release = '0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. # "<project> v<release> documentation" by default. #html_title = 'Multiple v0' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. # The empty string is equivalent to '%b %d, %Y'. #html_last_updated_fmt = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. 
#html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'Multipledoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Multiple.tex', 'Multiple Documentation', 'Wevolver', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. 
#latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'multiple', 'Multiple Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Multiple', 'Multiple Documentation', author, 'Multiple', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
Wevolver/HAVE
docs/source/conf.py
Python
gpl-3.0
9,459
0.006026
# Project Euler, problem 1: sum of all natural numbers below 1000 that are
# multiples of 3 or 5.
#
# Fixes over the original: the accumulator no longer shadows the builtin
# `sum`; the manual accumulation loop is replaced by `sum()` over a
# generator expression; `print(total)` is valid under both Python 2 and 3
# (the original `print total` statement was Python-2 only).
total = sum(i for i in range(1, 1000) if i % 3 == 0 or i % 5 == 0)
print(total)
huangshenno1/algo
project_euler/1.py
Python
mit
84
0.02381
""" Django rules for student roles """ from __future__ import absolute_import import rules from lms.djangoapps.courseware.access import has_access from openedx.core.djangoapps.waffle_utils import CourseWaffleFlag, WaffleFlag, WaffleFlagNamespace from .roles import CourseDataResearcherRole # Waffle flag to enable the separate course outline page and full width content. RESEARCHER_ROLE = CourseWaffleFlag(WaffleFlagNamespace(name='instructor'), 'researcher') @rules.predicate def can_access_reports(user, course_id): """ Returns whether the user can access the course data downloads. """ is_staff = user.is_staff if RESEARCHER_ROLE.is_enabled(course_id): return is_staff or CourseDataResearcherRole(course_id).has_user(user) else: return is_staff or has_access(user, 'staff', course_id) rules.add_perm('student.can_research', can_access_reports)
cpennington/edx-platform
common/djangoapps/student/rules.py
Python
agpl-3.0
894
0.004474
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Tangible() result.template = "object/tangible/component/weapon/shared_projectile_feed_mechanism.iff" result.attribute_template_id = -1 result.stfName("craft_weapon_ingredients_n","projectile_feed_mechanism") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
obi-two/Rebelion
data/scripts/templates/object/tangible/component/weapon/shared_projectile_feed_mechanism.py
Python
mit
498
0.044177
""" Entry point to API application. This will be for running simple checks on the application """ from flask import jsonify, url_for, redirect, request from flask_login import current_user from . import home from ..__meta__ import __version__, __project__, __copyright__ @home.route("") @home.route("home") @home.route("index") def index(): """ Entry point into the app :return: renders the api information """ return jsonify({ "version": __version__, "project": __project__, "copyright": __copyright__ })
BrianLusina/Arco
server/app/mod_home/views.py
Python
mit
557
0.001795
from django.contrib import messages
from django.views.generic.base import ContextMixin

from edc_constants.constants import OPEN

from ..models import DataActionItem
from ..model_wrappers import DataActionItemModelWrapper
from .user_details_check_view_mixin import UserDetailsCheckViewMixin


class DataActionItemsViewMixin(UserDetailsCheckViewMixin, ContextMixin):
    """Mixin that surfaces a subject's open/stalled/resolved
    ``DataActionItem`` records to the template context and flashes an
    error-level message for each pending item.

    Relies on ``self.subject_identifier`` being provided elsewhere in
    the view's MRO (it is not defined here).
    """

    # Template used to render the data-manager section of the page.
    data_action_item_template = 'edc_data_manager/data_manager.html'

    @property
    def data_action_item(self):
        """Return a wrapped, unsaved ``DataActionItem`` for the current
        subject (used by templates for the "add" URL via ``.href``).
        """
        model_obj = DataActionItem(subject_identifier=self.subject_identifier)
        return DataActionItemModelWrapper(model_obj=model_obj)

    def data_action_items(self):
        """Return the subject's DataActionItem records with status
        open/stalled/resolved, ordered by issue number.

        NOTE(review): despite the local name, items are appended
        unwrapped (DataActionItemModelWrapper is not applied here) --
        confirm whether wrapping was intended.
        """
        wrapped_data_action_items = []
        status = [OPEN, 'stalled', 'resolved']
        data_action_items = DataActionItem.objects.filter(
            subject_identifier=self.subject_identifier,
            status__in=status).order_by('issue_number')
        for data_action_item in data_action_items:
            wrapped_data_action_items.append(data_action_item)
        return wrapped_data_action_items

    def get_context_data(self, **kwargs):
        """Add the data-manager template name, the "add" URL and the
        item list to the context, flashing one message per pending item.
        """
        context = super().get_context_data(**kwargs)
        status = [OPEN, 'stalled', 'resolved']
        data_action_items = DataActionItem.objects.filter(
            subject_identifier=self.subject_identifier,
            status__in=status).order_by('issue_number')
        msg = ''
        for data_action_item in data_action_items:
            # One flash message per item so every pending issue is visible.
            msg = (f'Issue {data_action_item.issue_number}. Pending action'
                   f' created by {data_action_item.user_created}. '
                   f'{data_action_item.subject} Assigned to '
                   f'{data_action_item.assigned}')
            messages.add_message(
                self.request, messages.ERROR, msg)
        context.update(
            data_action_item_template=self.data_action_item_template,
            data_action_item_add_url=self.data_action_item.href,
            # NOTE(review): this stores the bound method itself, not its
            # return value; Django templates will call it lazily, but any
            # non-template consumer gets the callable -- confirm intended.
            data_action_items=self.data_action_items)
        return context
botswana-harvard/edc-data-manager
edc_data_manager/view_mixins/data_manager_view_mixin.py
Python
gpl-2.0
2,162
0
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def check_c_count(expected_count): test.assertEqual(expected_count, len(reality.resources_by_logical_name('C'))) example_template = Template({ 'A': RsrcDef({'a': 'initial'}, []), 'B': RsrcDef({}, []), 'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']), 'D': RsrcDef({'c': GetRes('C')}, []), 'E': RsrcDef({'ca': GetAtt('C', '!a')}, []), }) engine.create_stack('foo', example_template) engine.noop(5) engine.call(verify, example_template) example_template2 = Template({ 'A': RsrcDef({'a': 'updated'}, []), 'B': RsrcDef({}, []), 'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']), 'D': RsrcDef({'c': GetRes('C')}, []), 'E': RsrcDef({'ca': GetAtt('C', '!a')}, []), }) engine.update_stack('foo', example_template2) engine.noop(4) engine.rollback_stack('foo') engine.call(check_c_count, 2) engine.noop(11) engine.call(verify, example_template) engine.delete_stack('foo') engine.noop(12) engine.call(verify, Template({}))
dims/heat
heat/tests/convergence/scenarios/update_replace_rollback.py
Python
apache-2.0
1,550
0.000645
""" """ import os import pandas from matplotlib import pyplot from matplotlib import dates as mdates import matplotlib.ticker as mticker PROCESSED_DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), os.pardir, "processed_data" ) def insert_nans(station_df): """ Only when NaNs are present, the line is discontinued. :param station_df: :return: """ reference_df = pandas.DataFrame( index=pandas.date_range(station_df.index[0], station_df.index[-1], freq='H', name="datetime"), ) return station_df.join(reference_df, how="outer") class GermanDateFormatter(mdates.DateFormatter): """ As the Windows locales are wrong (no dot after abbreviations like what the Duden tells us to do) this is the home-brew solution """ def __init__(self): super().__init__(self) import locale locale.setlocale(locale.LC_ALL, 'de') self.month_formatter = mdates.DateFormatter('%b') def strftime(self, dt, fmt=None): windows_month_name = dt.strftime("%b") if windows_month_name == "Mrz": return "März" if windows_month_name == "Mai": return "Mai" if windows_month_name == "Jun": return "Juni" if windows_month_name == "Jul": return "Juli" if windows_month_name == "Sep": return "Sept." abbreviated_month_name = windows_month_name + "." return abbreviated_month_name def style_year_2016_plot(ax): ax.set_ylabel('Temperatur (°C)') ax.set_xlabel('2016') ax.margins(x=0) ax.yaxis.set_major_locator(mticker.MultipleLocator(5)) # draw line every 5 °C pyplot.grid(color='.9') # a very light gray ax.xaxis.set_major_locator(mdates.MonthLocator()) ax.xaxis.set_major_formatter(GermanDateFormatter())
1kastner/analyse_weather_data
plot_weather_data/__init__.py
Python
agpl-3.0
1,869
0.003215
# Touchy is Copyright (c) 2009 Chris Radek <chris@timeguy.com>
#
# Touchy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Touchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.


# self.mcodes = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 30, 48, 49, 50, 51,
#                52, 53, 60, 61, 62, 63, 64, 65, 66, 67, 68)
#
# self.gcodes = (0, 10, 20, 30, 40, 50, 51, 52, 53, 70, 80, 100,
#                170, 171, 180, 181, 190, 191, 200, 210, 280, 281,
#                300, 301, 330, 331, 382, 383, 384, 385, 400, 410,
#                411, 420, 421, 430, 431, 490, 530, 540, 550, 560,
#                570, 580, 590, 591, 592, 593, 610, 611, 640, 730,
#                760, 800, 810, 820, 830, 840, 850, 860, 870, 880,
#                890, 900, 901, 910, 911, 920, 921, 922, 923, 930,
#                940, 950, 960, 970, 980, 990)


class mdi:
    """Builds and issues MDI command strings for LinuxCNC.

    Knows, per G/M-code, which words (S, F, X, ...) it takes; collects
    word values via set_word() and sends the assembled command with
    issue().  Python 2 code (note ``has_key`` in get_words).
    """

    def __init__(self, emc):
        # ``emc`` is the linuxcnc module; keep stat and command channels.
        self.clear()
        self.emc = emc
        self.emcstat = emc.stat()
        self.emccommand = emc.command()
        self.emcstat.poll()
        am = self.emcstat.axis_mask
        self.axes = []
        self.polar = 0
        axisnames = ['X', 'Y', 'Z', 'A', 'B', 'C', 'U', 'V', 'W']
        # Bit i of axis_mask set means the machine has that axis.
        for i in range(9):
            if am & (1<<i):
                self.axes.append(axisnames[i])
        self.gcode = 'M2'
        # Map code -> [description, word, word, ...].  The pseudo-word
        # 'A' is expanded to the machine's real axis names in get_words().
        self.codes = {
            'M3' : [_('Spindle CW'), 'S'],
            'M4' : [_('Spindle CCW'), 'S'],
            'M6' : [_('Tool change'), 'T'],
            'M61' : [_('Set tool number'), 'Q'],
            'M66' : [_('Input control'), 'P', 'E', 'L', 'Q'],

            # 'A' means 'the axes'
            'G0' : [_('Straight rapid'), 'A'],
            'G00' : [_('Straight rapid'), 'A'],
            'G1' : [_('Straight feed'), 'A', 'F'],
            'G01' : [_('Straight feed'), 'A', 'F'],
            'G2' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'F'],
            'G02' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'F'],
            'G3' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'F'],
            'G03' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'F'],
            'G4' : [_('Dwell'), 'P'],
            'G04' : [_('Dwell'), 'P'],
            'G10' : [_('Setup'), 'L', 'P', 'A', 'Q', 'R'],
            'G33' : [_('Spindle synchronized feed'), 'A', 'K'],
            'G33.1' : [_('Rigid tap'), 'Z', 'K'],
            'G38.2' : [_('Probe'), 'A', 'F'],
            'G38.3' : [_('Probe'), 'A', 'F'],
            'G38.4' : [_('Probe'), 'A', 'F'],
            'G38.5' : [_('Probe'), 'A', 'F'],
            'G41' : [_('Radius compensation left'), 'D'],
            'G42' : [_('Radius compensation right'), 'D'],
            'G41.1' : [_('Radius compensation left, immediate'), 'D', 'L'],
            'G42.1' : [_('Radius compensation right, immediate'), 'D', 'L'],
            'G43' : [_('Tool length offset'), 'H'],
            'G43.1' : [_('Tool length offset immediate'), 'I', 'K'],
            'G53' : [_('Motion in unoffset coordinates'), 'G', 'A', 'F'],
            'G64' : [_('Continuous mode'), 'P'],
            'G76' : [_('Thread'), 'Z', 'P', 'I', 'J', 'K', 'R', 'Q', 'H', 'E', 'L'],
            'G81' : [_('Drill'), 'A', 'R', 'L', 'F'],
            'G82' : [_('Drill with dwell'), 'A', 'R', 'L', 'P', 'F'],
            'G83' : [_('Peck drill'), 'A', 'R', 'L', 'Q', 'F'],
            'G73' : [_('Chip-break drill'), 'A', 'R', 'L', 'Q', 'F'],
            'G85' : [_('Bore'), 'A', 'R', 'L', 'F'],
            'G89' : [_('Bore with dwell'), 'A', 'R', 'L', 'P', 'F'],
            'G92' : [_('Offset all coordinate systems'), 'A'],
            'G96' : [_('CSS Mode'), 'S', 'D'],
            }
        # O-word macro calls registered via add_macros().
        self.ocodes = []

    def add_macros(self, macros):
        # Each macro spec is "name arg1 arg2 ..."; register an
        # "O<name> call" entry whose words are the named arguments.
        for m in macros:
            words = m.split()
            call = "O<%s> call" % words[0]
            args = [''] + [w + ' ' for w in words[1:]]
            self.ocodes.append(call)
            self.codes[call] = args

    def get_description(self, gcode):
        # First element of the codes entry is the human-readable label.
        return self.codes[gcode][0]

    def get_words(self, gcode):
        """Return the list of word letters the given code accepts,
        remembering the code for the subsequent issue()."""
        self.gcode = gcode
        # User-defined M100..M199 codes conventionally take P and Q.
        if gcode[0] == 'M' and gcode.find(".") == -1 and int(gcode[1:]) >= 100 and int(gcode[1:]) <= 199:
            return ['P', 'Q']
        if not self.codes.has_key(gcode):
            return []
        # strip description
        words = self.codes[gcode][1:]
        # replace A with the real axis names
        if 'A' in words:
            i = words.index('A')
            words = words[:i] + self.axes + words[i+1:]
            # In polar mode X/Y are entered as radius '@' and angle '^'.
            if self.polar and 'X' in self.axes and 'Y' in self.axes:
                words[self.axes.index('X')] = '@'
                words[self.axes.index('Y')] = '^'
        return words

    def clear(self):
        # Forget all collected word values.
        self.words = {}

    def set_word(self, word, value):
        self.words[word] = value

    def set_polar(self, p):
        self.polar = p;

    def issue(self):
        """Assemble the MDI string from the current code and words and
        send it, switching the controller into MDI mode if needed."""
        m = self.gcode
        if m.lower().startswith('o'):
            # O-word call: arguments go in brackets, defaulting to "0".
            codes = self.codes[m]
            for code in self.codes[m][1:]:
                v = self.words[code] or "0"
                m = m + " [%s]" % v
        else:
            w = [i for i in self.words if len(self.words.get(i)) > 0]
            # Polar radius/angle first, then the remaining words.
            if '@' in w:
                m += '@' + self.words.get('@')
                w.remove('@')
            if '^' in w:
                m += '^' + self.words.get('^')
                w.remove('^')
            for i in w:
                if len(self.words.get(i)) > 0:
                    m += i + self.words.get(i)
        self.emcstat.poll()
        if self.emcstat.task_mode != self.emc.MODE_MDI:
            self.emccommand.mode(self.emc.MODE_MDI)
            self.emccommand.wait_complete()
        self.emccommand.mdi(m)


class mdi_control:
    """GTK glue for the MDI entry screen: a row of labels acts as the
    command fields; label 0 holds the code, labels 1..n hold its words.
    Most handlers take the emitting widget ``b`` (unused)."""

    def __init__(self, gtk, emc, labels, eventboxes):
        self.labels = labels
        self.eventboxes = eventboxes
        self.numlabels = len(labels)
        self.numwords = 1
        self.selected = 0
        self.gtk = gtk
        self.mdi = mdi(emc)

        for i in range(self.numlabels):
            self.not_editing(i)
        self.editing(self.selected)
        self.set_text("G")

    def not_editing(self, n):
        # Grey background marks an inactive field.
        e = self.eventboxes[n]
        e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#ccc"))

    def editing(self, n):
        # White background marks the field currently being edited.
        self.not_editing(self.selected)
        self.selected = n
        e = self.eventboxes[n]
        e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#fff"))

    def get_text(self):
        w = self.labels[self.selected]
        return w.get_text()

    def set_text(self, t, n = -1):
        """Set label ``n`` (default: the selected one) to ``t``; for word
        labels also push the split word/value into the mdi object."""
        if n == -1: n = self.selected
        w = self.labels[n]
        w.set_text(t)
        if n > 0:
            # Split e.g. "X1.5" into word "X" and value "1.5".
            head = t.rstrip("0123456789.-")
            tail = t[len(head):]
            self.mdi.set_word(head, tail)
        # Right-align a lone letter, left-align longer text.
        if len(t) < 2:
            w.set_alignment(1.0, 0.5)
        else:
            w.set_alignment(0.0, 0.5)

    def clear(self, b):
        # Drop the numeric part, keeping the word letter.
        t = self.get_text()
        self.set_text(t.rstrip("0123456789.-"))

    def back(self, b):
        # Backspace over the last digit/sign/decimal point only.
        t = self.get_text()
        if t[-1:] in "0123456789.-":
            self.set_text(t[:-1])

    def fill_out(self):
        # When the code field is selected, (re)populate the word labels
        # for the entered code and blank the unused ones.
        if self.selected == 0:
            w = self.mdi.get_words(self.get_text())
            self.numwords = len(w)
            for i in range(1,self.numlabels):
                if i <= len(w):
                    self.set_text(w[i-1], i)
                else:
                    self.set_text("", i)

    def next(self, b):
        # Advance to the next word field, wrapping past the last one.
        self.fill_out();
        if self.numwords > 0:
            self.editing(max(1,(self.selected+1) % (self.numwords+1)))

    def ok(self, b):
        self.fill_out();
        self.mdi.issue()

    def decimal(self, b):
        # Allow at most one decimal point per field.
        t = self.get_text()
        if t.find(".") == -1:
            self.set_text(t + ".")

    def minus(self, b):
        # Toggle the sign of the numeric part of a word field.
        t = self.get_text()
        if self.selected > 0:
            head = t.rstrip("0123456789.-")
            tail = t[len(head):]
            if tail.find("-") == -1:
                self.set_text(head + "-" + tail)
            else:
                self.set_text(head + tail[1:])

    def keypad(self, b):
        # Keypad buttons are named after the digit they insert.
        t = self.get_text()
        num = b.get_name()
        self.set_text(t + num)

    def gp(self, b):
        # G-code entry in polar mode.
        self.g(b, "G", 1)

    def g(self, b, code="G", polar=0):
        """Reset the screen to start entering a new command of the given
        code letter, clearing all word fields."""
        self.mdi.set_polar(polar)
        self.set_text(code, 0)
        for i in range(1, self.numlabels):
            self.set_text("", i)
        self.editing(0)
        self.mdi.clear()

    def m(self, b):
        self.g(b, "M")

    def t(self, b):
        self.g(b, "T")

    def o(self, b):
        # Cycle through the registered O-word macros.
        old_code = self.labels[0].get_text()
        ocodes = self.mdi.ocodes
        if old_code in ocodes:
            j = (ocodes.index(old_code) + 1) % len(ocodes)
        else:
            j = 0
        self.g(b, ocodes[j])
        self.next(b)

    def select(self, eventbox, event):
        # Event boxes are named "mdi_eventbox<N>"; strip the prefix to
        # get the field index.  NOTE(review): the magic 12 assumes that
        # exact prefix length -- confirm against the Glade file.
        n = int(eventbox.get_name()[12:])
        if self.selected == 0:
            self.fill_out()
        if n <= self.numwords:
            self.editing(n)

    def set_tool(self, tool, g10l11):
        # Drive the screen as if the user typed G10 L10/L11 P<tool>,
        # leaving the remaining words ready for entry.
        self.g(0)
        self.set_text("G10", 0)
        self.next(0)
        if g10l11:
            self.set_text("L11", 1)
        else:
            self.set_text("L10", 1)
        self.next(0)
        self.set_text("P%d" % tool, 2)
        self.next(0)
        self.next(0)
        self.next(0)

    def set_origin(self, system):
        # Pre-fill G10 L20 P<system> for touching off a coordinate system.
        self.g(0)
        self.set_text("G10", 0)
        self.next(0)
        self.set_text("L20", 1)
        self.next(0)
        self.set_text("P%d" % system, 2)
        self.next(0)
CalvinHsu1223/LinuxCNC-EtherCAT-HAL-Driver
src/emc/usr_intf/touchy/mdi.py
Python
gpl-2.0
10,007
0.006196
#!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Generate a spatial analysis against an arbitrary library. To use, build the 'binary_size_tool' target. Then run this tool, passing in the location of the library to be analyzed along with any other options you desire. """ import collections import json import logging import multiprocessing import optparse import os import re import shutil import struct import subprocess import sys import tempfile import time import binary_size_utils # This path changee is not beautiful. Temporary (I hope) measure until # the chromium project has figured out a proper way to organize the # library of python tools. http://crbug.com/375725 elf_symbolizer_path = os.path.abspath(os.path.join( os.path.dirname(__file__), '..', '..', 'build', 'android', 'pylib')) sys.path.append(elf_symbolizer_path) import symbols.elf_symbolizer as elf_symbolizer # pylint: disable=F0401 # Node dictionary keys. These are output in json read by the webapp so # keep them short to save file size. # Note: If these change, the webapp must also change. NODE_TYPE_KEY = 'k' NODE_NAME_KEY = 'n' NODE_CHILDREN_KEY = 'children' NODE_SYMBOL_TYPE_KEY = 't' NODE_SYMBOL_SIZE_KEY = 'value' NODE_MAX_DEPTH_KEY = 'maxDepth' NODE_LAST_PATH_ELEMENT_KEY = 'lastPathElement' # The display name of the bucket where we put symbols without path. NAME_NO_PATH_BUCKET = '(No Path)' # Try to keep data buckets smaller than this to avoid killing the # graphing lib. BIG_BUCKET_LIMIT = 3000 # TODO(andrewhayden): Only used for legacy reports. Delete. def FormatBytes(byte_count): """Pretty-print a number of bytes.""" if byte_count > 1e6: byte_count = byte_count / 1.0e6 return '%.1fm' % byte_count if byte_count > 1e3: byte_count = byte_count / 1.0e3 return '%.1fk' % byte_count return str(byte_count) # TODO(andrewhayden): Only used for legacy reports. Delete. 
def SymbolTypeToHuman(symbol_type): """Convert a symbol type as printed by nm into a human-readable name.""" return {'b': 'bss', 'd': 'data', 'r': 'read-only data', 't': 'code', 'w': 'weak symbol', 'v': 'weak symbol'}[symbol_type] def _MkChild(node, name): child = node[NODE_CHILDREN_KEY].get(name) if child is None: child = {NODE_NAME_KEY: name, NODE_CHILDREN_KEY: {}} node[NODE_CHILDREN_KEY][name] = child return child def SplitNoPathBucket(node): """NAME_NO_PATH_BUCKET can be too large for the graphing lib to handle. Split it into sub-buckets in that case.""" root_children = node[NODE_CHILDREN_KEY] if NAME_NO_PATH_BUCKET in root_children: no_path_bucket = root_children[NAME_NO_PATH_BUCKET] old_children = no_path_bucket[NODE_CHILDREN_KEY] count = 0 for symbol_type, symbol_bucket in old_children.iteritems(): count += len(symbol_bucket[NODE_CHILDREN_KEY]) if count > BIG_BUCKET_LIMIT: new_children = {} no_path_bucket[NODE_CHILDREN_KEY] = new_children current_bucket = None index = 0 for symbol_type, symbol_bucket in old_children.iteritems(): for symbol_name, value in symbol_bucket[NODE_CHILDREN_KEY].iteritems(): if index % BIG_BUCKET_LIMIT == 0: group_no = (index / BIG_BUCKET_LIMIT) + 1 current_bucket = _MkChild(no_path_bucket, '%s subgroup %d' % (NAME_NO_PATH_BUCKET, group_no)) assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p' node[NODE_TYPE_KEY] = 'p' # p for path index += 1 symbol_size = value[NODE_SYMBOL_SIZE_KEY] AddSymbolIntoFileNode(current_bucket, symbol_type, symbol_name, symbol_size) def MakeChildrenDictsIntoLists(node): largest_list_len = 0 if NODE_CHILDREN_KEY in node: largest_list_len = len(node[NODE_CHILDREN_KEY]) child_list = [] for child in node[NODE_CHILDREN_KEY].itervalues(): child_largest_list_len = MakeChildrenDictsIntoLists(child) if child_largest_list_len > largest_list_len: largest_list_len = child_largest_list_len child_list.append(child) node[NODE_CHILDREN_KEY] = child_list return largest_list_len def AddSymbolIntoFileNode(node, 
symbol_type, symbol_name, symbol_size): """Puts symbol into the file path node |node|. Returns the number of added levels in tree. I.e. returns 2.""" # 'node' is the file node and first step is to find its symbol-type bucket. node[NODE_LAST_PATH_ELEMENT_KEY] = True node = _MkChild(node, symbol_type) assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'b' node[NODE_SYMBOL_TYPE_KEY] = symbol_type node[NODE_TYPE_KEY] = 'b' # b for bucket # 'node' is now the symbol-type bucket. Make the child entry. node = _MkChild(node, symbol_name) if NODE_CHILDREN_KEY in node: if node[NODE_CHILDREN_KEY]: logging.warning('A container node used as symbol for %s.' % symbol_name) # This is going to be used as a leaf so no use for child list. del node[NODE_CHILDREN_KEY] node[NODE_SYMBOL_SIZE_KEY] = symbol_size node[NODE_SYMBOL_TYPE_KEY] = symbol_type node[NODE_TYPE_KEY] = 's' # s for symbol return 2 # Depth of the added subtree. def MakeCompactTree(symbols, symbol_path_origin_dir): result = {NODE_NAME_KEY: '/', NODE_CHILDREN_KEY: {}, NODE_TYPE_KEY: 'p', NODE_MAX_DEPTH_KEY: 0} seen_symbol_with_path = False cwd = os.path.abspath(os.getcwd()) for symbol_name, symbol_type, symbol_size, file_path in symbols: if 'vtable for ' in symbol_name: symbol_type = '@' # hack to categorize these separately # Take path like '/foo/bar/baz', convert to ['foo', 'bar', 'baz'] if file_path and file_path != "??": file_path = os.path.abspath(os.path.join(symbol_path_origin_dir, file_path)) # Let the output structure be relative to $CWD if inside $CWD, # otherwise relative to the disk root. This is to avoid # unnecessary click-through levels in the output. 
if file_path.startswith(cwd + os.sep): file_path = file_path[len(cwd):] if file_path.startswith('/'): file_path = file_path[1:] seen_symbol_with_path = True else: file_path = NAME_NO_PATH_BUCKET path_parts = file_path.split('/') # Find pre-existing node in tree, or update if it already exists node = result depth = 0 while len(path_parts) > 0: path_part = path_parts.pop(0) if len(path_part) == 0: continue depth += 1 node = _MkChild(node, path_part) assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p' node[NODE_TYPE_KEY] = 'p' # p for path depth += AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size) result[NODE_MAX_DEPTH_KEY] = max(result[NODE_MAX_DEPTH_KEY], depth) if not seen_symbol_with_path: logging.warning('Symbols lack paths. Data will not be structured.') # The (no path) bucket can be extremely large if we failed to get # path information. Split it into subgroups if needed. SplitNoPathBucket(result) largest_list_len = MakeChildrenDictsIntoLists(result) if largest_list_len > BIG_BUCKET_LIMIT: logging.warning('There are sections with %d nodes. ' 'Results might be unusable.' % largest_list_len) return result # TODO(andrewhayden): Only used for legacy reports. Delete. def TreeifySymbols(symbols): """Convert symbols into a path-based tree, calculating size information along the way. The result is a dictionary that contains two kinds of nodes: 1. Leaf nodes, representing source code locations (e.g., c++ files) These nodes have the following dictionary entries: sizes: a dictionary whose keys are categories (such as code, data, vtable, etceteras) and whose values are the size, in bytes, of those categories; size: the total size, in bytes, of all the entries in the sizes dict 2. 
Non-leaf nodes, representing directories These nodes have the following dictionary entries: children: a dictionary whose keys are names (path entries; either directory or file names) and whose values are other nodes; size: the total size, in bytes, of all the leaf nodes that are contained within the children dict (recursively expanded) The result object is itself a dictionary that represents the common ancestor of all child nodes, e.g. a path to which all other nodes beneath it are relative. The 'size' attribute of this dict yields the sum of the size of all leaf nodes within the data structure. """ dirs = {'children': {}, 'size': 0} for sym, symbol_type, size, path in symbols: dirs['size'] += size if path: path = os.path.normpath(path) if path.startswith('/'): path = path[1:] parts = None if path: parts = path.split('/') if parts: assert path file_key = parts.pop() tree = dirs try: # Traverse the tree to the parent of the file node, creating as needed for part in parts: assert part != '' if part not in tree['children']: tree['children'][part] = {'children': {}, 'size': 0} tree = tree['children'][part] tree['size'] += size # Get (creating if necessary) the node for the file # This node doesn't have a 'children' attribute if file_key not in tree['children']: tree['children'][file_key] = {'sizes': collections.defaultdict(int), 'size': 0} tree = tree['children'][file_key] tree['size'] += size # Accumulate size into a bucket within the file symbol_type = symbol_type.lower() if 'vtable for ' in sym: tree['sizes']['[vtable]'] += size elif 'r' == symbol_type: tree['sizes']['[rodata]'] += size elif 'd' == symbol_type: tree['sizes']['[data]'] += size elif 'b' == symbol_type: tree['sizes']['[bss]'] += size elif 't' == symbol_type: # 'text' in binary parlance means 'code'. 
tree['sizes']['[code]'] += size elif 'w' == symbol_type: tree['sizes']['[weak]'] += size else: tree['sizes']['[other]'] += size except: print >> sys.stderr, sym, parts, file_key raise else: key = 'symbols without paths' if key not in dirs['children']: dirs['children'][key] = {'sizes': collections.defaultdict(int), 'size': 0} tree = dirs['children'][key] subkey = 'misc' if (sym.endswith('::__FUNCTION__') or sym.endswith('::__PRETTY_FUNCTION__')): subkey = '__FUNCTION__' elif sym.startswith('CSWTCH.'): subkey = 'CSWTCH' elif '::' in sym: subkey = sym[0:sym.find('::') + 2] tree['sizes'][subkey] = tree['sizes'].get(subkey, 0) + size tree['size'] += size return dirs # TODO(andrewhayden): Only used for legacy reports. Delete. def JsonifyTree(tree, name): """Convert TreeifySymbols output to a JSON treemap. The format is very similar, with the notable exceptions being lists of children instead of maps and some different attribute names.""" children = [] css_class_map = { '[vtable]': 'vtable', '[rodata]': 'read-only_data', '[data]': 'data', '[bss]': 'bss', '[code]': 'code', '[weak]': 'weak_symbol' } if 'children' in tree: # Non-leaf node. Recurse. for child_name, child in tree['children'].iteritems(): children.append(JsonifyTree(child, child_name)) else: # Leaf node; dump per-file stats as entries in the treemap for kind, size in tree['sizes'].iteritems(): child_json = {'name': kind + ' (' + FormatBytes(size) + ')', 'data': { '$area': size }} css_class = css_class_map.get(kind) if css_class is not None: child_json['data']['$symbol'] = css_class children.append(child_json) # Sort children by size, largest to smallest. children.sort(key=lambda child: -child['data']['$area']) # For leaf nodes, the 'size' attribute is the size of the leaf; # Non-leaf nodes don't really have a size, but their 'size' attribute is # the sum of the sizes of all their children. 
return {'name': name + ' (' + FormatBytes(tree['size']) + ')', 'data': { '$area': tree['size'] }, 'children': children } def DumpCompactTree(symbols, symbol_path_origin_dir, outfile): tree_root = MakeCompactTree(symbols, symbol_path_origin_dir) with open(outfile, 'w') as out: out.write('var tree_data=') # Use separators without whitespace to get a smaller file. json.dump(tree_root, out, separators=(',', ':')) print('Writing %d bytes json' % os.path.getsize(outfile)) # TODO(andrewhayden): Only used for legacy reports. Delete. def DumpTreemap(symbols, outfile): dirs = TreeifySymbols(symbols) out = open(outfile, 'w') try: out.write('var kTree = ' + json.dumps(JsonifyTree(dirs, '/'))) finally: out.flush() out.close() # TODO(andrewhayden): Only used for legacy reports. Delete. def DumpLargestSymbols(symbols, outfile, n): # a list of (sym, symbol_type, size, path); sort by size. symbols = sorted(symbols, key=lambda x: -x[2]) dumped = 0 out = open(outfile, 'w') try: out.write('var largestSymbols = [\n') for sym, symbol_type, size, path in symbols: if symbol_type in ('b', 'w'): continue # skip bss and weak symbols if path is None: path = '' entry = {'size': FormatBytes(size), 'symbol': sym, 'type': SymbolTypeToHuman(symbol_type), 'location': path } out.write(json.dumps(entry)) out.write(',\n') dumped += 1 if dumped >= n: return finally: out.write('];\n') out.flush() out.close() def MakeSourceMap(symbols): sources = {} for _sym, _symbol_type, size, path in symbols: key = None if path: key = os.path.normpath(path) else: key = '[no path]' if key not in sources: sources[key] = {'path': path, 'symbol_count': 0, 'size': 0} record = sources[key] record['size'] += size record['symbol_count'] += 1 return sources # TODO(andrewhayden): Only used for legacy reports. Delete. 
def DumpLargestSources(symbols, outfile, n): source_map = MakeSourceMap(symbols) sources = sorted(source_map.values(), key=lambda x: -x['size']) dumped = 0 out = open(outfile, 'w') try: out.write('var largestSources = [\n') for record in sources: entry = {'size': FormatBytes(record['size']), 'symbol_count': str(record['symbol_count']), 'location': record['path']} out.write(json.dumps(entry)) out.write(',\n') dumped += 1 if dumped >= n: return finally: out.write('];\n') out.flush() out.close() # TODO(andrewhayden): Only used for legacy reports. Delete. def DumpLargestVTables(symbols, outfile, n): vtables = [] for symbol, _type, size, path in symbols: if 'vtable for ' in symbol: vtables.append({'symbol': symbol, 'path': path, 'size': size}) vtables = sorted(vtables, key=lambda x: -x['size']) dumped = 0 out = open(outfile, 'w') try: out.write('var largestVTables = [\n') for record in vtables: entry = {'size': FormatBytes(record['size']), 'symbol': record['symbol'], 'location': record['path']} out.write(json.dumps(entry)) out.write(',\n') dumped += 1 if dumped >= n: return finally: out.write('];\n') out.flush() out.close() # Regex for parsing "nm" output. A sample line looks like this: # 0167b39c 00000018 t ACCESS_DESCRIPTION_free /path/file.c:95 # # The fields are: address, size, type, name, source location # Regular expression explained ( see also: https://xkcd.com/208 ): # ([0-9a-f]{8,}+) The address # [\s]+ Whitespace separator # ([0-9a-f]{8,}+) The size. From here on out it's all optional. # [\s]+ Whitespace separator # (\S?) The symbol type, which is any non-whitespace char # [\s*] Whitespace separator # ([^\t]*) Symbol name, any non-tab character (spaces ok!) # [\t]? 
Tab separator # (.*) The location (filename[:linennum|?][ (discriminator n)] sNmPattern = re.compile( r'([0-9a-f]{8,})[\s]+([0-9a-f]{8,})[\s]*(\S?)[\s*]([^\t]*)[\t]?(.*)') class Progress(): def __init__(self): self.count = 0 self.skip_count = 0 self.collisions = 0 self.time_last_output = time.time() self.count_last_output = 0 self.disambiguations = 0 self.was_ambiguous = 0 def RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs, disambiguate, src_path): nm_output = RunNm(library, nm_binary) nm_output_lines = nm_output.splitlines() nm_output_lines_len = len(nm_output_lines) address_symbol = {} progress = Progress() def map_address_symbol(symbol, addr): progress.count += 1 if addr in address_symbol: # 'Collision between %s and %s.' % (str(symbol.name), # str(address_symbol[addr].name)) progress.collisions += 1 else: if symbol.disambiguated: progress.disambiguations += 1 if symbol.was_ambiguous: progress.was_ambiguous += 1 address_symbol[addr] = symbol progress_output() def progress_output(): progress_chunk = 100 if progress.count % progress_chunk == 0: time_now = time.time() time_spent = time_now - progress.time_last_output if time_spent > 1.0: # Only output at most once per second. progress.time_last_output = time_now chunk_size = progress.count - progress.count_last_output progress.count_last_output = progress.count if time_spent > 0: speed = chunk_size / time_spent else: speed = 0 progress_percent = (100.0 * (progress.count + progress.skip_count) / nm_output_lines_len) disambiguation_percent = 0 if progress.disambiguations != 0: disambiguation_percent = (100.0 * progress.disambiguations / progress.was_ambiguous) sys.stdout.write('\r%.1f%%: Looked up %d symbols (%d collisions, ' '%d disambiguations where %.1f%% succeeded)' ' - %.1f lookups/s.' 
% (progress_percent, progress.count, progress.collisions, progress.disambiguations, disambiguation_percent, speed)) # In case disambiguation was disabled, we remove the source path (which upon # being set signals the symbolizer to enable disambiguation) if not disambiguate: src_path = None symbolizer = elf_symbolizer.ELFSymbolizer(library, addr2line_binary, map_address_symbol, max_concurrent_jobs=jobs, source_root_path=src_path) user_interrupted = False try: for line in nm_output_lines: match = sNmPattern.match(line) if match: location = match.group(5) if not location: addr = int(match.group(1), 16) size = int(match.group(2), 16) if addr in address_symbol: # Already looked up, shortcut # ELFSymbolizer. map_address_symbol(address_symbol[addr], addr) continue elif size == 0: # Save time by not looking up empty symbols (do they even exist?) print('Empty symbol: ' + line) else: symbolizer.SymbolizeAsync(addr, addr) continue progress.skip_count += 1 except KeyboardInterrupt: user_interrupted = True print('Interrupting - killing subprocesses. Please wait.') try: symbolizer.Join() except KeyboardInterrupt: # Don't want to abort here since we will be finished in a few seconds. user_interrupted = True print('Patience you must have my young padawan.') print '' if user_interrupted: print('Skipping the rest of the file mapping. ' 'Output will not be fully classified.') symbol_path_origin_dir = os.path.dirname(os.path.abspath(library)) with open(outfile, 'w') as out: for line in nm_output_lines: match = sNmPattern.match(line) if match: location = match.group(5) if not location: addr = int(match.group(1), 16) symbol = address_symbol.get(addr) if symbol is not None: path = '??' 
if symbol.source_path is not None: path = os.path.abspath(os.path.join(symbol_path_origin_dir, symbol.source_path)) line_number = 0 if symbol.source_line is not None: line_number = symbol.source_line out.write('%s\t%s:%d\n' % (line, path, line_number)) continue out.write('%s\n' % line) print('%d symbols in the results.' % len(address_symbol)) def RunNm(binary, nm_binary): cmd = [nm_binary, '-C', '--print-size', '--size-sort', '--reverse-sort', binary] nm_process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (process_output, err_output) = nm_process.communicate() if nm_process.returncode != 0: if err_output: raise Exception, err_output else: raise Exception, process_output return process_output def GetNmSymbols(nm_infile, outfile, library, jobs, verbose, addr2line_binary, nm_binary, disambiguate, src_path): if nm_infile is None: if outfile is None: outfile = tempfile.NamedTemporaryFile(delete=False).name if verbose: print 'Running parallel addr2line, dumping symbols to ' + outfile RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs, disambiguate, src_path) nm_infile = outfile elif verbose: print 'Using nm input from ' + nm_infile with file(nm_infile, 'r') as infile: return list(binary_size_utils.ParseNm(infile)) PAK_RESOURCE_ID_TO_STRING = { "inited": False } def LoadPakIdsFromResourceFile(filename): """Given a file name, it loads everything that looks like a resource id into PAK_RESOURCE_ID_TO_STRING.""" with open(filename) as resource_header: for line in resource_header: if line.startswith("#define "): line_data = line.split() if len(line_data) == 3: try: resource_number = int(line_data[2]) resource_name = line_data[1] PAK_RESOURCE_ID_TO_STRING[resource_number] = resource_name except ValueError: pass def GetReadablePakResourceName(pak_file, resource_id): """Pak resources have a numeric identifier. It is not helpful when trying to locate where footprint is generated. 
This does its best to map the number to a usable string.""" if not PAK_RESOURCE_ID_TO_STRING['inited']: # Try to find resource header files generated by grit when # building the pak file. We'll look for files named *resources.h" # and lines of the type: # #define MY_RESOURCE_JS 1234 PAK_RESOURCE_ID_TO_STRING['inited'] = True gen_dir = os.path.join(os.path.dirname(pak_file), 'gen') if os.path.isdir(gen_dir): for dirname, _dirs, files in os.walk(gen_dir): for filename in files: if filename.endswith('resources.h'): LoadPakIdsFromResourceFile(os.path.join(dirname, filename)) return PAK_RESOURCE_ID_TO_STRING.get(resource_id, 'Pak Resource %d' % resource_id) def AddPakData(symbols, pak_file): """Adds pseudo-symbols from a pak file.""" pak_file = os.path.abspath(pak_file) with open(pak_file, 'rb') as pak: data = pak.read() PAK_FILE_VERSION = 4 HEADER_LENGTH = 2 * 4 + 1 # Two uint32s. (file version, number of entries) # and one uint8 (encoding of text resources) INDEX_ENTRY_SIZE = 2 + 4 # Each entry is a uint16 and a uint32. version, num_entries, _encoding = struct.unpack('<IIB', data[:HEADER_LENGTH]) assert version == PAK_FILE_VERSION, ('Unsupported pak file ' 'version (%d) in %s. Only ' 'support version %d' % (version, pak_file, PAK_FILE_VERSION)) if num_entries > 0: # Read the index and data. data = data[HEADER_LENGTH:] for _ in range(num_entries): resource_id, offset = struct.unpack('<HI', data[:INDEX_ENTRY_SIZE]) data = data[INDEX_ENTRY_SIZE:] _next_id, next_offset = struct.unpack('<HI', data[:INDEX_ENTRY_SIZE]) resource_size = next_offset - offset symbol_name = GetReadablePakResourceName(pak_file, resource_id) symbol_path = pak_file symbol_type = 'd' # Data. Approximation. 
symbol_size = resource_size symbols.append((symbol_name, symbol_type, symbol_size, symbol_path)) def _find_in_system_path(binary): """Locate the full path to binary in the system path or return None if not found.""" system_path = os.environ["PATH"].split(os.pathsep) for path in system_path: binary_path = os.path.join(path, binary) if os.path.isfile(binary_path): return binary_path return None def CheckDebugFormatSupport(library, addr2line_binary): """Kills the program if debug data is in an unsupported format. There are two common versions of the DWARF debug formats and since we are right now transitioning from DWARF2 to newer formats, it's possible to have a mix of tools that are not compatible. Detect that and abort rather than produce meaningless output.""" tool_output = subprocess.check_output([addr2line_binary, '--version']) version_re = re.compile(r'^GNU [^ ]+ .* (\d+).(\d+).*?$', re.M) parsed_output = version_re.match(tool_output) major = int(parsed_output.group(1)) minor = int(parsed_output.group(2)) supports_dwarf4 = major > 2 or major == 2 and minor > 22 if supports_dwarf4: return print('Checking version of debug information in %s.' % library) debug_info = subprocess.check_output(['readelf', '--debug-dump=info', '--dwarf-depth=1', library]) dwarf_version_re = re.compile(r'^\s+Version:\s+(\d+)$', re.M) parsed_dwarf_format_output = dwarf_version_re.search(debug_info) version = int(parsed_dwarf_format_output.group(1)) if version > 2: print('The supplied tools only support DWARF2 debug data but the binary\n' + 'uses DWARF%d. Update the tools or compile the binary\n' % version + 'with -gdwarf-2.') sys.exit(1) def main(): usage = """%prog [options] Runs a spatial analysis on a given library, looking up the source locations of its symbols and calculating how much space each directory, source file, and so on is taking. The result is a report that can be used to pinpoint sources of large portions of the binary, etceteras. 
Under normal circumstances, you only need to pass two arguments, thusly: %prog --library /path/to/library --destdir /path/to/output In this mode, the program will dump the symbols from the specified library and map those symbols back to source locations, producing a web-based report in the specified output directory. Other options are available via '--help'. """ parser = optparse.OptionParser(usage=usage) parser.add_option('--nm-in', metavar='PATH', help='if specified, use nm input from <path> instead of ' 'generating it. Note that source locations should be ' 'present in the file; i.e., no addr2line symbol lookups ' 'will be performed when this option is specified. ' 'Mutually exclusive with --library.') parser.add_option('--destdir', metavar='PATH', help='write output to the specified directory. An HTML ' 'report is generated here along with supporting files; ' 'any existing report will be overwritten.') parser.add_option('--library', metavar='PATH', help='if specified, process symbols in the library at ' 'the specified path. Mutually exclusive with --nm-in.') parser.add_option('--pak', metavar='PATH', help='if specified, includes the contents of the ' 'specified *.pak file in the output.') parser.add_option('--nm-binary', help='use the specified nm binary to analyze library. ' 'This is to be used when the nm in the path is not for ' 'the right architecture or of the right version.') parser.add_option('--addr2line-binary', help='use the specified addr2line binary to analyze ' 'library. This is to be used when the addr2line in ' 'the path is not for the right architecture or ' 'of the right version.') parser.add_option('--jobs', type='int', help='number of jobs to use for the parallel ' 'addr2line processing pool; defaults to 1. More ' 'jobs greatly improve throughput but eat RAM like ' 'popcorn, and take several gigabytes each. Start low ' 'and ramp this number up until your machine begins to ' 'struggle with RAM. 
' 'This argument is only valid when using --library.') parser.add_option('-v', dest='verbose', action='store_true', help='be verbose, printing lots of status information.') parser.add_option('--nm-out', metavar='PATH', help='keep the nm output file, and store it at the ' 'specified path. This is useful if you want to see the ' 'fully processed nm output after the symbols have been ' 'mapped to source locations. By default, a tempfile is ' 'used and is deleted when the program terminates.' 'This argument is only valid when using --library.') parser.add_option('--legacy', action='store_true', help='emit legacy binary size report instead of modern') parser.add_option('--disable-disambiguation', action='store_true', help='disables the disambiguation process altogether,' ' NOTE: this may, depending on your toolchain, produce' ' output with some symbols at the top layer if addr2line' ' could not get the entire source path.') parser.add_option('--source-path', default='./', help='the path to the source code of the output binary, ' 'default set to current directory. Used in the' ' disambiguation process.') opts, _args = parser.parse_args() if ((not opts.library) and (not opts.nm_in)) or (opts.library and opts.nm_in): parser.error('exactly one of --library or --nm-in is required') if (opts.nm_in): if opts.jobs: print >> sys.stderr, ('WARNING: --jobs has no effect ' 'when used with --nm-in') if not opts.destdir: parser.error('--destdir is required argument') if not opts.jobs: # Use the number of processors but cap between 2 and 4 since raw # CPU power isn't the limiting factor. It's I/O limited, memory # bus limited and available-memory-limited. Too many processes and # the computer will run out of memory and it will be slow. 
opts.jobs = max(2, min(4, str(multiprocessing.cpu_count()))) if opts.addr2line_binary: assert os.path.isfile(opts.addr2line_binary) addr2line_binary = opts.addr2line_binary else: addr2line_binary = _find_in_system_path('addr2line') assert addr2line_binary, 'Unable to find addr2line in the path. '\ 'Use --addr2line-binary to specify location.' if opts.nm_binary: assert os.path.isfile(opts.nm_binary) nm_binary = opts.nm_binary else: nm_binary = _find_in_system_path('nm') assert nm_binary, 'Unable to find nm in the path. Use --nm-binary '\ 'to specify location.' if opts.pak: assert os.path.isfile(opts.pak), 'Could not find ' % opts.pak print('addr2line: %s' % addr2line_binary) print('nm: %s' % nm_binary) if opts.library: CheckDebugFormatSupport(opts.library, addr2line_binary) symbols = GetNmSymbols(opts.nm_in, opts.nm_out, opts.library, opts.jobs, opts.verbose is True, addr2line_binary, nm_binary, opts.disable_disambiguation is None, opts.source_path) if opts.pak: AddPakData(symbols, opts.pak) if not os.path.exists(opts.destdir): os.makedirs(opts.destdir, 0755) if opts.legacy: # legacy report DumpTreemap(symbols, os.path.join(opts.destdir, 'treemap-dump.js')) DumpLargestSymbols(symbols, os.path.join(opts.destdir, 'largest-symbols.js'), 100) DumpLargestSources(symbols, os.path.join(opts.destdir, 'largest-sources.js'), 100) DumpLargestVTables(symbols, os.path.join(opts.destdir, 'largest-vtables.js'), 100) treemap_out = os.path.join(opts.destdir, 'webtreemap') if not os.path.exists(treemap_out): os.makedirs(treemap_out, 0755) treemap_src = os.path.join('third_party', 'webtreemap', 'src') shutil.copy(os.path.join(treemap_src, 'COPYING'), treemap_out) shutil.copy(os.path.join(treemap_src, 'webtreemap.js'), treemap_out) shutil.copy(os.path.join(treemap_src, 'webtreemap.css'), treemap_out) shutil.copy(os.path.join('tools', 'binary_size', 'legacy_template', 'index.html'), opts.destdir) else: # modern report if opts.library: symbol_path_origin_dir = 
os.path.dirname(os.path.abspath(opts.library)) else: # Just a guess. Hopefully all paths in the input file are absolute. symbol_path_origin_dir = os.path.abspath(os.getcwd()) data_js_file_name = os.path.join(opts.destdir, 'data.js') DumpCompactTree(symbols, symbol_path_origin_dir, data_js_file_name) d3_out = os.path.join(opts.destdir, 'd3') if not os.path.exists(d3_out): os.makedirs(d3_out, 0755) d3_src = os.path.join(os.path.dirname(__file__), '..', '..', 'third_party', 'd3', 'src') template_src = os.path.join(os.path.dirname(__file__), 'template') shutil.copy(os.path.join(d3_src, 'LICENSE'), d3_out) shutil.copy(os.path.join(d3_src, 'd3.js'), d3_out) shutil.copy(os.path.join(template_src, 'index.html'), opts.destdir) shutil.copy(os.path.join(template_src, 'D3SymbolTreeMap.js'), opts.destdir) print 'Report saved to ' + opts.destdir + '/index.html' if __name__ == '__main__': sys.exit(main())
hgl888/chromium-crosswalk-efl
tools/binary_size/run_binary_size_analysis.py
Python
bsd-3-clause
35,816
0.010051
# -*- coding: utf-8 -*-
#
# Tutorial documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 8 12:57:03 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os, time

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'


def get_copyright():
    """Return the copyright string with the current year filled in."""
    return u'2001-%s, AdaCore' % time.strftime("%Y")

# General information about the project.
project = u'Tutorial'
copyright = get_copyright()


def get_version():
    """Extract the version from VERSION.txt, or "0.0" if it is missing."""
    version_file = "../../VERSION.txt"
    if os.path.isfile(version_file):
        # Use a context manager so the handle is closed promptly; the
        # previous "file(...)" form also leaked the handle and the file()
        # builtin no longer exists in Python 3.
        with open(version_file) as f:
            return f.readline()
    return "0.0"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../users_guide/adacore_transparent.png'

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '../users_guide/favicon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Tutorialdoc'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Tutorial.tex', u'Tutorial Documentation',
   u'AdaCore', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'tutorial', u'Tutorial Documentation',
     [u'AdaCore'], 1)
]


# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'Tutorial'
epub_author = u'AdaCore'
epub_publisher = u'AdaCore'
epub_copyright = copyright

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True
qunying/gps
docs/tutorial/conf.py
Python
gpl-3.0
8,459
0.006975
import functools

from aiohttp.web import View, HTTPFound


def http_found(func):
    """Decorator for view handlers: run the handler, then redirect back to
    the URL of the current request (POST-redirect-GET style).

    The handler's own return value is discarded; the response is always an
    HTTPFound pointing at ``self.request.rel_url``.
    """
    # functools.wraps preserves the handler's __name__/__doc__ for
    # debugging, logging and route introspection.
    @functools.wraps(func)
    async def wrapped(self, *args, **kwargs):
        await func(self, *args, **kwargs)
        return HTTPFound(self.request.rel_url)
    return wrapped


class CoreView(View):
    """Base class for sensor-bound web views.

    NOTE(review): ``sensor`` is expected to be injected (e.g. set on a
    subclass or patched onto the class) before instantiation —
    ``self.sensor.logger`` raises AttributeError if it is left as None.
    """

    # Class-level slot for the sensor object this view operates on.
    sensor = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Reuse the sensor's logger so view output shares its log context.
        self.logger = self.sensor.logger
insolite/alarme
alarme/extras/sensor/web/views/core.py
Python
mit
386
0
""" Tests for Discussion API serializers """ from __future__ import absolute_import import itertools import ddt import httpretty import mock import six from django.test.client import RequestFactory from six.moves.urllib.parse import urlparse # pylint: disable=import-error from lms.djangoapps.discussion.django_comment_client.tests.utils import ForumsEnableMixin from lms.djangoapps.discussion.rest_api.serializers import CommentSerializer, ThreadSerializer, get_context from lms.djangoapps.discussion.rest_api.tests.utils import ( CommentsServiceMockMixin, make_minimal_cs_comment, make_minimal_cs_thread ) from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory from openedx.core.djangoapps.django_comment_common.comment_client.comment import Comment from openedx.core.djangoapps.django_comment_common.comment_client.thread import Thread from openedx.core.djangoapps.django_comment_common.models import ( FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_MODERATOR, FORUM_ROLE_STUDENT, Role ) from student.tests.factories import UserFactory from util.testing import UrlResetMixin from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.django import modulestore from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory @ddt.ddt class SerializerTestMixin(ForumsEnableMixin, CommentsServiceMockMixin, UrlResetMixin): """ Test Mixin for Serializer tests """ @classmethod @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) def setUpClass(cls): super(SerializerTestMixin, cls).setUpClass() cls.course = CourseFactory.create() @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) def setUp(self): super(SerializerTestMixin, self).setUp() httpretty.reset() httpretty.enable() self.addCleanup(httpretty.reset) self.addCleanup(httpretty.disable) self.maxDiff = None # pylint: disable=invalid-name 
self.user = UserFactory.create() self.register_get_user_response(self.user) self.request = RequestFactory().get("/dummy") self.request.user = self.user self.author = UserFactory.create() def create_role(self, role_name, users, course=None): """Create a Role in self.course with the given name and users""" course = course or self.course role = Role.objects.create(name=role_name, course_id=course.id) role.users = users @ddt.data( (FORUM_ROLE_ADMINISTRATOR, True, False, True), (FORUM_ROLE_ADMINISTRATOR, False, True, False), (FORUM_ROLE_MODERATOR, True, False, True), (FORUM_ROLE_MODERATOR, False, True, False), (FORUM_ROLE_COMMUNITY_TA, True, False, True), (FORUM_ROLE_COMMUNITY_TA, False, True, False), (FORUM_ROLE_STUDENT, True, False, True), (FORUM_ROLE_STUDENT, False, True, True), ) @ddt.unpack def test_anonymity(self, role_name, anonymous, anonymous_to_peers, expected_serialized_anonymous): """ Test that content is properly made anonymous. Content should be anonymous iff the anonymous field is true or the anonymous_to_peers field is true and the requester does not have a privileged role. role_name is the name of the requester's role. anonymous is the value of the anonymous field in the content. anonymous_to_peers is the value of the anonymous_to_peers field in the content. expected_serialized_anonymous is whether the content should actually be anonymous in the API output when requested by a user with the given role. 
""" self.create_role(role_name, [self.user]) serialized = self.serialize( self.make_cs_content({"anonymous": anonymous, "anonymous_to_peers": anonymous_to_peers}) ) actual_serialized_anonymous = serialized["author"] is None self.assertEqual(actual_serialized_anonymous, expected_serialized_anonymous) @ddt.data( (FORUM_ROLE_ADMINISTRATOR, False, "Staff"), (FORUM_ROLE_ADMINISTRATOR, True, None), (FORUM_ROLE_MODERATOR, False, "Staff"), (FORUM_ROLE_MODERATOR, True, None), (FORUM_ROLE_COMMUNITY_TA, False, "Community TA"), (FORUM_ROLE_COMMUNITY_TA, True, None), (FORUM_ROLE_STUDENT, False, None), (FORUM_ROLE_STUDENT, True, None), ) @ddt.unpack def test_author_labels(self, role_name, anonymous, expected_label): """ Test correctness of the author_label field. The label should be "Staff", "Staff", or "Community TA" for the Administrator, Moderator, and Community TA roles, respectively, but the label should not be present if the content is anonymous. role_name is the name of the author's role. anonymous is the value of the anonymous field in the content. expected_label is the expected value of the author_label field in the API output. """ self.create_role(role_name, [self.author]) serialized = self.serialize(self.make_cs_content({"anonymous": anonymous})) self.assertEqual(serialized["author_label"], expected_label) def test_abuse_flagged(self): serialized = self.serialize(self.make_cs_content({"abuse_flaggers": [str(self.user.id)]})) self.assertEqual(serialized["abuse_flagged"], True) def test_voted(self): thread_id = "test_thread" self.register_get_user_response(self.user, upvoted_ids=[thread_id]) serialized = self.serialize(self.make_cs_content({"id": thread_id})) self.assertEqual(serialized["voted"], True) @ddt.ddt class ThreadSerializerSerializationTest(SerializerTestMixin, SharedModuleStoreTestCase): """Tests for ThreadSerializer serialization.""" def make_cs_content(self, overrides): """ Create a thread with the given overrides, plus some useful test data. 
""" merged_overrides = { "course_id": six.text_type(self.course.id), "user_id": str(self.author.id), "username": self.author.username, "read": True, "endorsed": True, "resp_total": 0, } merged_overrides.update(overrides) return make_minimal_cs_thread(merged_overrides) def serialize(self, thread): """ Create a serializer with an appropriate context and use it to serialize the given thread, returning the result. """ return ThreadSerializer(thread, context=get_context(self.course, self.request)).data def test_basic(self): thread = make_minimal_cs_thread({ "id": "test_thread", "course_id": six.text_type(self.course.id), "commentable_id": "test_topic", "user_id": str(self.author.id), "username": self.author.username, "title": "Test Title", "body": "Test body", "pinned": True, "votes": {"up_count": 4}, "comments_count": 5, "unread_comments_count": 3, }) expected = self.expected_thread_data({ "author": self.author.username, "vote_count": 4, "comment_count": 6, "unread_comment_count": 3, "pinned": True, "editable_fields": ["abuse_flagged", "following", "read", "voted"], }) self.assertEqual(self.serialize(thread), expected) thread["thread_type"] = "question" expected.update({ "type": "question", "comment_list_url": None, "endorsed_comment_list_url": ( "http://testserver/api/discussion/v1/comments/?thread_id=test_thread&endorsed=True" ), "non_endorsed_comment_list_url": ( "http://testserver/api/discussion/v1/comments/?thread_id=test_thread&endorsed=False" ), }) self.assertEqual(self.serialize(thread), expected) def test_pinned_missing(self): """ Make sure that older threads in the comments service without the pinned field do not break serialization """ thread_data = self.make_cs_content({}) del thread_data["pinned"] self.register_get_thread_response(thread_data) serialized = self.serialize(thread_data) self.assertEqual(serialized["pinned"], False) def test_group(self): self.course.cohort_config = {"cohorted": True} modulestore().update_item(self.course, 
ModuleStoreEnum.UserID.test) cohort = CohortFactory.create(course_id=self.course.id) serialized = self.serialize(self.make_cs_content({"group_id": cohort.id})) self.assertEqual(serialized["group_id"], cohort.id) self.assertEqual(serialized["group_name"], cohort.name) def test_following(self): thread_id = "test_thread" self.register_get_user_response(self.user, subscribed_thread_ids=[thread_id]) serialized = self.serialize(self.make_cs_content({"id": thread_id})) self.assertEqual(serialized["following"], True) def test_response_count(self): thread_data = self.make_cs_content({"resp_total": 2}) self.register_get_thread_response(thread_data) serialized = self.serialize(thread_data) self.assertEqual(serialized["response_count"], 2) def test_response_count_missing(self): thread_data = self.make_cs_content({}) del thread_data["resp_total"] self.register_get_thread_response(thread_data) serialized = self.serialize(thread_data) self.assertNotIn("response_count", serialized) @ddt.ddt class CommentSerializerTest(SerializerTestMixin, SharedModuleStoreTestCase): """Tests for CommentSerializer.""" def setUp(self): super(CommentSerializerTest, self).setUp() self.endorser = UserFactory.create() self.endorsed_at = "2015-05-18T12:34:56Z" def make_cs_content(self, overrides=None, with_endorsement=False): """ Create a comment with the given overrides, plus some useful test data. """ merged_overrides = { "user_id": str(self.author.id), "username": self.author.username } if with_endorsement: merged_overrides["endorsement"] = { "user_id": str(self.endorser.id), "time": self.endorsed_at } merged_overrides.update(overrides or {}) return make_minimal_cs_comment(merged_overrides) def serialize(self, comment, thread_data=None): """ Create a serializer with an appropriate context and use it to serialize the given comment, returning the result. 
""" context = get_context(self.course, self.request, make_minimal_cs_thread(thread_data)) return CommentSerializer(comment, context=context).data def test_basic(self): comment = { "type": "comment", "id": "test_comment", "thread_id": "test_thread", "user_id": str(self.author.id), "username": self.author.username, "anonymous": False, "anonymous_to_peers": False, "created_at": "2015-04-28T00:00:00Z", "updated_at": "2015-04-28T11:11:11Z", "body": "Test body", "endorsed": False, "abuse_flaggers": [], "votes": {"up_count": 4}, "children": [], "child_count": 0, } expected = { "id": "test_comment", "thread_id": "test_thread", "parent_id": None, "author": self.author.username, "author_label": None, "created_at": "2015-04-28T00:00:00Z", "updated_at": "2015-04-28T11:11:11Z", "raw_body": "Test body", "rendered_body": "<p>Test body</p>", "endorsed": False, "endorsed_by": None, "endorsed_by_label": None, "endorsed_at": None, "abuse_flagged": False, "voted": False, "vote_count": 4, "children": [], "editable_fields": ["abuse_flagged", "voted"], "child_count": 0, } self.assertEqual(self.serialize(comment), expected) @ddt.data( *itertools.product( [ FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_STUDENT, ], [True, False] ) ) @ddt.unpack def test_endorsed_by(self, endorser_role_name, thread_anonymous): """ Test correctness of the endorsed_by field. The endorser should be anonymous iff the thread is anonymous to the requester, and the endorser is not a privileged user. endorser_role_name is the name of the endorser's role. thread_anonymous is the value of the anonymous field in the thread. 
""" self.create_role(endorser_role_name, [self.endorser]) serialized = self.serialize( self.make_cs_content(with_endorsement=True), thread_data={"anonymous": thread_anonymous} ) actual_endorser_anonymous = serialized["endorsed_by"] is None expected_endorser_anonymous = endorser_role_name == FORUM_ROLE_STUDENT and thread_anonymous self.assertEqual(actual_endorser_anonymous, expected_endorser_anonymous) @ddt.data( (FORUM_ROLE_ADMINISTRATOR, "Staff"), (FORUM_ROLE_MODERATOR, "Staff"), (FORUM_ROLE_COMMUNITY_TA, "Community TA"), (FORUM_ROLE_STUDENT, None), ) @ddt.unpack def test_endorsed_by_labels(self, role_name, expected_label): """ Test correctness of the endorsed_by_label field. The label should be "Staff", "Staff", or "Community TA" for the Administrator, Moderator, and Community TA roles, respectively. role_name is the name of the author's role. expected_label is the expected value of the author_label field in the API output. """ self.create_role(role_name, [self.endorser]) serialized = self.serialize(self.make_cs_content(with_endorsement=True)) self.assertEqual(serialized["endorsed_by_label"], expected_label) def test_endorsed_at(self): serialized = self.serialize(self.make_cs_content(with_endorsement=True)) self.assertEqual(serialized["endorsed_at"], self.endorsed_at) def test_children(self): comment = self.make_cs_content({ "id": "test_root", "children": [ self.make_cs_content({ "id": "test_child_1", "parent_id": "test_root", }), self.make_cs_content({ "id": "test_child_2", "parent_id": "test_root", "children": [ self.make_cs_content({ "id": "test_grandchild", "parent_id": "test_child_2" }) ], }), ], }) serialized = self.serialize(comment) self.assertEqual(serialized["children"][0]["id"], "test_child_1") self.assertEqual(serialized["children"][0]["parent_id"], "test_root") self.assertEqual(serialized["children"][1]["id"], "test_child_2") self.assertEqual(serialized["children"][1]["parent_id"], "test_root") 
self.assertEqual(serialized["children"][1]["children"][0]["id"], "test_grandchild") self.assertEqual(serialized["children"][1]["children"][0]["parent_id"], "test_child_2") @ddt.ddt class ThreadSerializerDeserializationTest( ForumsEnableMixin, CommentsServiceMockMixin, UrlResetMixin, SharedModuleStoreTestCase ): """Tests for ThreadSerializer deserialization.""" @classmethod @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) def setUpClass(cls): super(ThreadSerializerDeserializationTest, cls).setUpClass() cls.course = CourseFactory.create() @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) def setUp(self): super(ThreadSerializerDeserializationTest, self).setUp() httpretty.reset() httpretty.enable() self.addCleanup(httpretty.reset) self.addCleanup(httpretty.disable) self.user = UserFactory.create() self.register_get_user_response(self.user) self.request = RequestFactory().get("/dummy") self.request.user = self.user self.minimal_data = { "course_id": six.text_type(self.course.id), "topic_id": "test_topic", "type": "discussion", "title": "Test Title", "raw_body": "Test body", } self.existing_thread = Thread(**make_minimal_cs_thread({ "id": "existing_thread", "course_id": six.text_type(self.course.id), "commentable_id": "original_topic", "thread_type": "discussion", "title": "Original Title", "body": "Original body", "user_id": str(self.user.id), "username": self.user.username, "read": "False", "endorsed": "False" })) def save_and_reserialize(self, data, instance=None): """ Create a serializer with the given data and (if updating) instance, ensure that it is valid, save the result, and return the full thread data from the serializer. 
""" serializer = ThreadSerializer( instance, data=data, partial=(instance is not None), context=get_context(self.course, self.request) ) self.assertTrue(serializer.is_valid()) serializer.save() return serializer.data def test_create_minimal(self): self.register_post_thread_response({"id": "test_id", "username": self.user.username}) saved = self.save_and_reserialize(self.minimal_data) self.assertEqual( urlparse(httpretty.last_request().path).path, "/api/v1/test_topic/threads" ) self.assertEqual( httpretty.last_request().parsed_body, { "course_id": [six.text_type(self.course.id)], "commentable_id": ["test_topic"], "thread_type": ["discussion"], "title": ["Test Title"], "body": ["Test body"], "user_id": [str(self.user.id)], } ) self.assertEqual(saved["id"], "test_id") def test_create_all_fields(self): self.register_post_thread_response({"id": "test_id", "username": self.user.username}) data = self.minimal_data.copy() data["group_id"] = 42 self.save_and_reserialize(data) self.assertEqual( httpretty.last_request().parsed_body, { "course_id": [six.text_type(self.course.id)], "commentable_id": ["test_topic"], "thread_type": ["discussion"], "title": ["Test Title"], "body": ["Test body"], "user_id": [str(self.user.id)], "group_id": ["42"], } ) def test_create_missing_field(self): for field in self.minimal_data: data = self.minimal_data.copy() data.pop(field) serializer = ThreadSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual( serializer.errors, {field: ["This field is required."]} ) @ddt.data("", " ") def test_create_empty_string(self, value): data = self.minimal_data.copy() data.update({field: value for field in ["topic_id", "title", "raw_body"]}) serializer = ThreadSerializer(data=data, context=get_context(self.course, self.request)) self.assertFalse(serializer.is_valid()) self.assertEqual( serializer.errors, {field: ["This field may not be blank."] for field in ["topic_id", "title", "raw_body"]} ) def test_create_type(self): 
self.register_post_thread_response({"id": "test_id", "username": self.user.username}) data = self.minimal_data.copy() data["type"] = "question" self.save_and_reserialize(data) data["type"] = "invalid_type" serializer = ThreadSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_update_empty(self): self.register_put_thread_response(self.existing_thread.attributes) self.save_and_reserialize({}, self.existing_thread) self.assertEqual( httpretty.last_request().parsed_body, { "course_id": [six.text_type(self.course.id)], "commentable_id": ["original_topic"], "thread_type": ["discussion"], "title": ["Original Title"], "body": ["Original body"], "anonymous": ["False"], "anonymous_to_peers": ["False"], "closed": ["False"], "pinned": ["False"], "user_id": [str(self.user.id)], "read": ["False"], } ) @ddt.data(True, False) def test_update_all(self, read): self.register_put_thread_response(self.existing_thread.attributes) data = { "topic_id": "edited_topic", "type": "question", "title": "Edited Title", "raw_body": "Edited body", "read": read, } saved = self.save_and_reserialize(data, self.existing_thread) self.assertEqual( httpretty.last_request().parsed_body, { "course_id": [six.text_type(self.course.id)], "commentable_id": ["edited_topic"], "thread_type": ["question"], "title": ["Edited Title"], "body": ["Edited body"], "anonymous": ["False"], "anonymous_to_peers": ["False"], "closed": ["False"], "pinned": ["False"], "user_id": [str(self.user.id)], "read": [str(read)], } ) for key in data: self.assertEqual(saved[key], data[key]) @ddt.data("", " ") def test_update_empty_string(self, value): serializer = ThreadSerializer( self.existing_thread, data={field: value for field in ["topic_id", "title", "raw_body"]}, partial=True, context=get_context(self.course, self.request) ) self.assertFalse(serializer.is_valid()) self.assertEqual( serializer.errors, {field: ["This field may not be blank."] for field in ["topic_id", "title", "raw_body"]} ) def 
test_update_course_id(self): serializer = ThreadSerializer( self.existing_thread, data={"course_id": "some/other/course"}, partial=True, context=get_context(self.course, self.request) ) self.assertFalse(serializer.is_valid()) self.assertEqual( serializer.errors, {"course_id": ["This field is not allowed in an update."]} ) @ddt.ddt class CommentSerializerDeserializationTest(ForumsEnableMixin, CommentsServiceMockMixin, SharedModuleStoreTestCase): """Tests for ThreadSerializer deserialization.""" @classmethod def setUpClass(cls): super(CommentSerializerDeserializationTest, cls).setUpClass() cls.course = CourseFactory.create() def setUp(self): super(CommentSerializerDeserializationTest, self).setUp() httpretty.reset() httpretty.enable() self.addCleanup(httpretty.reset) self.addCleanup(httpretty.disable) self.user = UserFactory.create() self.register_get_user_response(self.user) self.request = RequestFactory().get("/dummy") self.request.user = self.user self.minimal_data = { "thread_id": "test_thread", "raw_body": "Test body", } self.existing_comment = Comment(**make_minimal_cs_comment({ "id": "existing_comment", "thread_id": "existing_thread", "body": "Original body", "user_id": str(self.user.id), "username": self.user.username, "course_id": six.text_type(self.course.id), })) def save_and_reserialize(self, data, instance=None): """ Create a serializer with the given data, ensure that it is valid, save the result, and return the full comment data from the serializer. 
""" context = get_context( self.course, self.request, make_minimal_cs_thread({"course_id": six.text_type(self.course.id)}) ) serializer = CommentSerializer( instance, data=data, partial=(instance is not None), context=context ) self.assertTrue(serializer.is_valid()) serializer.save() return serializer.data @ddt.data(None, "test_parent") def test_create_success(self, parent_id): data = self.minimal_data.copy() if parent_id: data["parent_id"] = parent_id self.register_get_comment_response({"thread_id": "test_thread", "id": parent_id}) self.register_post_comment_response( {"id": "test_comment", "username": self.user.username}, thread_id="test_thread", parent_id=parent_id ) saved = self.save_and_reserialize(data) expected_url = ( "/api/v1/comments/{}".format(parent_id) if parent_id else "/api/v1/threads/test_thread/comments" ) self.assertEqual(urlparse(httpretty.last_request().path).path, expected_url) self.assertEqual( httpretty.last_request().parsed_body, { "course_id": [six.text_type(self.course.id)], "body": ["Test body"], "user_id": [str(self.user.id)], } ) self.assertEqual(saved["id"], "test_comment") self.assertEqual(saved["parent_id"], parent_id) def test_create_all_fields(self): data = self.minimal_data.copy() data["parent_id"] = "test_parent" data["endorsed"] = True self.register_get_comment_response({"thread_id": "test_thread", "id": "test_parent"}) self.register_post_comment_response( {"id": "test_comment", "username": self.user.username}, thread_id="test_thread", parent_id="test_parent" ) self.save_and_reserialize(data) self.assertEqual( httpretty.last_request().parsed_body, { "course_id": [six.text_type(self.course.id)], "body": ["Test body"], "user_id": [str(self.user.id)], "endorsed": ["True"], } ) def test_create_parent_id_nonexistent(self): self.register_get_comment_error_response("bad_parent", 404) data = self.minimal_data.copy() data["parent_id"] = "bad_parent" context = get_context(self.course, self.request, make_minimal_cs_thread()) serializer = 
CommentSerializer(data=data, context=context) self.assertFalse(serializer.is_valid()) self.assertEqual( serializer.errors, { "non_field_errors": [ "parent_id does not identify a comment in the thread identified by thread_id." ] } ) def test_create_parent_id_wrong_thread(self): self.register_get_comment_response({"thread_id": "different_thread", "id": "test_parent"}) data = self.minimal_data.copy() data["parent_id"] = "test_parent" context = get_context(self.course, self.request, make_minimal_cs_thread()) serializer = CommentSerializer(data=data, context=context) self.assertFalse(serializer.is_valid()) self.assertEqual( serializer.errors, { "non_field_errors": [ "parent_id does not identify a comment in the thread identified by thread_id." ] } ) @ddt.data(None, -1, 0, 2, 5) def test_create_parent_id_too_deep(self, max_depth): with mock.patch("lms.djangoapps.discussion.django_comment_client.utils.MAX_COMMENT_DEPTH", max_depth): data = self.minimal_data.copy() context = get_context(self.course, self.request, make_minimal_cs_thread()) if max_depth is None or max_depth >= 0: if max_depth != 0: self.register_get_comment_response({ "id": "not_too_deep", "thread_id": "test_thread", "depth": max_depth - 1 if max_depth else 100 }) data["parent_id"] = "not_too_deep" else: data["parent_id"] = None serializer = CommentSerializer(data=data, context=context) self.assertTrue(serializer.is_valid(), serializer.errors) if max_depth is not None: if max_depth >= 0: self.register_get_comment_response({ "id": "too_deep", "thread_id": "test_thread", "depth": max_depth }) data["parent_id"] = "too_deep" else: data["parent_id"] = None serializer = CommentSerializer(data=data, context=context) self.assertFalse(serializer.is_valid()) self.assertEqual(serializer.errors, {"non_field_errors": ["Comment level is too deep."]}) def test_create_missing_field(self): for field in self.minimal_data: data = self.minimal_data.copy() data.pop(field) serializer = CommentSerializer( data=data, 
context=get_context(self.course, self.request, make_minimal_cs_thread()) ) self.assertFalse(serializer.is_valid()) self.assertEqual( serializer.errors, {field: ["This field is required."]} ) def test_create_endorsed(self): # TODO: The comments service doesn't populate the endorsement field on # comment creation, so this is sadly realistic self.register_post_comment_response({"username": self.user.username}, thread_id="test_thread") data = self.minimal_data.copy() data["endorsed"] = True saved = self.save_and_reserialize(data) self.assertEqual( httpretty.last_request().parsed_body, { "course_id": [six.text_type(self.course.id)], "body": ["Test body"], "user_id": [str(self.user.id)], "endorsed": ["True"], } ) self.assertTrue(saved["endorsed"]) self.assertIsNone(saved["endorsed_by"]) self.assertIsNone(saved["endorsed_by_label"]) self.assertIsNone(saved["endorsed_at"]) def test_update_empty(self): self.register_put_comment_response(self.existing_comment.attributes) self.save_and_reserialize({}, instance=self.existing_comment) self.assertEqual( httpretty.last_request().parsed_body, { "body": ["Original body"], "course_id": [six.text_type(self.course.id)], "user_id": [str(self.user.id)], "anonymous": ["False"], "anonymous_to_peers": ["False"], "endorsed": ["False"], } ) def test_update_all(self): cs_response_data = self.existing_comment.attributes.copy() cs_response_data["endorsement"] = { "user_id": str(self.user.id), "time": "2015-06-05T00:00:00Z", } self.register_put_comment_response(cs_response_data) data = {"raw_body": "Edited body", "endorsed": True} saved = self.save_and_reserialize(data, instance=self.existing_comment) self.assertEqual( httpretty.last_request().parsed_body, { "body": ["Edited body"], "course_id": [six.text_type(self.course.id)], "user_id": [str(self.user.id)], "anonymous": ["False"], "anonymous_to_peers": ["False"], "endorsed": ["True"], "endorsement_user_id": [str(self.user.id)], } ) for key in data: self.assertEqual(saved[key], data[key]) 
self.assertEqual(saved["endorsed_by"], self.user.username) self.assertEqual(saved["endorsed_at"], "2015-06-05T00:00:00Z") @ddt.data("", " ") def test_update_empty_raw_body(self, value): serializer = CommentSerializer( self.existing_comment, data={"raw_body": value}, partial=True, context=get_context(self.course, self.request) ) self.assertFalse(serializer.is_valid()) self.assertEqual( serializer.errors, {"raw_body": ["This field may not be blank."]} ) @ddt.data("thread_id", "parent_id") def test_update_non_updatable(self, field): serializer = CommentSerializer( self.existing_comment, data={field: "different_value"}, partial=True, context=get_context(self.course, self.request) ) self.assertFalse(serializer.is_valid()) self.assertEqual( serializer.errors, {field: ["This field is not allowed in an update."]} )
ESOedX/edx-platform
lms/djangoapps/discussion/rest_api/tests/test_serializers.py
Python
agpl-3.0
34,613
0.001416
from setuptools import setup # Replace the place holders with values for your project setup( # Do not use underscores in the plugin name. name='custom-wf-plugin', version='0.1', author='alien', author_email='alien@fastconnect.fr', description='custom generated workflows', # This must correspond to the actual packages in the plugin. packages=['plugin'], license='Apache', zip_safe=True, install_requires=[ # Necessary dependency for developing plugins, do not remove! "cloudify-plugins-common>=3.2" ], test_requires=[ "cloudify-dsl-parser>=3.2" "nose" ] )
victorkeophila/alien4cloud-cloudify3-provider
src/test/resources/outputs/blueprints/openstack/tomcat/plugins/custom_wf_plugin/setup.py
Python
apache-2.0
650
0.001538
#Created on 14 Aug 2014 #@author: neil.butcher from PySide2 import QtCore, QtWidgets from pyqt_units.CurrentUnitSetter import setter class UnitDisplay(QtWidgets.QWidget): def __init__(self, parent, measurement=None, measurementLabel='normal'): QtWidgets.QWidget.__init__(self, parent) self.layout = QtWidgets.QVBoxLayout() self._label = QtWidgets.QLabel('', self) self.layout.addWidget(self._label) self.layout.setMargin(2) self.measurement = measurement self._measurementLabel = measurementLabel setter.changed.connect(self.currentUnitChangedElsewhere) self._update() @QtCore.Slot(str, str, str) def currentUnitChangedElsewhere(self, measName, unitName, measurementLabel): if self.measurement == None: pass elif not measName == self.measurement.name: pass elif not measurementLabel == self._measurementLabel: pass else: self._updateText(unitName) def setMeasurement(self, measurement): self.measurement = measurement self._update() def setMargin(self, margin): self.layout.setMargin(margin) def _update(self): if self.measurement == None: self._updateText('') else: self._updateText(self.measurement.currentUnit(label=self._measurementLabel).name) def _updateText(self, txt): self._label.setText(txt) class UnitComboBox(QtWidgets.QWidget): def __init__(self, parent, measurement=None, measurementLabel='normal'): QtWidgets.QWidget.__init__(self, parent) self.layout = QtWidgets.QVBoxLayout(self) self.layout.setMargin(2) self._box = QtWidgets.QComboBox(self) self.layout.addWidget(self._box) self._measurementLabel = measurementLabel self._box.currentIndexChanged.connect(self.changedToIndex) setter.changed.connect(self.currentUnitChangedElsewhere) self.setMeasurement(measurement) self._update() @QtCore.Slot(str, str, str) def currentUnitChangedElsewhere(self, measName, unitName, measurementLabel): if self.measurement == None: pass elif not measName == self.measurement.name: pass elif not measurementLabel == self._measurementLabel: pass else: self._update() def setMeasurement(self, 
measurement): self.measurement = None self._box.clear() if measurement is None: pass else: self.itemslist = measurement.units namesList = [] for i in self.itemslist: namesList.append(i.name) self._box.addItems(namesList) self.measurement = measurement self._update() def setMargin(self, margin): self.layout.setMargin(margin) def _update(self): if self.measurement is None: pass else: text = self.measurement.currentUnit(label=self._measurementLabel).name pos = self._box.findText(text) if pos == -1: pos = 0 self._box.setCurrentIndex(pos) @QtCore.Slot(int) def changedToIndex(self, i): if not self.measurement == None: unit = self.itemslist[i] setter.setMeasurementUnit(self.measurement, unit, self._measurementLabel) class AddaptiveDoubleSpinBox(QtWidgets.QDoubleSpinBox): def textFromValue(self, value): s = '{0:g}'.format(value) return s class UnitSpinBox(QtWidgets.QWidget): valueChanged = QtCore.Signal(float) editingFinished = QtCore.Signal() def __init__(self, parent, measurement=None, delta=False, measurementLabel='normal'): QtWidgets.QWidget.__init__(self, parent) self.layout = QtWidgets.QVBoxLayout(self) self.layout.setMargin(2) self._box = AddaptiveDoubleSpinBox(self) self._box.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self._box.setMaximum(2.0e30) self._box.setMinimum(-2.0e30) self._box.setDecimals(12) self.layout.addWidget(self._box) self._box.valueChanged.connect(self._valueChanged) self._box.editingFinished.connect(self._editingFinished) setter.changed.connect(self.currentUnitChangedElsewhere) self.delta = delta self._baseValue = None self._measurementLabel = measurementLabel self.setMeasurement(measurement) self._update() @QtCore.Slot(str, str, str) def currentUnitChangedElsewhere(self, measName, unitName, measurementLabel): if self.measurement is None: pass elif not measName == self.measurement.name: pass elif not measurementLabel == self._measurementLabel: pass else: self._update() def setMeasurement(self, measurement): self.measurement = 
measurement self._update() def setMargin(self, margin): self.layout.setMargin(margin) def unit(self): return self.measurement.currentUnit(label=self._measurementLabel) def _update(self): if self._baseValue is None: self._box.clear() elif self.measurement is None: self._box.setValue(self._baseValue) elif self.delta: scaledValue = self.unit().scaledDeltaValueOf(self._baseValue) self._box.setValue(scaledValue) else: scaledValue = self.unit().scaledValueOf(self._baseValue) self._box.setValue(scaledValue) def setValue(self, baseValue): self._baseValue = baseValue self._update() def _valueChanged(self, scaledValue): if scaledValue is None: newValue = None elif self.measurement is None: newValue = scaledValue elif self.delta: newValue = self.unit().baseDeltaValueFrom(scaledValue) else: newValue = self.unit().baseValueFrom(scaledValue) a = self._baseValue b = newValue if a is None or abs(a - b) > max(abs(a), abs(b)) * 1e-8: self._baseValue = newValue self.valueChanged.emit(self._baseValue) def _editingFinished(self): self.editingFinished.emit() def value(self): return self._baseValue
ergoregion/pyqt-units
pyqt_units/MeasurementWidgets.py
Python
mit
6,390
0.002191
#---------------------------------------------------------------------- # Copyright (c) 2008 Board of Trustees, Princeton University # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and/or hardware specification (the "Work") to # deal in the Work without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Work, and to permit persons to whom the Work # is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Work. # # THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS # IN THE WORK. #---------------------------------------------------------------------- import re from sfa.util.faults import SfaAPIError # for convenience and smoother translation - we should get rid of these functions eventually def get_leaf(hrn): return Xrn(hrn).get_leaf() def get_authority(hrn): return Xrn(hrn).get_authority_hrn() def urn_to_hrn(urn): xrn=Xrn(urn); return (xrn.hrn, xrn.type) def hrn_to_urn(hrn,type): return Xrn(hrn, type=type).urn def hrn_authfor_hrn(parenthrn, hrn): return Xrn.hrn_is_auth_for_hrn(parenthrn, hrn) class Xrn: ########## basic tools on HRNs # split a HRN-like string into pieces # this is like split('.') except for escaped (backslashed) dots # e.g. 
hrn_split ('a\.b.c.d') -> [ 'a\.b','c','d'] @staticmethod def hrn_split(hrn): return [ x.replace('--sep--','\\.') for x in hrn.replace('\\.','--sep--').split('.') ] # e.g. hrn_leaf ('a\.b.c.d') -> 'd' @staticmethod def hrn_leaf(hrn): return Xrn.hrn_split(hrn)[-1] # e.g. hrn_auth_list ('a\.b.c.d') -> ['a\.b', 'c'] @staticmethod def hrn_auth_list(hrn): return Xrn.hrn_split(hrn)[0:-1] # e.g. hrn_auth ('a\.b.c.d') -> 'a\.b.c' @staticmethod def hrn_auth(hrn): return '.'.join(Xrn.hrn_auth_list(hrn)) # e.g. escape ('a.b') -> 'a\.b' @staticmethod def escape(token): return re.sub(r'([^\\])\.', r'\1\.', token) # e.g. unescape ('a\.b') -> 'a.b' @staticmethod def unescape(token): return token.replace('\\.','.') # Return the HRN authority chain from top to bottom. # e.g. hrn_auth_chain('a\.b.c.d') -> ['a\.b', 'a\.b.c'] @staticmethod def hrn_auth_chain(hrn): parts = Xrn.hrn_auth_list(hrn) chain = [] for i in range(len(parts)): chain.append('.'.join(parts[:i+1])) # Include the HRN itself? #chain.append(hrn) return chain # Is the given HRN a true authority over the namespace of the other # child HRN? # A better alternative than childHRN.startswith(parentHRN) # e.g. 
hrn_is_auth_for_hrn('a\.b', 'a\.b.c.d') -> True, # but hrn_is_auth_for_hrn('a', 'a\.b.c.d') -> False # Also hrn_is_auth_for_hrn('a\.b.c.d', 'a\.b.c.d') -> True @staticmethod def hrn_is_auth_for_hrn(parenthrn, hrn): if parenthrn == hrn: return True for auth in Xrn.hrn_auth_chain(hrn): if parenthrn == auth: return True return False ########## basic tools on URNs URN_PREFIX = "urn:publicid:IDN" URN_PREFIX_lower = "urn:publicid:idn" @staticmethod def is_urn (text): return text.lower().startswith(Xrn.URN_PREFIX_lower) @staticmethod def urn_full (urn): if Xrn.is_urn(urn): return urn else: return Xrn.URN_PREFIX+urn @staticmethod def urn_meaningful (urn): if Xrn.is_urn(urn): return urn[len(Xrn.URN_PREFIX):] else: return urn @staticmethod def urn_split (urn): return Xrn.urn_meaningful(urn).split('+') @staticmethod def filter_type(urns=None, type=None): if urns is None: urns=[] urn_list = [] if not type: return urns for urn in urns: xrn = Xrn(xrn=urn) if (xrn.type == type): # Xrn is probably a urn so we can just compare types urn_list.append(urn) return urn_list #################### # the local fields that are kept consistent # self.urn # self.hrn # self.type # self.path # provide either urn, or (hrn + type) def __init__ (self, xrn="", type=None, id=None): if not xrn: xrn = "" # user has specified xrn : guess if urn or hrn self.id = id if Xrn.is_urn(xrn): self.hrn=None self.urn=xrn if id: self.urn = "%s:%s" % (self.urn, str(id)) self.urn_to_hrn() else: self.urn=None self.hrn=xrn self.type=type self.hrn_to_urn() self._normalize() # happens all the time .. 
# if not type: # debug_logger.debug("type-less Xrn's are not safe") def __repr__ (self): result="<XRN u=%s h=%s"%(self.urn,self.hrn) if hasattr(self,'leaf'): result += " leaf=%s"%self.leaf if hasattr(self,'authority'): result += " auth=%s"%self.authority result += ">" return result def get_urn(self): return self.urn def get_hrn(self): return self.hrn def get_type(self): return self.type def get_hrn_type(self): return (self.hrn, self.type) def _normalize(self): if self.hrn is None: raise SfaAPIError, "Xrn._normalize" if not hasattr(self,'leaf'): self.leaf=Xrn.hrn_split(self.hrn)[-1] # self.authority keeps a list if not hasattr(self,'authority'): self.authority=Xrn.hrn_auth_list(self.hrn) def get_leaf(self): self._normalize() return self.leaf def get_authority_hrn(self): self._normalize() return '.'.join( self.authority ) def get_authority_urn(self): self._normalize() return ':'.join( [Xrn.unescape(x) for x in self.authority] ) def set_authority(self, authority): """ update the authority section of an existing urn """ authority_hrn = self.get_authority_hrn() if not authority_hrn.startswith(authority): hrn = ".".join([authority,authority_hrn, self.get_leaf()]) else: hrn = ".".join([authority_hrn, self.get_leaf()]) self.hrn = hrn self.hrn_to_urn() self._normalize() # sliver_id_parts is list that contains the sliver's # slice id and node id def get_sliver_id_parts(self): sliver_id_parts = [] if self.type == 'sliver' or '-' in self.leaf: sliver_id_parts = self.leaf.split('-') return sliver_id_parts def urn_to_hrn(self): """ compute tuple (hrn, type) from urn """ # if not self.urn or not self.urn.startswith(Xrn.URN_PREFIX): if not Xrn.is_urn(self.urn): raise SfaAPIError, "Xrn.urn_to_hrn" parts = Xrn.urn_split(self.urn) type=parts.pop(2) # Remove the authority name (e.g. '.sa') if type == 'authority': name = parts.pop() # Drop the sa. 
This is a bad hack, but its either this # or completely change how record types are generated/stored if name != 'sa': type = type + "+" + name name ="" else: name = parts.pop(len(parts)-1) # convert parts (list) into hrn (str) by doing the following # 1. remove blank parts # 2. escape dots inside parts # 3. replace ':' with '.' inside parts # 3. join parts using '.' hrn = '.'.join([Xrn.escape(part).replace(':','.') for part in parts if part]) # dont replace ':' in the name section if name: parts = name.split(':') if len(parts) > 1: self.id = ":".join(parts[1:]) name = parts[0] hrn += '.%s' % Xrn.escape(name) self.hrn=str(hrn) self.type=str(type) def hrn_to_urn(self): """ compute urn from (hrn, type) """ # if not self.hrn or self.hrn.startswith(Xrn.URN_PREFIX): if Xrn.is_urn(self.hrn): raise SfaAPIError, "Xrn.hrn_to_urn, hrn=%s"%self.hrn if self.type and self.type.startswith('authority'): self.authority = Xrn.hrn_auth_list(self.hrn) leaf = self.get_leaf() #if not self.authority: # self.authority = [self.hrn] type_parts = self.type.split("+") self.type = type_parts[0] name = 'sa' if len(type_parts) > 1: name = type_parts[1] auth_parts = [part for part in [self.get_authority_urn(), leaf] if part] authority_string = ":".join(auth_parts) else: self.authority = Xrn.hrn_auth_list(self.hrn) name = Xrn.hrn_leaf(self.hrn) authority_string = self.get_authority_urn() if self.type == None: urn = "+".join(['',authority_string,Xrn.unescape(name)]) else: urn = "+".join(['',authority_string,self.type,Xrn.unescape(name)]) if hasattr(self, 'id') and self.id: urn = "%s:%s" % (urn, self.id) self.urn = Xrn.URN_PREFIX + urn def dump_string(self): result="-------------------- XRN\n" result += "URN=%s\n"%self.urn result += "HRN=%s\n"%self.hrn result += "TYPE=%s\n"%self.type result += "LEAF=%s\n"%self.get_leaf() result += "AUTH(hrn format)=%s\n"%self.get_authority_hrn() result += "AUTH(urn format)=%s\n"%self.get_authority_urn() return result
onelab-eu/sfa
sfa/util/xrn.py
Python
mit
10,231
0.010849
"""Hello World API implemented using Google Cloud Endpoints. Contains declarations of endpoint, endpoint methods, as well as the ProtoRPC message class and container required for endpoint method definition. """ import endpoints from protorpc import messages from protorpc import message_types from protorpc import remote # If the request contains path or querystring arguments, # you cannot use a simple Message class. # Instead, you must use a ResourceContainer class REQUEST_CONTAINER = endpoints.ResourceContainer( message_types.VoidMessage, name=messages.StringField(1) ) REQUEST_GREETING_CONTAINER = endpoints.ResourceContainer( period=messages.StringField(1), name=messages.StringField(2) ) package = 'Hello' class Hello(messages.Message): """String that stores a message.""" greeting = messages.StringField(1) @endpoints.api(name='helloworldendpoints', version='v1') class HelloWorldApi(remote.Service): """Helloworld API v1.""" @endpoints.method(message_types.VoidMessage, Hello, path = "sayHello", http_method='GET', name = "sayHello") def say_hello(self, request): return Hello(greeting="Hello World") @endpoints.method(REQUEST_CONTAINER, Hello, path = "sayHelloByName", http_method='GET', name = "sayHelloByName") def say_hello_by_name(self, request): greet = "Hello {}".format(request.name) return Hello(greeting=greet) @endpoints.method(REQUEST_GREETING_CONTAINER, Hello, path = "greetByPeriod", http_method='GET', name = "greetByPeriod") def greet_by_period(self, request): greet = "Good {}, {}!".format(request.period, request.name) return Hello(greeting=greet) APPLICATION = endpoints.api_server([HelloWorldApi])
paul-jean/ud858
Lesson_2/000_Hello_Endpoints/helloworld_api.py
Python
gpl-3.0
1,742
0.012055
""" Dictionary with lazy evaluation on access, via a supplied update function """ import itertools class LazyDict(dict): """ A dictionary type that lazily updates values when they are accessed. All the usual dictionary methods work as expected, with automatic lazy updates occuring behind the scenes whenever values are read from the dictionary. The optional ``items`` argument, if specified, is a mapping instance used to initialise the items in the :class:`LazyDict`. The ``update_value`` argument required by the :class:`LazyDict` constructor must be a function of the form: update_value(k, existing_value, member) -> updated_value This function is called whenever an item with the key ``k`` is read from the :class:`LazyDict`. The second argument ``existing_value``, is the value corresponding to the key ``k`` stored in the :class:`LazyDict`, or ``None``, if the key ``k`` is not contained in the :class:`LazyDict`. The third argument ``member`` is a boolean value indicating if there is an existing value stored under the key ``k``. This function is used as follows by the :class:`LazyDict`. Suppose that the value ``v`` has been stored in a :class:`LazyDict` object ``lazy_dict`` under the key ``k``, that is, ``lazy_dict[k] = v``. Then subsequently accessing this value in the usual manner:: v_updated = lazy_dict[k] is equivalent to the following two statements:: lazy_dict[k] = update_value(k, v, (k in lazy_dict)) v_updated = update_value(k, v, (k in lazy_dict)) Observe how the value stored in the :class:`LazyDict` under the key ``k`` is first updated, using the provided function, with the updated value then being the one returned. """ def __init__(self, update_value, items = None): """ Returns a LazyDict using the specified ``update_value`` function and optional initial dictionary arguments. 
""" self.update_value = update_value if items is None: dict.__init__(self) else: dict.__init__(items) def __getitem__(self, key): member = dict.__contains__(self, key) if member: existing_value = dict.__getitem__(self, key) else: existing_value = None # ensure measurement is up to date updated_value = self.update_value(key, existing_value, member) self[key] = updated_value return updated_value def copy(self): return LazyDict(self.update_value, dict.copy(self)) def itervalues(self): return itertools.imap((lambda k : self[k]), dict.iterkeys(self)) def iteritems(self): return itertools.imap((lambda k : (k, self[k])), dict.iterkeys(self)) def pop(self, *args): n_args = len(args) if n_args < 1: raise TypeError('pop expected at least 1 argument, got %d' % n_args) if n_args > 2: raise TypeError('pop expected at most 2 arguments, got %d' % n_args) k = args[0] if k in self: value = self[k] del self[k] return value else: if n_args == 2: return args[1] else: raise KeyError(str(k)) def popitem(self): key, value = dict.popitem(self) self[key] = value updated_value = self[key] del self[key] return key, updated_value def setdefault(self, k, x=None): if k in self: return self[k] else: self[k] = x return x def get(self, k, x=None): if k in self: return self[k] else: return x def values(self): return list(self.itervalues()) def items(self): return list(self.iteritems())
vikramsunkara/PyME
pyme/lazy_dict.py
Python
agpl-3.0
3,974
0.007549
VERSION = (2, 0, 4, 'final', 0)


def get_version():
    """
    Returns a PEP 386-compliant version number from VERSION.
    """
    assert len(VERSION) == 5
    assert VERSION[3] in ('alpha', 'beta', 'rc', 'final')

    # Main part is X.Y when the micro version is zero, X.Y.Z otherwise.
    significant = VERSION[:2] if VERSION[2] == 0 else VERSION[:3]
    main = '.'.join(str(piece) for piece in significant)

    # Pre-releases carry an {a|b|c}N suffix; final releases carry none.
    if VERSION[3] == 'final':
        suffix = ''
    else:
        suffix = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}[VERSION[3]] + str(VERSION[4])

    return str(main + suffix)
opencloudinfra/orchestrator
venv/Lib/site-packages/registration/__init__.py
Python
gpl-3.0
666
0
import logging
import os

from lib.Settings import Settings
from lib.Wrappers.NullLogger import NullLogger


class Logger:
    """Project logging facade.

    Delegates to a stdlib logger configured from the application settings,
    or to a NullLogger when running under unit tests (UNITTESTING env var).
    """

    def __init__(self, name):
        # Unit tests must not read settings files or emit real log records.
        if 'UNITTESTING' in os.environ:
            self.logging = NullLogger()
            return
        config = Settings().getSettings()
        logging.basicConfig(
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            level=config["logs"]["level"])
        self.logging = logging.getLogger(name)

    def debug(self, *message, **extra):
        self.logging.debug(*message, **extra)

    def info(self, *message, **extra):
        self.logging.info(*message, **extra)

    def warning(self, *message, **extra):
        self.logging.warning(*message, **extra)

    def error(self, *message, **extra):
        self.logging.error(*message, **extra)

    def critical(self, *message, **extra):
        self.logging.critical(*message, **extra)

    def log(self, *message, **extra):
        self.logging.log(*message, **extra)
Open365/Open365
lib/Wrappers/Logger.py
Python
agpl-3.0
1,007
0.000993
from epumgmt.api.actions import ACTIONS
from epumgmt.main import ControlArg

import optparse

# Registry of every ControlArg exposed on the command line; ALL_EC_ARGS_LIST
# is the public alias consumed by the argument-parsing machinery. Args whose
# append is commented out are declared (and documented) but not yet exposed.
a = []
ALL_EC_ARGS_LIST = a

################################################################################
# EM ARGUMENTS
#
# The following cmdline arguments may be queried via Parameters, using either
# the 'name' as the argument or simply the object like:
#
# params.get_arg_or_none(em_args.GRACE_PERIOD)
#
################################################################################

ACTION = ControlArg("action", "-a")
ACTION.help = optparse.SUPPRESS_HELP
a.append(ACTION)

CONF = ControlArg("conf", "-c", metavar="PATH")
a.append(CONF)
CONF.help = "Absolute path to main.conf. Required (shell script adds the default)."

# Declared but not registered (help text says: not implemented yet).
DRYRUN = ControlArg("dryrun", None, noval=True)
#a.append(DRYRUN)
DRYRUN.help = "Do as little real things as possible, will still affect filesystem, for example logs and information persistence. (not implemented yet)"

KILLNUM = ControlArg("killnum", "-k", metavar="NUM")
a.append(KILLNUM)
KILLNUM.help = "For the fetchkill action, number of VMs to terminate."

NAME = ControlArg("name", "-n", metavar="RUN_NAME")
a.append(NAME)
NAME.help = "Unique run name for logs and management. Can use across multiple invocations for launches that belong together."

GRAPH_NAME = ControlArg("graphname", "-r", metavar="GRAPH_NAME")
a.append(GRAPH_NAME)
GRAPH_NAME.help = "For the generate-graph action, name of graph to generate: stacked-vms, job-tts, job-rate, node-info, or controller."

GRAPH_TYPE = ControlArg("graphtype", "-t", metavar="GRAPH_TYPE")
a.append(GRAPH_TYPE)
GRAPH_TYPE.help = "For the generate-graph action, output file type: eps or png."

WORKLOAD_FILE = ControlArg("workloadfilename", "-f", metavar="WORKLOAD_FILE")
a.append(WORKLOAD_FILE)
WORKLOAD_FILE.help = "For the execute-workload-test action, file name of workload definition file."

WORKLOAD_TYPE = ControlArg("workloadtype", "-w", metavar="WORKLOAD_TYPE")
a.append(WORKLOAD_TYPE)
WORKLOAD_TYPE.help = "For the execute-workload-test and generate-graph actions: amqp or torque"

CLOUDINITD_DIR = ControlArg("cloudinitdir", "-C", metavar="PATH")
a.append(CLOUDINITD_DIR)
CLOUDINITD_DIR.help = "Path to the directory where cloudinit databases are kept. default is ~/.cloudinit"

# Batch-mode reporting args: declared but currently not registered.
REPORT_INSTANCE = ControlArg("instance-report", None, metavar="COLUMNS")
#a.append(REPORT_INSTANCE)
REPORT_INSTANCE.help = "Used with '--action %s'. Batch mode for machine parsing instance status. Report selected columns from choice of the following separated by comma: service,instanceid,iaas_state,iaas_state_time,heartbeat_time,heartbeat_state" % ACTIONS.STATUS

REPORT_SERVICE = ControlArg("service-report", None, metavar="COLUMNS")
#a.append(REPORT_SERVICE)
REPORT_SERVICE.help = "Used with '--action %s'. Batch mode for machine parsing service status. Report selected columns from choice of the following separated by comma: service,de_state,de_conf" % ACTIONS.STATUS

STATUS_NOUPDATE = ControlArg("no-update", None, noval=True)
a.append(STATUS_NOUPDATE)
STATUS_NOUPDATE.help = "Used with '--action %s'. If used, %s does not try to find any new information." % (ACTIONS.STATUS, ACTIONS.STATUS)

KILLRUN_NOFETCH = ControlArg("no-fetch", None, noval=True)
a.append(KILLRUN_NOFETCH)
KILLRUN_NOFETCH.help = "Can be used with action %s and %s. If used, does not try to find any new information or get any logs." % (ACTIONS.KILLRUN, ACTIONS.FIND_VERSIONS)

WRITE_REPORT = ControlArg("write-report", None, metavar="PATH")
a.append(WRITE_REPORT)
WRITE_REPORT.help = "Used with action %s. Also write report to the given path if it does not exist." % ACTIONS.FIND_VERSIONS

NEWN = ControlArg("newn", None)
a.append(NEWN)
NEWN.help = "Used with '--action %s'. Syntax is controller_name:N[,controller_name:N,...]" % ACTIONS.RECONFIGURE_N

CONTROLLER = ControlArg("controller", None)
a.append(CONTROLLER)
CONTROLLER.help = "Some actions only work on a specific controller"
nimbusproject/epumgmt
src/python/epumgmt/main/em_args.py
Python
apache-2.0
3,947
0.00532
import pingo

'''
In order to use this set of cases, it is necessary to set the following attributes on your TestCase setUp:
    self.analog_input_pin_number = 0
    self.expected_analog_input = 1004
    self.expected_analog_ratio = 0.98
'''


class AnalogReadBasics(object):
    '''
    Wire a 10K Ohm resistance from the AnalogPin to the GND.
    Then wire a 200 Ohm from the AnalogPin to the VND (sic; presumably
    the supply rail / VCC -- confirm against the board wiring docs).
    This schema will provide a read of ~98%
    '''

    def test_200ohmRead(self):
        # Raw ADC reading should sit within +/-3 counts of the expected value.
        pin = self.board.pins[self.analog_input_pin_number]
        pin.mode = pingo.ANALOG
        _input = pin.value
        # print "Value Read: ", _input
        assert self.expected_analog_input - 3 <= _input <= self.expected_analog_input + 3

    def test_pin_ratio(self):
        # Map the full ADC range onto [0.0, 1.0] and compare with the
        # expected voltage-divider ratio.
        pin = self.board.pins[self.analog_input_pin_number]
        pin.mode = pingo.ANALOG
        bits_resolution = (2 ** pin.bits) - 1
        _input = pin.ratio(0, bits_resolution, 0.0, 1.0)
        # print "Value Read: ", _input
        # NOTE(review): 10e-1 == 1.0, so this tolerance is far looser than
        # the "two decimal places" the original comment claimed; 1e-2 was
        # probably intended -- confirm before tightening.
        assert abs(_input - self.expected_analog_ratio) < 10e-1


class AnalogExceptions(object):

    def test_wrong_output_mode(self):
        # Setting an analog input pin to OUT must raise (analog output is
        # not supported by this pin).
        pin = self.board.pins[self.analog_input_pin_number]
        with self.assertRaises(pingo.ModeNotSuported):
            pin.mode = pingo.OUT
garoa/pingo
pingo/test/level1/cases.py
Python
mit
1,331
0.000751
#!/usr/bin/env python

import sys
import argparse
import os

import unittest2 as unittest

from ruamel import yaml

from smacha.util import Tester

import rospy
import rospkg
import rostest

# Defaults; several of these may be overridden by the YAML config file read
# in the __main__ block below.
ROS_TEMPLATES_DIR = '../src/smacha_ros/templates'
TEMPLATES_DIR = 'smacha_templates/smacha_test_examples'
WRITE_OUTPUT_FILES = False
OUTPUT_PY_DIR = '/tmp/smacha/smacha_test_examples/smacha_generated_py'
OUTPUT_YML_DIR = '/tmp/smacha/smacha_test_examples/smacha_generated_scripts'
CONF_FILE = 'test_examples_config.yml'
DEBUG_LEVEL = 1
CONF_DICT = {}


class TestGenerate(Tester):
    """Tester class for general unit testing of various SMACHA tool functionalities.

    The tests run by this class are performed by generating code using SMACHA
    scripts and templates and comparing the generated output code to the
    expected code from hand-written code samples.

    This includes testing both SMACHA YAML scripts generated by, e.g. the
    :func:`smacha.parser.contain` and :func:`smacha.parser.extract` methods,
    and Python code generated by the :func:`smacha.generator.run` method.
    """

    def __init__(self, *args, **kwargs):
        # Set Tester member variables
        self.set_write_output_files(WRITE_OUTPUT_FILES)
        self.set_output_py_dir(OUTPUT_PY_DIR)
        self.set_output_yml_dir(OUTPUT_YML_DIR)
        self.set_debug_level(DEBUG_LEVEL)

        # Store the base path
        self._base_path = os.path.dirname(os.path.abspath(__file__))

        # Call the parent constructor
        super(TestGenerate, self).__init__(
            *args,
            script_dirs=[os.path.join(self._base_path, 'smacha_scripts/smacha_test_examples')],
            template_dirs=[
                os.path.join(self._base_path, ROS_TEMPLATES_DIR),
                os.path.join(self._base_path, TEMPLATES_DIR)
            ],
            **kwargs)

    def test_generate(self):
        """Test generating against baseline files"""
        for test_case in CONF_DICT['TEST_GENERATE']:
            with self.subTest(test_case=test_case):
                # NOTE(review): test_case.values()[0] is Python-2-only
                # (dict views are not indexable on Python 3).
                test_params = test_case.values()[0]
                script_file = test_params['script']
                baseline = test_params['baseline']
                with open(os.path.join(self._base_path, 'smacha_test_examples/{}'.format(baseline))) as original_file:
                    generated_code = self._strip_uuids(self._generate(os.path.join(self._base_path, 'smacha_scripts/smacha_test_examples/{}'.format(script_file))))
                    original_code = original_file.read()
                    self.assertTrue(self._compare(generated_code, original_code, file_a='generated', file_b='original'))


if __name__=="__main__":
    # Read the configuration file before parsing arguments,
    try:
        base_path = os.path.dirname(os.path.abspath(__file__))
        conf_file_loc = os.path.join(base_path, CONF_FILE)
        # NOTE(review): file handle is never closed; consider a `with` block.
        f = open(conf_file_loc)
        CONF_DICT = yaml.load(f)
    except Exception as e:
        print('Failed to read the configuration file. See error:\n{}'.format(e))
        exit()

    # NOTE(review): dict.has_key is Python-2-only; `key in CONF_DICT` works
    # on both interpreters.
    if CONF_DICT.has_key('WRITE_OUTPUT_FILES'):
        WRITE_OUTPUT_FILES = CONF_DICT['WRITE_OUTPUT_FILES']
    if CONF_DICT.has_key('OUTPUT_PY_DIR'):
        OUTPUT_PY_DIR = CONF_DICT['OUTPUT_PY_DIR']
    if CONF_DICT.has_key('OUTPUT_YML_DIR'):
        OUTPUT_YML_DIR = CONF_DICT['OUTPUT_YML_DIR']
    if CONF_DICT.has_key('DEBUG_LEVEL'):
        DEBUG_LEVEL = CONF_DICT['DEBUG_LEVEL']

    rospy.init_node('test_smacha_ros_generate',log_level=rospy.DEBUG)

    rostest.rosrun('smacha_ros', 'test_smacha_ros_generate', TestGenerate)
ReconCell/smacha
smacha_ros/test/smacha_diff_test_examples.py
Python
bsd-3-clause
3,604
0.003607
import datetime
import math
import time

import ephem
from PyQt5 import QtCore

from src.business.EphemObserverFactory import EphemObserverFactory
from src.business.configuration.configProject import ConfigProject
from src.business.configuration.settingsCamera import SettingsCamera
from src.business.consoleThreadOutput import ConsoleThreadOutput
from src.business.shooters.ContinuousShooterThread import ContinuousShooterThread


class EphemerisShooter(QtCore.QThread):
    '''
        Automatic-mode shooter: a QThread that watches sun/moon ephemerides
        and starts/stops continuous photo acquisition accordingly.
        (original docstring: "classe para modo automatico")
    '''
    signal_started_shooting = QtCore.pyqtSignal(name="signalStartedShooting")
    signal_temp = QtCore.pyqtSignal(name="signalTemp")

    def __init__(self):
        super(EphemerisShooter, self).__init__()
        self.camconfig = SettingsCamera()
        self.camconfig.setup_settings()
        infocam = self.camconfig.get_camera_settings()
        self.ObserverFactory = EphemObserverFactory()
        # infocam[4] is the inter-shot sleep time in seconds (see self.s below)
        self.continuousShooterThread = ContinuousShooterThread(int(infocam[4]))
        self.console = ConsoleThreadOutput()

        self.config = ConfigProject()
        info = self.config.get_geographic_settings()
        self.latitude = info[0]  # '-45.51'
        self.longitude = info[1]  # '-23.12'
        self.elevation = info[2]  # 350

        info_sun = self.config.get_moonsun_settings()
        self.max_solar_elevation = float(info_sun[0])  # -12
        self.ignore_lunar_position = info_sun[1]
        self.max_lunar_elevation = float(info_sun[2])  # 8
        self.max_lunar_phase = float(info_sun[3])  # 1

        self.wait_temperature = False

        print(int(infocam[4]))
        try:
            self.s = int(infocam[4])
            self.continuousShooterThread.set_sleep_time(self.s)
        except Exception as e:
            # fall back to a 5 s sleep when the camera setting is unparsable
            self.s = 5

        self.shootOn = False
        self.controller = True
        self.count = 1

    def refresh_data(self):
        # Re-read geographic and sun/moon thresholds from the project config;
        # zero everything out if the settings are missing or malformed.
        try:
            info = self.config.get_geographic_settings()
            self.latitude = info[0]  # '-45.51'
            self.longitude = info[1]  # '-23.12'
            self.elevation = info[2]  # 350
            infosun = self.config.get_moonsun_settings()
            self.max_solar_elevation = float(infosun[0])  # -12
            self.ignore_lunar_position = infosun[1]
            self.max_lunar_elevation = float(infosun[2])  # 8
            self.max_lunar_phase = float(infosun[3])  # 1
        except Exception as e:
            self.console.raise_text("Exception thrown to acquire information\n"
                                    "Please set an observatory information on settings\n" + str(e),
                                    level=3)
            self.latitude = 0
            self.longitude = 0
            self.elevation = 0
            self.max_solar_elevation = 0
            self.max_lunar_elevation = 0
            self.max_lunar_phase = 0

        infocam = self.camconfig.get_camera_settings()
        try:
            self.s = int(infocam[4])
        except Exception as e:
            self.s = 0

    def calculate_moon(self, obs):
        # Previous moonset / next moonrise for an 8-degree horizon.
        aux = obs
        aux.compute_pressure()
        aux.horizon = '8'
        moon = ephem.Moon(aux)
        return aux.previous_setting(moon), aux.next_rising(moon)

    def calculate_sun(self, obs):
        # Previous sunset / next sunrise for a -12-degree (nautical) horizon.
        aux = obs
        aux.compute_pressure()
        aux.horizon = '-12'
        sun = ephem.Sun(aux)
        return aux.previous_setting(sun), aux.next_rising(sun)

    def set_solar_and_lunar_parameters(self, maxSolarElevation, maxLunarElevation, maxLunarPhase):
        self.max_solar_elevation = maxSolarElevation
        self.max_lunar_elevation = maxLunarElevation
        self.max_lunar_phase = maxLunarPhase

    def run(self):
        # Main polling loop (every ~5 s): start shooting when the sun is low
        # enough (and, unless ignored, the moon is low and dim enough); stop
        # shooting otherwise. Runs until stop_shooter() clears controller.
        self.refresh_data()
        obs = self.ObserverFactory.create_observer(longitude=self.longitude,
                                                   latitude=self.latitude,
                                                   elevation=self.elevation)
        self.controller = True
        self.shootOn = False
        c = 0
        try:
            while self.controller:
                obs.date = ephem.date(datetime.datetime.utcnow())
                sun = ephem.Sun(obs)
                moon = ephem.Moon(obs)
                frac = moon.moon_phase
                a = ephem.degrees(sun.alt)
                b = ephem.degrees(str(moon.alt))
                # Shooter control variable.
                # NOTE(review): t is always 0, so the `or t == 1` below is a
                # dead branch; likewise `self.t = False` further down sets an
                # attribute nothing reads. Looks like leftover debug scaffolding.
                t = 0
                # print("\n\n")
                # print("math.degrees(a) = " + str(math.degrees(a)))
                # print("self.max_solar_elevation = " + str(self.max_solar_elevation))
                # print("self.ignore_lunar_position = " + str(self.ignore_lunar_position))
                # print("math.degrees(b) = " + str(math.degrees(b)))
                # print("self.max_lunar_elevation = " + str(self.max_lunar_elevation))
                # print("self.max_lunar_phase = " + str(self.max_lunar_phase))
                # print("\n\n")
                if float(math.degrees(a)) < self.max_solar_elevation or t == 1:
                    if (not self.ignore_lunar_position and float(math.degrees(b)) < self.max_lunar_elevation and frac < self.max_lunar_phase) or self.ignore_lunar_position:
                        if not self.shootOn:
                            if not c:
                                self.signal_started_shooting.emit()
                                c = 1
                            self.signal_temp.emit()
                            time.sleep(5)
                            if self.wait_temperature:
                                # Start the observations
                                self.start_taking_photo()
                                self.shootOn = True
                else:
                    if self.shootOn:
                        # Stop the observations
                        self.stop_taking_photo()
                        c = 0
                        self.t = False
                        self.shootOn = False
                time.sleep(5)
        except Exception as e:
            self.console.raise_text("Exception no Ephemeris Shooter -> " + str(e))

    def stop_shooter(self):
        # Ends the run() loop and halts any in-progress continuous shooting.
        self.controller = False
        self.continuousShooterThread.stop_continuous_shooter()

    def start_taking_photo(self):
        self.continuousShooterThread.set_sleep_time(self.s)
        self.continuousShooterThread.start_continuous_shooter()
        self.continuousShooterThread.start()

    def stop_taking_photo(self):
        self.continuousShooterThread.stop_continuous_shooter()
pliniopereira/ccd10
src/business/shooters/EphemerisShooter.py
Python
gpl-3.0
6,596
0.001365
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    determine_ext,
    int_or_none,
    sanitized_Request,
)


class VoiceRepublicIE(InfoExtractor):
    """Extractor for voicerepublic.com talk pages and embeds."""
    _VALID_URL = r'https?://voicerepublic\.com/(?:talks|embed)/(?P<id>[0-9a-z-]+)'
    _TESTS = [{
        'url': 'http://voicerepublic.com/talks/watching-the-watchers-building-a-sousveillance-state',
        'md5': 'b9174d651323f17783000876347116e3',
        'info_dict': {
            'id': '2296',
            'display_id': 'watching-the-watchers-building-a-sousveillance-state',
            'ext': 'm4a',
            'title': 'Watching the Watchers: Building a Sousveillance State',
            'description': 'Secret surveillance programs have metadata too. The people and companies that operate secret surveillance programs can be surveilled.',
            'thumbnail': r're:^https?://.*\.(?:png|jpg)$',
            'duration': 1800,
            'view_count': int,
        }
    }, {
        'url': 'http://voicerepublic.com/embed/watching-the-watchers-building-a-sousveillance-state',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        # Embed URLs are normalized to the canonical /talks/ page.
        req = sanitized_Request(
            compat_urlparse.urljoin(url, '/talks/%s' % display_id))
        # Older versions of Firefox get redirected to an "upgrade browser" page
        req.add_header('User-Agent', 'youtube-dl')
        webpage = self._download_webpage(req, display_id)

        if '>Queued for processing, please stand by...<' in webpage:
            raise ExtractorError(
                'Audio is still queued for processing', expected=True)

        # Preferred path: a JSON blob returned by inline page JavaScript.
        config = self._search_regex(
            r'(?s)return ({.+?});\s*\n', webpage,
            'data', default=None)
        data = self._parse_json(config, display_id, fatal=False) if config else None
        if data:
            title = data['title']
            description = data.get('teaser')
            talk_id = compat_str(data.get('talk_id') or display_id)
            talk = data['talk']
            duration = int_or_none(talk.get('duration'))
            # talk['links'] maps format id -> audio URL.
            formats = [{
                'url': compat_urlparse.urljoin(url, talk_url),
                'format_id': format_id,
                'ext': determine_ext(talk_url) or format_id,
                'vcodec': 'none',
            } for format_id, talk_url in talk['links'].items()]
        else:
            # Fallback path: scrape the rendered HTML directly.
            title = self._og_search_title(webpage)
            description = self._html_search_regex(
                r"(?s)<div class='talk-teaser'[^>]*>(.+?)</div>",
                webpage, 'description', fatal=False)
            talk_id = self._search_regex(
                [r"id='jc-(\d+)'", r"data-shareable-id='(\d+)'"],
                webpage, 'talk id', default=None) or display_id
            duration = None
            player = self._search_regex(
                r"class='vr-player jp-jplayer'([^>]+)>", webpage, 'player')
            # The jPlayer tag carries one data-<format>='<url>' per format.
            formats = [{
                'url': compat_urlparse.urljoin(url, talk_url),
                'format_id': format_id,
                'ext': determine_ext(talk_url) or format_id,
                'vcodec': 'none',
            } for format_id, talk_url in re.findall(r"data-([^=]+)='([^']+)'", player)]
        self._sort_formats(formats)

        thumbnail = self._og_search_thumbnail(webpage)
        view_count = int_or_none(self._search_regex(
            r"class='play-count[^']*'>\s*(\d+) plays",
            webpage, 'play count', fatal=False))

        return {
            'id': talk_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
        }
valmynd/MediaFetcher
src/plugins/youtube_dl/youtube_dl/extractor/voicerepublic.py
Python
gpl-3.0
3,272
0.025978
# Copyright 2018-present Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function import errno import hashlib import os import shutil import sys import tempfile import textwrap import file_locks from tracing import Tracing def get_file_contents_if_exists(path, default=None): with Tracing("BuckProject.get_file_contents_if_it_exists", args={"path": path}): if not os.path.exists(path): return default with open(path) as f: contents = f.read().strip() return default if not contents else contents def write_contents_to_file(path, contents): with Tracing("BuckProject.write_contents_to_file", args={"path": path}): with open(path, "w") as output_file: output_file.write(str(contents)) def makedirs(path): try: os.makedirs(path) except OSError as e: # Potentially the case that multiple processes are running in parallel # (e.g. a series of linters running buck query without buckd), so we # should just swallow the error. # This is mostly equivalent to os.makedirs(path, exist_ok=True) in # Python 3. if e.errno != errno.EEXIST and os.path.isdir(path): raise class BuckProject: def __init__(self, root): self.root = root self._buck_out = os.path.join(root, "buck-out") buck_out_tmp = os.path.join(self._buck_out, "tmp") makedirs(buck_out_tmp) self._buck_out_log = os.path.join(self._buck_out, "log") makedirs(self._buck_out_log) self.tmp_dir = tempfile.mkdtemp(prefix="buck_run.", dir=buck_out_tmp) # Only created if buckd is used. 
self.buckd_tmp_dir = None self.buckd_dir = os.path.join(root, ".buckd") self.buckd_version_file = os.path.join(self.buckd_dir, "buckd.version") self.buckd_pid_file = os.path.join(self.buckd_dir, "pid") self.buckd_stdout = os.path.join(self.buckd_dir, "stdout") self.buckd_stderr = os.path.join(self.buckd_dir, "stderr") buck_javaargs_path = os.path.join(self.root, ".buckjavaargs") self.buck_javaargs = get_file_contents_if_exists(buck_javaargs_path) buck_javaargs_path_local = os.path.join(self.root, ".buckjavaargs.local") self.buck_javaargs_local = get_file_contents_if_exists(buck_javaargs_path_local) def get_root_hash(self): return hashlib.sha256(self.root.encode("utf-8")).hexdigest() def get_buckd_transport_file_path(self): if os.name == "nt": return u"\\\\.\\pipe\\buckd_{0}".format(self.get_root_hash()) else: return os.path.join(self.buckd_dir, "sock") def get_buckd_transport_address(self): if os.name == "nt": return "local:buckd_{0}".format(self.get_root_hash()) else: return "local:.buckd/sock" def get_running_buckd_version(self): return get_file_contents_if_exists(self.buckd_version_file) def get_running_buckd_pid(self): try: return int(get_file_contents_if_exists(self.buckd_pid_file)) except ValueError: return None except TypeError: return None def get_buckd_stdout(self): return self.buckd_stdout def get_buckd_stderr(self): return self.buckd_stderr def get_buck_out_log_dir(self): return self._buck_out_log def clean_up_buckd(self): with Tracing("BuckProject.clean_up_buckd"): if os.path.exists(self.buckd_dir): file_locks.rmtree_if_can_lock(self.buckd_dir) def create_buckd_tmp_dir(self): if self.buckd_tmp_dir is not None: return self.buckd_tmp_dir tmp_dir_parent = os.path.join(self.buckd_dir, "tmp") makedirs(tmp_dir_parent) self.buckd_tmp_dir = tempfile.mkdtemp(prefix="buck_run.", dir=tmp_dir_parent) return self.buckd_tmp_dir def save_buckd_version(self, version): write_contents_to_file(self.buckd_version_file, version) def save_buckd_pid(self, pid): 
write_contents_to_file(self.buckd_pid_file, str(pid)) @staticmethod def from_current_dir(): with Tracing("BuckProject.from_current_dir"): current_dir = os.getcwd() if "--version" in sys.argv or "-V" in sys.argv: return BuckProject(current_dir) at_root_dir = False while not at_root_dir: if os.path.exists(os.path.join(current_dir, ".buckconfig")): return BuckProject(current_dir) parent_dir = os.path.dirname(current_dir) at_root_dir = current_dir == parent_dir current_dir = parent_dir raise NoBuckConfigFoundException() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): with Tracing("BuckProject.__exit__"): if os.path.exists(self.tmp_dir): try: shutil.rmtree(self.tmp_dir) except OSError as e: if e.errno != errno.ENOENT: raise class NoBuckConfigFoundException(Exception): def __init__(self): no_buckconfig_message_path = ".no_buckconfig_message" default_message = textwrap.dedent( """\ This does not appear to be the root of a Buck project. Please 'cd' to the root of your project before running buck. If this really is the root of your project, run 'touch .buckconfig' and then re-run your buck command.""" ) message = get_file_contents_if_exists( no_buckconfig_message_path, default_message ) Exception.__init__(self, message)
brettwooldridge/buck
programs/buck_project.py
Python
apache-2.0
6,332
0.000632
import asyncio import datetime import json from collections import Counter, defaultdict from pathlib import Path from typing import Mapping import aiohttp import discord from redbot.core import Config from redbot.core.bot import Red from redbot.core.commands import Cog from redbot.core.data_manager import cog_data_path from redbot.core.i18n import Translator, cog_i18n from ..utils import CacheLevel, PlaylistScope from . import abc, cog_utils, commands, events, tasks, utilities from .cog_utils import CompositeMetaClass _ = Translator("Audio", Path(__file__)) @cog_i18n(_) class Audio( commands.Commands, events.Events, tasks.Tasks, utilities.Utilities, Cog, metaclass=CompositeMetaClass, ): """Play audio through voice channels.""" _default_lavalink_settings = { "host": "localhost", "rest_port": 2333, "ws_port": 2333, "password": "youshallnotpass", } def __init__(self, bot: Red): super().__init__() self.bot = bot self.config = Config.get_conf(self, 2711759130, force_registration=True) self.api_interface = None self.player_manager = None self.playlist_api = None self.local_folder_current_path = None self.db_conn = None self._error_counter = Counter() self._error_timer = {} self._disconnected_players = {} self._daily_playlist_cache = {} self._daily_global_playlist_cache = {} self._persist_queue_cache = {} self._dj_status_cache = {} self._dj_role_cache = {} self.skip_votes = {} self.play_lock = {} self.lavalink_connect_task = None self._restore_task = None self.player_automated_timer_task = None self.cog_cleaned_up = False self.lavalink_connection_aborted = False self.permission_cache = discord.Permissions( embed_links=True, read_messages=True, send_messages=True, read_message_history=True, add_reactions=True, ) self.session = aiohttp.ClientSession(json_serialize=json.dumps) self.cog_ready_event = asyncio.Event() self._ws_resume = defaultdict(asyncio.Event) self._ws_op_codes = defaultdict(asyncio.LifoQueue) self.cog_init_task = None self.global_api_user = { "fetched": 
False, "can_read": False, "can_post": False, "can_delete": False, } self._ll_guild_updates = set() self._diconnected_shard = set() self._last_ll_update = datetime.datetime.now(datetime.timezone.utc) default_global = dict( schema_version=1, bundled_playlist_version=0, owner_notification=0, cache_level=CacheLevel.all().value, cache_age=365, daily_playlists=False, global_db_enabled=False, global_db_get_timeout=5, status=False, use_external_lavalink=False, restrict=True, localpath=str(cog_data_path(raw_name="Audio")), url_keyword_blacklist=[], url_keyword_whitelist=[], java_exc_path="java", **self._default_lavalink_settings, ) default_guild = dict( auto_play=False, currently_auto_playing_in=None, auto_deafen=True, autoplaylist=dict( enabled=True, id=42069, name="Aikaterna's curated tracks", scope=PlaylistScope.GLOBAL.value, ), persist_queue=True, disconnect=False, dj_enabled=False, dj_role=None, daily_playlists=False, emptydc_enabled=False, emptydc_timer=0, emptypause_enabled=False, emptypause_timer=0, jukebox=False, jukebox_price=0, maxlength=0, max_volume=150, notify=False, prefer_lyrics=False, repeat=False, shuffle=False, shuffle_bumped=True, thumbnail=False, volume=100, vote_enabled=False, vote_percent=0, room_lock=None, url_keyword_blacklist=[], url_keyword_whitelist=[], country_code="US", ) _playlist: Mapping = dict(id=None, author=None, name=None, playlist_url=None, tracks=[]) self.config.init_custom("EQUALIZER", 1) self.config.register_custom("EQUALIZER", eq_bands=[], eq_presets={}) self.config.init_custom(PlaylistScope.GLOBAL.value, 1) self.config.register_custom(PlaylistScope.GLOBAL.value, **_playlist) self.config.init_custom(PlaylistScope.GUILD.value, 2) self.config.register_custom(PlaylistScope.GUILD.value, **_playlist) self.config.init_custom(PlaylistScope.USER.value, 2) self.config.register_custom(PlaylistScope.USER.value, **_playlist) self.config.register_guild(**default_guild) self.config.register_global(**default_global) 
self.config.register_user(country_code=None)
palmtree5/Red-DiscordBot
redbot/cogs/audio/core/__init__.py
Python
gpl-3.0
5,241
0.000382
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.CsvDataset`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os import zlib from tensorflow.python.data.experimental.ops import error_ops from tensorflow.python.data.experimental.ops import readers from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import readers as core_readers from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import parsing_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class CsvDatasetTest(test_base.DatasetTestBase): def _setup_files(self, inputs, linebreak='\n', compression_type=None): filenames = [] for i, ip in enumerate(inputs): fn = os.path.join(self.get_temp_dir(), 'temp_%d.csv' % i) contents = linebreak.join(ip).encode('utf-8') if compression_type is None: with open(fn, 'wb') as f: f.write(contents) elif compression_type == 'GZIP': with gzip.GzipFile(fn, 'wb') as f: f.write(contents) elif compression_type == 'ZLIB': contents = zlib.compress(contents) with 
open(fn, 'wb') as f: f.write(contents) else: raise ValueError('Unsupported compression_type', compression_type) filenames.append(fn) return filenames def _make_test_datasets(self, inputs, **kwargs): # Test by comparing its output to what we could get with map->decode_csv filenames = self._setup_files(inputs) dataset_expected = core_readers.TextLineDataset(filenames) dataset_expected = dataset_expected.map( lambda l: parsing_ops.decode_csv(l, **kwargs)) dataset_actual = readers.CsvDataset(filenames, **kwargs) return (dataset_actual, dataset_expected) def _test_by_comparison(self, inputs, **kwargs): """Checks that CsvDataset is equiv to TextLineDataset->map(decode_csv).""" dataset_actual, dataset_expected = self._make_test_datasets( inputs, **kwargs) self.assertDatasetsEqual(dataset_actual, dataset_expected) def _verify_output_or_err(self, dataset, expected_output=None, expected_err_re=None): if expected_err_re is None: # Verify that output is expected, without errors nxt = self.getNext(dataset) expected_output = [[ v.encode('utf-8') if isinstance(v, str) else v for v in op ] for op in expected_output] for value in expected_output: op = self.evaluate(nxt()) self.assertAllEqual(op, value) with self.assertRaises(errors.OutOfRangeError): self.evaluate(nxt()) else: nxt = self.getNext(dataset) while True: try: self.evaluate(nxt()) except errors.OutOfRangeError: break def _test_dataset( self, inputs, expected_output=None, expected_err_re=None, linebreak='\n', compression_type=None, # Used for both setup and parsing **kwargs): """Checks that elements produced by CsvDataset match expected output.""" # Convert str type because py3 tf strings are bytestrings filenames = self._setup_files(inputs, linebreak, compression_type) kwargs['compression_type'] = compression_type if expected_err_re is not None: # Verify that OpError is produced as expected with self.assertRaisesOpError(expected_err_re): dataset = readers.CsvDataset(filenames, **kwargs) self._verify_output_or_err(dataset, 
expected_output, expected_err_re) else: dataset = readers.CsvDataset(filenames, **kwargs) self._verify_output_or_err(dataset, expected_output, expected_err_re) def testCsvDataset_requiredFields(self): record_defaults = [[]] * 4 inputs = [['1,2,3,4']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_int(self): record_defaults = [[0]] * 4 inputs = [['1,2,3,4', '5,6,7,8']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_float(self): record_defaults = [[0.0]] * 4 inputs = [['1.0,2.1,3.2,4.3', '5.4,6.5,7.6,8.7']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_string(self): record_defaults = [['']] * 4 inputs = [['1.0,2.1,hello,4.3', '5.4,6.5,goodbye,8.7']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_withEmptyFields(self): record_defaults = [[0]] * 4 inputs = [[',,,', '1,1,1,', ',2,2,2']] self._test_dataset( inputs, [[0, 0, 0, 0], [1, 1, 1, 0], [0, 2, 2, 2]], record_defaults=record_defaults) def testCsvDataset_errWithUnquotedQuotes(self): record_defaults = [['']] * 3 inputs = [['1,2"3,4']] self._test_dataset( inputs, expected_err_re='Unquoted fields cannot have quotes inside', record_defaults=record_defaults) def testCsvDataset_errWithUnescapedQuotes(self): record_defaults = [['']] * 3 inputs = [['"a"b","c","d"']] self._test_dataset( inputs, expected_err_re= 'Quote inside a string has to be escaped by another quote', record_defaults=record_defaults) def testCsvDataset_ignoreErrWithUnescapedQuotes(self): record_defaults = [['']] * 3 inputs = [['1,"2"3",4', '1,"2"3",4",5,5', 'a,b,"c"d"', 'e,f,g']] filenames = self._setup_files(inputs) dataset = readers.CsvDataset(filenames, record_defaults=record_defaults) dataset = dataset.apply(error_ops.ignore_errors()) self._verify_output_or_err(dataset, [['e', 'f', 'g']]) def testCsvDataset_ignoreErrWithUnquotedQuotes(self): record_defaults = [['']] * 3 inputs = [['1,2"3,4', 'a,b,c"d', 
'9,8"7,6,5', 'e,f,g']] filenames = self._setup_files(inputs) dataset = readers.CsvDataset(filenames, record_defaults=record_defaults) dataset = dataset.apply(error_ops.ignore_errors()) self._verify_output_or_err(dataset, [['e', 'f', 'g']]) def testCsvDataset_withNoQuoteDelimAndUnquotedQuotes(self): record_defaults = [['']] * 3 inputs = [['1,2"3,4']] self._test_by_comparison( inputs, record_defaults=record_defaults, use_quote_delim=False) def testCsvDataset_mixedTypes(self): record_defaults = [ constant_op.constant([], dtype=dtypes.int32), constant_op.constant([], dtype=dtypes.float32), constant_op.constant([], dtype=dtypes.string), constant_op.constant([], dtype=dtypes.float64) ] inputs = [['1,2.1,3.2,4.3', '5,6.5,7.6,8.7']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_withUseQuoteDelimFalse(self): record_defaults = [['']] * 4 inputs = [['1,2,"3,4"', '"5,6",7,8']] self._test_by_comparison( inputs, record_defaults=record_defaults, use_quote_delim=False) def testCsvDataset_withFieldDelim(self): record_defaults = [[0]] * 4 inputs = [['1:2:3:4', '5:6:7:8']] self._test_by_comparison( inputs, record_defaults=record_defaults, field_delim=':') def testCsvDataset_withNaValue(self): record_defaults = [[0]] * 4 inputs = [['1,NA,3,4', 'NA,6,7,8']] self._test_by_comparison( inputs, record_defaults=record_defaults, na_value='NA') def testCsvDataset_withSelectCols(self): record_defaults = [['']] * 2 inputs = [['1,2,3,4', '"5","6","7","8"']] self._test_by_comparison( inputs, record_defaults=record_defaults, select_cols=[1, 2]) def testCsvDataset_withSelectColsTooHigh(self): record_defaults = [[0]] * 2 inputs = [['1,2,3,4', '5,6,7,8']] self._test_dataset( inputs, expected_err_re='Expect 2 fields but have 1 in record', record_defaults=record_defaults, select_cols=[3, 4]) def testCsvDataset_withOneCol(self): record_defaults = [['NA']] inputs = [['0', '', '2']] self._test_dataset( inputs, [['0'], ['NA'], ['2']], record_defaults=record_defaults) 
def testCsvDataset_withMultipleFiles(self): record_defaults = [[0]] * 4 inputs = [['1,2,3,4', '5,6,7,8'], ['5,6,7,8']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_withLeadingAndTrailingSpaces(self): record_defaults = [[0.0]] * 4 inputs = [['0, 1, 2, 3']] expected = [[0.0, 1.0, 2.0, 3.0]] self._test_dataset(inputs, expected, record_defaults=record_defaults) def testCsvDataset_errorWithMissingDefault(self): record_defaults = [[]] * 2 inputs = [['0,']] self._test_dataset( inputs, expected_err_re='Field 1 is required but missing in record!', record_defaults=record_defaults) def testCsvDataset_errorWithFewerDefaultsThanFields(self): record_defaults = [[0.0]] * 2 inputs = [['0,1,2,3']] self._test_dataset( inputs, expected_err_re='Expect 2 fields but have more in record', record_defaults=record_defaults) def testCsvDataset_errorWithMoreDefaultsThanFields(self): record_defaults = [[0.0]] * 5 inputs = [['0,1,2,3']] self._test_dataset( inputs, expected_err_re='Expect 5 fields but have 4 in record', record_defaults=record_defaults) def testCsvDataset_withHeader(self): record_defaults = [[0]] * 2 inputs = [['col1,col2', '1,2']] expected = [[1, 2]] self._test_dataset( inputs, expected, record_defaults=record_defaults, header=True, ) def testCsvDataset_withHeaderAndNoRecords(self): record_defaults = [[0]] * 2 inputs = [['col1,col2']] expected = [] self._test_dataset( inputs, expected, record_defaults=record_defaults, header=True, ) def testCsvDataset_errorWithHeaderEmptyFile(self): record_defaults = [[0]] * 2 inputs = [[]] expected_err_re = "Can't read header of file" self._test_dataset( inputs, expected_err_re=expected_err_re, record_defaults=record_defaults, header=True, ) def testCsvDataset_withEmptyFile(self): record_defaults = [['']] * 2 inputs = [['']] # Empty file self._test_dataset( inputs, expected_output=[], record_defaults=record_defaults) def testCsvDataset_errorWithEmptyRecord(self): record_defaults = [['']] * 2 inputs = 
[['', '1,2']] # First record is empty self._test_dataset( inputs, expected_err_re='Expect 2 fields but have 1 in record', record_defaults=record_defaults) def testCsvDataset_withChainedOps(self): # Testing that one dataset can create multiple iterators fine. # `repeat` creates multiple iterators from the same C++ Dataset. record_defaults = [[0]] * 4 inputs = [['1,,3,4', '5,6,,8']] ds_actual, ds_expected = self._make_test_datasets( inputs, record_defaults=record_defaults) self.assertDatasetsEqual( ds_actual.repeat(5).prefetch(1), ds_expected.repeat(5).prefetch(1)) def testCsvDataset_withTypeDefaults(self): # Testing using dtypes as record_defaults for required fields record_defaults = [dtypes.float32, [0.0]] inputs = [['1.0,2.0', '3.0,4.0']] self._test_dataset( inputs, [[1.0, 2.0], [3.0, 4.0]], record_defaults=record_defaults, ) def testMakeCsvDataset_fieldOrder(self): data = [[ '1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19', '1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19' ]] file_path = self._setup_files(data) ds = readers.make_csv_dataset( file_path, batch_size=1, shuffle=False, num_epochs=1) nxt = self.getNext(ds) result = list(self.evaluate(nxt()).values()) self.assertEqual(result, sorted(result)) ## The following tests exercise parsing logic for quoted fields def testCsvDataset_withQuoted(self): record_defaults = [['']] * 4 inputs = [['"a","b","c :)","d"', '"e","f","g :(","h"']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_withOneColAndQuotes(self): record_defaults = [['']] inputs = [['"0"', '"1"', '"2"']] self._test_dataset( inputs, [['0'], ['1'], ['2']], record_defaults=record_defaults) def testCsvDataset_withNewLine(self): # In this case, we expect it to behave differently from # TextLineDataset->map(decode_csv) since that flow has bugs record_defaults = [['']] * 4 inputs = [['a,b,"""c""\n0","d\ne"', 'f,g,h,i']] expected = [['a', 'b', '"c"\n0', 'd\ne'], ['f', 'g', 'h', 'i']] self._test_dataset(inputs, expected, 
record_defaults=record_defaults) def testCsvDataset_withNewLineInUnselectedCol(self): record_defaults = [['']] inputs = [['1,"2\n3",4', '5,6,7']] self._test_dataset( inputs, expected_output=[['1'], ['5']], record_defaults=record_defaults, select_cols=[0]) def testCsvDataset_withMultipleNewLines(self): # In this case, we expect it to behave differently from # TextLineDataset->map(decode_csv) since that flow has bugs record_defaults = [['']] * 4 inputs = [['a,"b\n\nx","""c""\n \n0","d\ne"', 'f,g,h,i']] expected = [['a', 'b\n\nx', '"c"\n \n0', 'd\ne'], ['f', 'g', 'h', 'i']] self._test_dataset(inputs, expected, record_defaults=record_defaults) def testCsvDataset_errorWithTerminateMidRecord(self): record_defaults = [['']] * 4 inputs = [['a,b,c,"a']] self._test_dataset( inputs, expected_err_re= 'Reached end of file without closing quoted field in record', record_defaults=record_defaults) def testCsvDataset_withEscapedQuotes(self): record_defaults = [['']] * 4 inputs = [['1.0,2.1,"she said: ""hello""",4.3', '5.4,6.5,goodbye,8.7']] self._test_by_comparison(inputs, record_defaults=record_defaults) ## Testing that parsing works with all buffer sizes, quoted/unquoted fields, ## and different types of line breaks def testCsvDataset_withInvalidBufferSize(self): record_defaults = [['']] * 4 inputs = [['a,b,c,d']] self._test_dataset( inputs, expected_err_re='buffer_size should be positive', record_defaults=record_defaults, buffer_size=0) def _test_dataset_on_buffer_sizes(self, inputs, expected, linebreak, record_defaults, compression_type=None, num_sizes_to_test=20): # Testing reading with a range of buffer sizes that should all work. 
for i in list(range(1, 1 + num_sizes_to_test)) + [None]: self._test_dataset( inputs, expected, linebreak=linebreak, compression_type=compression_type, record_defaults=record_defaults, buffer_size=i) def testCsvDataset_withLF(self): record_defaults = [['NA']] * 3 inputs = [['abc,def,ghi', '0,1,2', ',,']] expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\n', record_defaults=record_defaults) def testCsvDataset_withCR(self): # Test that when the line separator is '\r', parsing works with all buffer # sizes record_defaults = [['NA']] * 3 inputs = [['abc,def,ghi', '0,1,2', ',,']] expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\r', record_defaults=record_defaults) def testCsvDataset_withCRLF(self): # Test that when the line separator is '\r\n', parsing works with all buffer # sizes record_defaults = [['NA']] * 3 inputs = [['abc,def,ghi', '0,1,2', ',,']] expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\r\n', record_defaults=record_defaults) def testCsvDataset_withBufferSizeAndQuoted(self): record_defaults = [['NA']] * 3 inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']] expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\n', record_defaults=record_defaults) def testCsvDataset_withCRAndQuoted(self): # Test that when the line separator is '\r', parsing works with all buffer # sizes record_defaults = [['NA']] * 3 inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']] expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\r', record_defaults=record_defaults) def testCsvDataset_withCRLFAndQuoted(self): # Test 
that when the line separator is '\r\n', parsing works with all buffer # sizes record_defaults = [['NA']] * 3 inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']] expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\r\n', record_defaults=record_defaults) def testCsvDataset_withGzipCompressionType(self): record_defaults = [['NA']] * 3 inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']] expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\r\n', compression_type='GZIP', record_defaults=record_defaults) def testCsvDataset_withZlibCompressionType(self): record_defaults = [['NA']] * 3 inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']] expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\r\n', compression_type='ZLIB', record_defaults=record_defaults) def testCsvDataset_withScalarDefaults(self): record_defaults = [constant_op.constant(0, dtype=dtypes.int64)] * 4 inputs = [[',,,', '1,1,1,', ',2,2,2']] self._test_dataset( inputs, [[0, 0, 0, 0], [1, 1, 1, 0], [0, 2, 2, 2]], record_defaults=record_defaults) def testCsvDataset_with2DDefaults(self): record_defaults = [constant_op.constant([[0]], dtype=dtypes.int64)] * 4 inputs = [[',,,', '1,1,1,', ',2,2,2']] if context.executing_eagerly(): err_spec = errors.InvalidArgumentError, ( 'Each record default should be at ' 'most rank 1') else: err_spec = ValueError, 'Shape must be at most rank 1 but is rank 2' with self.assertRaisesWithPredicateMatch(*err_spec): self._test_dataset( inputs, [[0, 0, 0, 0], [1, 1, 1, 0], [0, 2, 2, 2]], record_defaults=record_defaults) if __name__ == '__main__': test.main()
chemelnucfin/tensorflow
tensorflow/python/data/experimental/kernel_tests/csv_dataset_test.py
Python
apache-2.0
20,082
0.004531
from scraper import * WD2_url = "http://www.cms-ud.com/UD/table/WD2.htm"; crit_name_WD2 = "WD2/" WD2_list = find_urls(url=WD2_url,crit_name=crit_name_WD2) for url in WD2_list: try: url = url.replace('^','%5E') url = "http://www.cms-ud.com/UD/table/"+url design = find_design(url) #print(design) run,factor,level = find_design_size(url,"WD2") file_name = "WD2_"+str(run)+"_"+str(factor)+"_"+str(level) save_design(design,name=file_name,save_path='./design_data/WD2/') except: print("Some errors happen at url: %s",url)
HAOYU-LI/UniDOE
Scraper/WD2.py
Python
apache-2.0
590
0.020339
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Generated code. DO NOT EDIT! # # Snippet for CreateContext # NOTE: This snippet has been automatically generated for illustrative purposes only. # It may require modifications to work in your environment. # To install the latest published package dependency, execute the following: # python3 -m pip install google-cloud-aiplatform # [START aiplatform_v1_generated_MetadataService_CreateContext_sync] from google.cloud import aiplatform_v1 def sample_create_context(): # Create a client client = aiplatform_v1.MetadataServiceClient() # Initialize request argument(s) request = aiplatform_v1.CreateContextRequest( parent="parent_value", ) # Make the request response = client.create_context(request=request) # Handle the response print(response) # [END aiplatform_v1_generated_MetadataService_CreateContext_sync]
googleapis/python-aiplatform
samples/generated_samples/aiplatform_v1_generated_metadata_service_create_context_sync.py
Python
apache-2.0
1,468
0.000681
# Copyright 2021 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Key Values Stores store the data associated with the embeddings indexed by the `Indexer()`. Each key of the store represent a **record** that contains information about a given embedding. The main use-case for the store is to retrieve the records associated with the ids returned by a nearest neigboor search performed with the [`Search()` module](../search/). Additionally one might want to inspect the content of the index which is why `Store()` class may implement an export to a [Pandas Dataframe](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) via the `to_pandas()` method. """ from .store import Store # noqa from .memory_store import MemoryStore # noqa
tensorflow/similarity
tensorflow_similarity/stores/__init__.py
Python
apache-2.0
1,292
0.000774
# # Copyright 2014 Telefonica Investigacion y Desarrollo, S.A.U # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Unit tests for SPASSWORD checker.""" from keystone import tests from keystone import exception import keystone_spassword.contrib.spassword.checker class TestPasswordChecker(tests.BaseTestCase): def test_checker(self): new_password = "stronger" self.assertRaises(exception.ValidationError, checker.strong_check_password(new_password))
telefonicaid/fiware-keystone-spassword
keystone_spassword/tests/unit/contrib/spassword/test_checker.py
Python
apache-2.0
1,246
0.002408
# -*- coding: utf-8 -*- __author__ = 'Tom Chen' import urllib2,sys,re,time from sgmllib import SGMLParser from datetime import datetime,date from urllib import unquote,quote default_encoding = 'utf-8' #设置文件使用UTF-8编码 if sys.getdefaultencoding() != default_encoding: reload(sys) sys.setdefaultencoding(default_encoding) class findbilibili(SGMLParser): #分析HTML源代码 def __init__(self): SGMLParser.__init__(self) self.is_script = '' self.url = [] self.videonum = [] self.is_li = '' self.seasonnum = [] self.season = [] self.is_a = '' self.num = [] def start_script(self,attrs): try: if attrs[0][0] == 'language' and attrs[0][1] == 'javascript': self.is_script = 'num' except IndexError: pass def end_script(self): self.is_script = "" def start_li(self,attrs): try: if attrs[0][0] == 'season_id' and attrs[1][0] == 'id': if re.match(r's_\d+',attrs[1][1]): self.is_li = 'season' self.seasonnum.append(attrs[0][1]) except IndexError: pass def end_li(self): self.is_li = '' def start_a(self,attrs): try: if attrs[0][0] == 'class' and attrs[0][1] == 't': if attrs[1][0] == 'href' and re.match(r'/video/av\d+',attrs[1][1]): if attrs[2][0] == 'target' and attrs[2][1] == '_blank': self.is_a = 'url' self.url.append(attrs[1][1]) except IndexError: pass def end_a(self): self.is_a = '' def handle_data(self, data): if self.is_script == 'num': self.videonum.append(data) if self.is_li == 'season': self.season.append(data) if self.is_a == 'url': self.num.append(data) #funtion name [bilibili] #在bilibili上抓取动画网址 #param string 动画名字 #return array[array] 2维数组 [1,[第一集][地址]][2,[][]][3...]... 
def bilibili(sname): name = sname name = unquote(name) l = name.split(' ') m = [] s = '' rename = re.compile('第') if len(l) != 1: s = l[len(l)-1] if rename.findall(s): m = name.split(s) else: m.append(name) if s == '续': s = '第二季' else: m.append(name) m[0] = quote(m[0]) if name == '无头骑士异闻录×2 转': s = name if name == '无头骑士异闻录×2 承': s = name url = 'http://www.bilibili.com/sp/'+m[0] user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)' #伪装浏览器请求数据 headers = { 'User-Agent' : user_agent } request = urllib2.Request(url, headers=headers) try: content = urllib2.urlopen(request).read() except urllib2.HTTPError: return [] listname = findbilibili() listname.feed(content) rename = re.compile(r'\d+') try: videoid = rename.findall(listname.videonum[0]) except IndexError: return [] videoid2 = '' try: n = len(listname.season) a = 0 for a in range(n): if listname.season[a] == s: videoid2 = listname.seasonnum[a] break if videoid2 == '': videoid2 = listname.seasonnum[0] except IndexError: pass if videoid2: y = '-' else: y = '' try: url = 'http://www.bilibili.com/sppage/bangumi-'+videoid[0]+y+videoid2+'-1.html' except IndexError: return [] user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)' #伪装浏览器请求数据 headers = { 'User-Agent' : user_agent } request = urllib2.Request(url, headers=headers) content = urllib2.urlopen(request).read() listname = findbilibili() listname.feed(content) n = len(listname.url) a = 0 for a in range(n): listname.url[a] = 'http://www.bilibili.com'+listname.url[a] rename = re.compile(r'\d+') l = [] for a in range(n): z = rename.findall(listname.num[a]) zz = ''.join(z) l.append(zz) dname = [] qname = [] a = 0 for a in range(n): x = [] x.append(l[a]) x.append(listname.url[a]) qname.append(x) for a in range(n): dname.append(qname[n - a -1]) return dname if __name__ == '__main__': name = '噬神者'.encode('gbk') newname = name.decode('gbk') newname = newname.encode('utf-8') print newname bilibili(newname)
cwdtom/qqbot
tom/findbilibili.py
Python
gpl-3.0
4,769
0.009097
# (C) British Crown Copyright 2010 - 2014, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """ A script to convert the standard names information from the provided XML file into a Python dictionary format. Takes two arguments: the first is the XML file to process and the second is the name of the file to write the Python dictionary file into. By default, Iris will use the source XML file: etc/cf-standard-name-table.xml as obtained from: http://cf-pcmdi.llnl.gov/documents/cf-standard-names """ from __future__ import (absolute_import, division, print_function) import argparse import pprint import xml.etree.ElementTree as ET STD_VALUES_FILE_TEMPLATE = ''' # (C) British Crown Copyright 2010 - 2014, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. 
If not, see <http://www.gnu.org/licenses/>. """ This file contains a dictionary of standard value names that are mapped to another dictionary of other standard name attributes. Currently only the `canonical_unit` exists in these attribute dictionaries. This file is automatically generated. Do not edit this file by hand. The file will be generated during a standard build/installation: python setup.py build python setup.py install Also, the file can be re-generated in the source distribution via: python setup.py std_names Or for more control (e.g. to use an alternative XML file) via: python tools/generate_std_names.py XML_FILE MODULE_FILE """ from __future__ import (absolute_import, division, print_function) STD_NAMES = '''.lstrip() def process_name_table(tree, element_name, *child_elements): """ Yields a series of dictionaries with the key being the id of the entry element and the value containing another dictionary mapping other attributes of the standard name to their values, e.g. units, description, grib value etc. 
""" for elem in tree.iterfind(element_name): sub_section = {} for child_elem in child_elements: found_elem = elem.find(child_elem) sub_section[child_elem] = found_elem.text if found_elem is not None else None yield {elem.get("id") : sub_section} def to_dict(infile, outfile): values = {} aliases = {} tree = ET.parse(infile) for section in process_name_table(tree, 'entry', 'canonical_units'): values.update(section) for section in process_name_table(tree, 'alias', 'entry_id'): aliases.update(section) for key, valued in aliases.iteritems(): values.update({ key : {'canonical_units' : values.get(valued['entry_id']).get('canonical_units')} }) outfile.write(STD_VALUES_FILE_TEMPLATE + pprint.pformat(values)) if __name__ == "__main__": parser = argparse.ArgumentParser( description='Create Python code from CF standard name XML.') parser.add_argument('input', type=argparse.FileType(), metavar='INPUT', help='Path to CF standard name XML') parser.add_argument('output', type=argparse.FileType('w'), metavar='OUTPUT', help='Path to resulting Python code') args = parser.parse_args() to_dict(args.input, args.output)
Jozhogg/iris
tools/generate_std_names.py
Python
lgpl-3.0
4,434
0.001579
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from heat_integrationtests.common import test LOG = logging.getLogger(__name__) class CeilometerAlarmTest(test.HeatIntegrationTest): """Class is responsible for testing of ceilometer usage.""" def setUp(self): super(CeilometerAlarmTest, self).setUp() self.client = self.orchestration_client self.template = self._load_template(__file__, 'test_ceilometer_alarm.yaml', 'templates') def check_instance_count(self, stack_identifier, expected): stack = self.client.stacks.get(stack_identifier) actual = self._stack_output(stack, 'asg_size') if actual != expected: LOG.warn('check_instance_count exp:%d, act:%s' % (expected, actual)) return actual == expected def test_alarm(self): """Confirm we can create an alarm and trigger it.""" # 1. create the stack stack_identifier = self.stack_create(template=self.template) # 2. send ceilometer a metric (should cause the alarm to fire) sample = {} sample['counter_type'] = 'gauge' sample['counter_name'] = 'test_meter' sample['counter_volume'] = 1 sample['counter_unit'] = 'count' sample['resource_metadata'] = {'metering.stack_id': stack_identifier.split('/')[-1]} sample['resource_id'] = 'shouldnt_matter' self.metering_client.samples.create(**sample) # 3. confirm we get a scaleup. # Note: there is little point waiting more than 60s+time to scale up. self.assertTrue(test.call_until_true( 120, 2, self.check_instance_count, stack_identifier, 2))
rh-s/heat
heat_integrationtests/scenario/test_ceilometer_alarm.py
Python
apache-2.0
2,412
0
import decimal from datetime import datetime from django.conf import settings from django.conf.urls import url, include from pinax.stripe.forms import PlanForm from .base import ViewConfig invoices = [ dict(date=datetime(2017, 10, 1), subscription=dict(plan=dict(name="Pro")), period_start=datetime(2017, 10, 1), period_end=datetime(2017, 10, 31), total=decimal.Decimal("9.99"), paid=False), dict(date=datetime(2017, 9, 1), subscription=dict(plan=dict(name="Pro")), period_start=datetime(2017, 9, 1), period_end=datetime(2017, 9, 30), total=decimal.Decimal("9.99"), paid=True), dict(date=datetime(2017, 8, 1), subscription=dict(plan=dict(name="Beginner")), period_start=datetime(2017, 8, 1), period_end=datetime(2017, 8, 31), total=decimal.Decimal("5.99"), paid=True), dict(date=datetime(2017, 7, 1), subscription=dict(plan=dict(name="Beginner")), period_start=datetime(2017, 7, 1), period_end=datetime(2017, 7, 30), total=decimal.Decimal("5.99"), paid=True), ] card = dict(pk=1, brand="Visa", last4="4242", exp_month="10", exp_year="2030", created_at=datetime(2016, 4, 5)) methods = [ card ] subscription = dict(pk=1, current_period_start=datetime(2017, 10, 1), current_period_end=datetime(2017, 10, 31), plan=dict(name="Pro"), start=datetime(2017, 10, 1), status="active", invoice_set=dict(all=invoices)) subscriptions = [ subscription ] patch = "http://pinaxproject.com/pinax-design/patches/pinax-stripe.svg" label = "stripe" title = "Pinax Stripe" views = [ ViewConfig(pattern=r"^invoices-empty/$", template="pinax/stripe/invoice_list.html", name="invoice_list_empty", pattern_kwargs={}, object_list=[]), ViewConfig(pattern=r"^invoices/$", template="pinax/stripe/invoice_list.html", name="pinax_stripe_invoice_list", pattern_kwargs={}, object_list=invoices), ViewConfig(pattern=r"^methods-empty/$", template="pinax/stripe/paymentmethod_list.html", name="method_list_empty", pattern_kwargs={}, object_list=[]), ViewConfig(pattern=r"^methods/$", template="pinax/stripe/paymentmethod_list.html", 
name="pinax_stripe_payment_method_list", pattern_kwargs={}, object_list=methods), ViewConfig(pattern=r"^methods/create/$", template="pinax/stripe/paymentmethod_create.html", name="pinax_stripe_payment_method_create", pattern_kwargs={}, PINAX_STRIPE_PUBLIC_KEY=settings.PINAX_STRIPE_PUBLIC_KEY), ViewConfig(pattern=r"^methods/update/(?P<pk>\d+)/$", template="pinax/stripe/paymentmethod_update.html", name="pinax_stripe_payment_method_update", pattern_kwargs={"pk": 1}, object=card), ViewConfig(pattern=r"^methods/delete/(?P<pk>\d+)/", template="pinax/stripe/paymentmethod_delete.html", name="pinax_stripe_payment_method_delete", pattern_kwargs={"pk": 1}, object=card), ViewConfig(pattern=r"^subscriptions-empty/$", template="pinax/stripe/subscription_list.html", name="subscription_list_empty", pattern_kwargs={}, object_list=[]), ViewConfig(pattern=r"^subscriptions/$", template="pinax/stripe/subscription_list.html", name="pinax_stripe_subscription_list", pattern_kwargs={}, object_list=subscriptions), ViewConfig(pattern=r"^subscriptions/create/$", template="pinax/stripe/subscription_create.html", name="pinax_stripe_subscription_create", pattern_kwargs={}, form=PlanForm(), request=dict(user=dict(customer=dict(default_source="foo")))), ViewConfig(pattern=r"^subscriptions/update/(?P<pk>\d+)/$", template="pinax/stripe/subscription_update.html", name="pinax_stripe_subscription_update", pattern_kwargs={"pk": 1}, object=subscription, form=PlanForm(), PINAX_STRIPE_PUBLIC_KEY=settings.PINAX_STRIPE_PUBLIC_KEY), ViewConfig(pattern=r"^subscriptions/delete/(?P<pk>\d+)/", template="pinax/stripe/subscription_delete.html", name="pinax_stripe_subscription_delete", pattern_kwargs={"pk": 1}, object=subscription), ] urlpatterns = [ view.url() for view in views ] url = url(r"payments/", include("pinax_theme_tester.configs.stripe"))
pinax/pinax_theme_tester
pinax_theme_tester/configs/stripe.py
Python
mit
3,913
0.0046
""" Derived module from filehandler.py to handle STereoLithography files. """ import numpy as np from mpl_toolkits import mplot3d from matplotlib import pyplot from stl import mesh, Mode import pygem.filehandler as fh class StlHandler(fh.FileHandler): """ STereoLithography file handler class :cvar string infile: name of the input file to be processed. :cvar string outfile: name of the output file where to write in. :cvar string extension: extension of the input/output files. It is equal to '.stl'. """ def __init__(self): super(StlHandler, self).__init__() self.extension = '.stl' def parse(self, filename): """ Method to parse the `filename`. It returns a matrix with all the coordinates. :param string filename: name of the input file. :return: mesh_points: it is a `n_points`-by-3 matrix containing the coordinates of the points of the mesh :rtype: numpy.ndarray .. todo:: - specify when it works """ self._check_filename_type(filename) self._check_extension(filename) self.infile = filename stl_mesh = mesh.Mesh.from_file(self.infile) mesh_points = np.array([stl_mesh.x.ravel(), stl_mesh.y.ravel(), stl_mesh.z.ravel()]) mesh_points = mesh_points.T return mesh_points def write(self, mesh_points, filename, write_bin=False): """ Writes a stl file, called filename, copying all the lines from self.filename but the coordinates. mesh_points is a matrix that contains the new coordinates to write in the stl file. :param numpy.ndarray mesh_points: it is a `n_points`-by-3 matrix containing the coordinates of the points of the mesh. :param string filename: name of the output file. :param boolean write_bin: flag to write in the binary format. Default is False. 
""" self._check_filename_type(filename) self._check_extension(filename) self._check_infile_instantiation(self.infile) self.outfile = filename n_vertices = mesh_points.shape[0] # number of triplets of vertices n_triplets = n_vertices/3 data = np.zeros(n_triplets, dtype=mesh.Mesh.dtype) stl_mesh = mesh.Mesh(data, remove_empty_areas=False) for i in range(0, n_triplets): for j in range(0, 3): data['vectors'][i][j] = mesh_points[3*i + j] if not write_bin: stl_mesh.save(self.outfile, mode=Mode.ASCII, update_normals=True) else: stl_mesh.save(self.outfile, update_normals=True) def plot(self, plot_file=None, save_fig=False): """ Method to plot an stl file. If `plot_file` is not given it plots `self.infile`. :param string plot_file: the stl filename you want to plot. :param bool save_fig: a flag to save the figure in png or not. If True the plot is not shown. :return: figure: matlplotlib structure for the figure of the chosen geometry :rtype: matplotlib.pyplot.figure """ if plot_file is None: plot_file = self.infile else: self._check_filename_type(plot_file) # Create a new plot figure = pyplot.figure() axes = mplot3d.Axes3D(figure) # Load the STL files and add the vectors to the plot stl_mesh = mesh.Mesh.from_file(plot_file) axes.add_collection3d(mplot3d.art3d.Poly3DCollection(stl_mesh.vectors)) ## Get the limits of the axis and center the geometry max_dim = np.array([np.max(stl_mesh.vectors[:,:,0]), \ np.max(stl_mesh.vectors[:,:,1]), \ np.max(stl_mesh.vectors[:,:,2])]) min_dim = np.array([np.min(stl_mesh.vectors[:,:,0]), \ np.min(stl_mesh.vectors[:,:,1]), \ np.min(stl_mesh.vectors[:,:,2])]) max_lenght = np.max(max_dim - min_dim) axes.set_xlim(-.6*max_lenght + (max_dim[0]+min_dim[0])/2, .6*max_lenght + (max_dim[0]+min_dim[0])/2) axes.set_ylim(-.6*max_lenght + (max_dim[1]+min_dim[1])/2, .6*max_lenght + (max_dim[1]+min_dim[1])/2) axes.set_zlim(-.6*max_lenght + (max_dim[2]+min_dim[2])/2, .6*max_lenght + (max_dim[2]+min_dim[2])/2) # Show the plot to the screen if not save_fig: 
pyplot.show() else: figure.savefig(plot_file.split('.')[0] + '.png') return figure
fsalmoir/PyGeM
pygem/stlhandler.py
Python
mit
3,986
0.032614
# $Id$ # importing this module shouldn't directly cause other large imports # do large imports in the init() hook so that you can call back to the # ModuleManager progress handler methods. """vtk_kit package driver file. This performs all initialisation necessary to use VTK from DeVIDE. Makes sure that all VTK classes have ErrorEvent handlers that report back to the ModuleManager. Inserts the following modules in sys.modules: vtk, vtkdevide. @author: Charl P. Botha <http://cpbotha.net/> """ import re import sys import traceback import types VERSION = '' def preImportVTK(progressMethod): vtkImportList = [('vtk.common', 'VTK Common.'), ('vtk.filtering', 'VTK Filtering.'), ('vtk.io', 'VTK IO.'), ('vtk.imaging', 'VTK Imaging.'), ('vtk.graphics', 'VTK Graphics.'), ('vtk.rendering', 'VTK Rendering.'), ('vtk.hybrid', 'VTK Hybrid.'), #('vtk.patented', 'VTK Patented.'), ('vtk', 'Other VTK symbols')] # set the dynamic loading flags. If we don't do this, we get strange # errors on 64 bit machines. To see this happen, comment this statement # and then run the VTK->ITK connection test case. oldflags = setDLFlags() percentStep = 100.0 / len(vtkImportList) currentPercent = 0.0 # do the imports for module, message in vtkImportList: currentPercent += percentStep progressMethod(currentPercent, 'Initialising vtk_kit: %s' % (message,), noTime=True) exec('import %s' % (module,)) # restore previous dynamic loading flags resetDLFlags(oldflags) def setDLFlags(): # brought over from ITK Wrapping/CSwig/Python # Python "help(sys.setdlopenflags)" states: # # setdlopenflags(...) # setdlopenflags(n) -> None # # Set the flags that will be used for dlopen() calls. 
Among other # things, this will enable a lazy resolving of symbols when # importing a module, if called as sys.setdlopenflags(0) To share # symbols across extension modules, call as # # sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL) # # GCC 3.x depends on proper merging of symbols for RTTI: # http://gcc.gnu.org/faq.html#dso # try: import dl newflags = dl.RTLD_NOW|dl.RTLD_GLOBAL except: newflags = 0x102 # No dl module, so guess (see above). try: oldflags = sys.getdlopenflags() sys.setdlopenflags(newflags) except: oldflags = None return oldflags def resetDLFlags(data): # brought over from ITK Wrapping/CSwig/Python # Restore the original dlopen flags. try: sys.setdlopenflags(data) except: pass def init(module_manager, pre_import=True): # first do the VTK pre-imports: this is here ONLY to keep the user happy # it's not necessary for normal functioning if pre_import: preImportVTK(module_manager.setProgress) # import the main module itself # the global is so that users can also do: # from module_kits import vtk_kit # vtk_kit.vtk.vtkSomeFilter() global vtk import vtk # and do the same for vtkdevide global vtkdevide import vtkdevide # load up some generic functions into this namespace # user can, after import of module_kits.vtk_kit, address these as # module_kits.vtk_kit.blaat. In this case we don't need "global", # as these are modules directly in this package. import module_kits.vtk_kit.misc as misc import module_kits.vtk_kit.mixins as mixins import module_kits.vtk_kit.utils as utils import module_kits.vtk_kit.constants as constants import module_kits.vtk_kit.color_scales as color_scales # setup the kit version global VERSION VERSION = '%s' % (vtk.vtkVersion.GetVTKVersion(),)
nagyistoce/devide
module_kits/vtk_kit/__init__.py
Python
bsd-3-clause
3,965
0.003279
# Patchwork - automated patch tracking system # Copyright (C) 2008 Jeremy Kerr <jk@ozlabs.org> # Copyright (C) 2015 Intel Corporation # # This file is part of the Patchwork package. # # Patchwork is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Patchwork is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Patchwork; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA from collections import Counter, OrderedDict import datetime import jsonfield import random import re import patchwork.threadlocalrequest as threadlocalrequest from django.conf import settings from django.contrib import auth from django.contrib.auth.models import User from django.contrib.sites.models import Site from django.core.urlresolvers import reverse from django.db import models from django.db.models import Q import django.dispatch from django.utils.encoding import python_2_unicode_compatible from django.utils.functional import cached_property from django.utils.six.moves import filter from patchwork.fields import HashField from patchwork.parser import hash_patch, extract_tags @python_2_unicode_compatible class Person(models.Model): email = models.CharField(max_length=255, unique=True) name = models.CharField(max_length=255, null=True, blank=True) user = models.ForeignKey(User, null=True, blank=True, on_delete=models.SET_NULL) def display_name(self): if self.name: return self.name else: return self.email def email_name(self): if (self.name): return "\"%s\" <%s>" % (self.name, self.email) else: return 
self.email def link_to_user(self, user): self.name = user.profile.name() self.user = user def __str__(self): return self.display_name() class Meta: verbose_name_plural = 'People' def get_comma_separated_field(value): if not value: return [] tags = [v.strip() for v in value.split(',')] tags = [tag for tag in tags if tag] return tags @python_2_unicode_compatible class Project(models.Model): linkname = models.CharField(max_length=255, unique=True) name = models.CharField(max_length=255, unique=True) description = models.TextField(blank=True, null=True) listid = models.CharField(max_length=255) listemail = models.CharField(max_length=200) web_url = models.CharField(max_length=2000, blank=True) scm_url = models.CharField(max_length=2000, blank=True) webscm_url = models.CharField(max_length=2000, blank=True) send_notifications = models.BooleanField(default=False) use_tags = models.BooleanField(default=True) git_send_email_only = models.BooleanField(default=False) subject_prefix_tags = models.CharField(max_length=255, blank=True, help_text='Comma separated list of tags') @cached_property def tags(self): if not self.use_tags: return [] return list(Tag.objects.all()) def get_subject_prefix_tags(self): return get_comma_separated_field(self.subject_prefix_tags) def get_listemail_tag(self): return self.listemail.split("@")[0] def __str__(self): return self.name class Meta: ordering = ['linkname'] def user_name(user): if user.first_name or user.last_name: names = list(filter(bool, [user.first_name, user.last_name])) return u' '.join(names) return user.username auth.models.User.add_to_class('name', user_name) @python_2_unicode_compatible class DelegationRule(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) path = models.CharField(max_length=255) project = models.ForeignKey(Project, on_delete=models.CASCADE) priority = models.IntegerField(default=0) def __str__(self): return self.path class Meta: ordering = ['-priority', 'path'] unique_together = (('path', 
'project')) @python_2_unicode_compatible class UserProfile(models.Model): user = models.OneToOneField(User, unique=True, related_name='profile', on_delete=models.CASCADE) primary_project = models.ForeignKey(Project, null=True, blank=True, on_delete=models.CASCADE) maintainer_projects = models.ManyToManyField(Project, related_name='maintainer_project', blank=True) send_email = models.BooleanField(default=False, help_text='Selecting this option allows patchwork to send ' 'email on your behalf') patches_per_page = models.PositiveIntegerField( default=100, null=False, blank=False, help_text='Number of patches to display per page') def name(self): return user_name(self.user) def contributor_projects(self): submitters = Person.objects.filter(user=self.user) return Project.objects.filter(id__in=Patch.objects.filter( submitter__in=submitters) .values('project_id').query) def sync_person(self): pass def n_todo(self): return self.todo_patches().count() + self.todo_series().count() def todo_patches(self, project=None): # filter on project, if necessary if project: qs = Patch.objects.filter(project=project) else: qs = Patch.objects qs = qs.filter(archived=False) \ .filter(delegate=self.user) \ .filter(state__in=State.objects.filter(action_required=True) .values('pk').query) return qs def todo_series(self, project=None): # filter on project, if necessary if project: qs = Series.objects.filter(project=project) else: qs = Series.objects qs = qs.filter(Q(reviewer=self.user), ~Q(last_revision__state=RevisionState.DONE)) return qs def __str__(self): return self.name() def _user_saved_callback(sender, created, instance, **kwargs): try: profile = instance.profile except UserProfile.DoesNotExist: profile = UserProfile(user=instance) profile.save() models.signals.post_save.connect(_user_saved_callback, sender=User) @python_2_unicode_compatible class State(models.Model): name = models.CharField(max_length=100) ordering = models.IntegerField(unique=True) action_required = 
models.BooleanField(default=True) @classmethod def from_string(cls, name): return State.objects.get(name__iexact=name) def __str__(self): return self.name class Meta: ordering = ['ordering'] @python_2_unicode_compatible class Tag(models.Model): name = models.CharField(max_length=20) pattern = models.CharField(max_length=50, help_text='A simple regex to match the tag in the content of ' 'a message. Will be used with MULTILINE and IGNORECASE ' 'flags. eg. ^Acked-by:') abbrev = models.CharField(max_length=2, unique=True, help_text='Short (one-or-two letter) abbreviation for the tag, ' 'used in table column headers') @property def attr_name(self): return 'tag_%d_count' % self.id def __str__(self): return self.name class Meta: ordering = ['abbrev'] class PatchTag(models.Model): patch = models.ForeignKey('Patch', on_delete=models.CASCADE) tag = models.ForeignKey('Tag', on_delete=models.CASCADE) count = models.IntegerField(default=1) class Meta: unique_together = [('patch', 'tag')] def get_default_initial_patch_state(): return State.objects.get(ordering=0) class PatchQuerySet(models.query.QuerySet): def with_tag_counts(self, project): if not project.use_tags: return self # We need the project's use_tags field loaded for Project.tags(). # Using prefetch_related means we'll share the one instance of # Project, and share the project.tags cache between all patch.project # references. 
qs = self.prefetch_related('project') select = OrderedDict() select_params = [] for tag in project.tags: select[tag.attr_name] = ( "coalesce(" "(SELECT count FROM patchwork_patchtag " "WHERE patchwork_patchtag.patch_id=patchwork_patch.id " "AND patchwork_patchtag.tag_id=%s), 0)") select_params.append(tag.id) return qs.extra(select=select, select_params=select_params) class PatchManager(models.Manager): use_for_related_fields = True def get_queryset(self): return PatchQuerySet(self.model, using=self.db) def with_tag_counts(self, project): return self.get_queryset().with_tag_counts(project) def filename(name, ext): fname_re = re.compile('[^-_A-Za-z0-9\.]+') str = fname_re.sub('-', name) return str.strip('-') + ext @python_2_unicode_compatible class Patch(models.Model): project = models.ForeignKey(Project, on_delete=models.CASCADE) msgid = models.CharField(max_length=255) name = models.CharField(max_length=255) date = models.DateTimeField(default=datetime.datetime.now) last_updated = models.DateTimeField(auto_now=True) submitter = models.ForeignKey(Person, on_delete=models.CASCADE) delegate = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE) state = models.ForeignKey(State, null=True, on_delete=models.CASCADE) archived = models.BooleanField(default=False) headers = models.TextField(blank=True) content = models.TextField(null=True, blank=True) pull_url = models.CharField(max_length=255, null=True, blank=True) commit_ref = models.CharField(max_length=255, null=True, blank=True) hash = HashField(null=True, blank=True) tags = models.ManyToManyField(Tag, through=PatchTag) objects = PatchManager() def commit_message(self): """Retrieves the commit message""" return Comment.objects.filter(patch=self, msgid=self.msgid) def answers(self): """Retrieves the answers (ie all comments but the commit message)""" return Comment.objects.filter(Q(patch=self) & ~Q(msgid=self.msgid)) def comments(self): """Retrieves all comments of this patch ie. 
the commit message and the answers""" return Comment.objects.filter(patch=self) def series(self): try: rev = SeriesRevisionPatch.objects.filter(patch=self)[0].revision return rev.series except Exception: return None def _set_tag(self, tag, count): if count == 0: self.patchtag_set.filter(tag=tag).delete() return (patchtag, _) = PatchTag.objects.get_or_create(patch=self, tag=tag) if patchtag.count != count: patchtag.count = count patchtag.save() def refresh_tag_counts(self): tags = self.project.tags counter = Counter() for comment in self.comment_set.all(): counter = counter + extract_tags(comment.content, tags) for tag in tags: self._set_tag(tag, counter[tag]) def save(self): if not hasattr(self, 'state') or not self.state: self.state = get_default_initial_patch_state() if self.hash is None and self.content is not None: self.hash = hash_patch(self.content).hexdigest() super(Patch, self).save() def filename(self): return filename(self.name, '.patch') def human_name(self): return self.name @models.permalink def get_absolute_url(self): return ('patch', (), {'patch_id': self.id}) def __str__(self): return self.name class Meta: verbose_name_plural = 'Patches' ordering = ['date'] unique_together = [('msgid', 'project')] class Comment(models.Model): patch = models.ForeignKey(Patch, on_delete=models.CASCADE) msgid = models.CharField(max_length=255) submitter = models.ForeignKey(Person, on_delete=models.CASCADE) date = models.DateTimeField(default=datetime.datetime.now) headers = models.TextField(blank=True) content = models.TextField() response_re = re.compile( '^((Tested|Reviewed|Acked|Signed-off|Nacked|Reported)-by|Fixes): .*$', re.M | re.I) def patch_responses(self): return ''.join([match.group(0) + '\n' for match in self.response_re.finditer(self.content)]) def save(self, *args, **kwargs): super(Comment, self).save(*args, **kwargs) self.patch.refresh_tag_counts() def delete(self, *args, **kwargs): super(Comment, self).delete(*args, **kwargs) 
self.patch.refresh_tag_counts() class Meta: ordering = ['date'] unique_together = [('msgid', 'patch')] class Bundle(models.Model): owner = models.ForeignKey(User, on_delete=models.CASCADE) project = models.ForeignKey(Project, on_delete=models.CASCADE) name = models.CharField(max_length=50, null=False, blank=False) patches = models.ManyToManyField(Patch, through='BundlePatch') public = models.BooleanField(default=False) def n_patches(self): return self.patches.all().count() def ordered_patches(self): return self.patches.order_by('bundlepatch__order') def append_patch(self, patch): # todo: use the aggregate queries in django 1.1 orders = BundlePatch.objects.filter(bundle=self).order_by('-order') \ .values('order') if len(orders) > 0: max_order = orders[0]['order'] else: max_order = 0 # see if the patch is already in this bundle if BundlePatch.objects.filter(bundle=self, patch=patch).count(): raise Exception("patch is already in bundle") bp = BundlePatch.objects.create(bundle=self, patch=patch, order=max_order + 1) bp.save() def public_url(self): if not self.public: return None site = Site.objects.get_current() return 'http://%s%s' % (site.domain, reverse('bundle', kwargs={ 'username': self.owner.username, 'bundlename': self.name })) @models.permalink def get_absolute_url(self): return ('bundle', (), { 'username': self.owner.username, 'bundlename': self.name, }) class Meta: unique_together = [('owner', 'name')] class BundlePatch(models.Model): patch = models.ForeignKey(Patch, on_delete=models.CASCADE) bundle = models.ForeignKey(Bundle, on_delete=models.CASCADE) order = models.IntegerField() class Meta: unique_together = [('bundle', 'patch')] ordering = ['order'] SERIES_DEFAULT_NAME = "Series without cover letter" class TestState: STATE_PENDING = 0 STATE_INFO = 1 STATE_SUCCESS = 2 STATE_WARNING = 3 STATE_FAILURE = 4 STATE_CHOICES = ( (STATE_PENDING, 'pending'), (STATE_INFO, 'info'), (STATE_SUCCESS, 'success'), (STATE_WARNING, 'warning'), (STATE_FAILURE, 'failure'), ) 
@classmethod def from_string(cls, s): s2i = {s: i for i, s in cls.STATE_CHOICES} return s2i[s] # This Model represents the "top level" Series, an object that doesn't change # with the various versions of patches sent to the mailing list. @python_2_unicode_compatible class Series(models.Model): project = models.ForeignKey(Project, on_delete=models.CASCADE) name = models.CharField(max_length=200, default=SERIES_DEFAULT_NAME) submitter = models.ForeignKey(Person, related_name='submitters', on_delete=models.CASCADE) reviewer = models.ForeignKey(User, related_name='reviewers', null=True, blank=True, on_delete=models.CASCADE) submitted = models.DateTimeField(default=datetime.datetime.now) last_updated = models.DateTimeField(auto_now=True) # direct access to the latest revision so we can get the latest revision # information with a JOIN last_revision = models.OneToOneField('SeriesRevision', null=True, related_name='+', on_delete=models.CASCADE) def revisions(self): return SeriesRevision.objects.filter(series=self) def latest_revision(self): return self.revisions().reverse()[0] def get_absolute_url(self): return reverse('series', kwargs={'series': self.pk}) def dump(self): print('') print('===') print('Series: %s' % self) print(' version: %d' % self.version) print(' n_patches: %d' % self.n_patches) for rev in self.revisions(): print(' rev %d:' % rev.version) i = 1 for patch in rev.ordered_patches(): print(' patch %d:' % i) print(' subject: %s' % patch.name) print(' msgid : %s' % patch.msgid) i += 1 def filename(self): return filename(self.name, '.mbox') def human_name(self): if self.name == SERIES_DEFAULT_NAME: if self.last_revision: ordered_patches = self.last_revision.ordered_patches() if ordered_patches: return "series starting with " + ordered_patches[0].name return "Incomplete Series" else: return self.name def __str__(self): return self.name class Meta: verbose_name_plural = 'Series' ordering = ["-id"] # Signal one can listen to to know when a revision is complete 
(ie. has all of # its patches) series_revision_complete = django.dispatch.Signal(providing_args=["revision"]) class RevisionState: INCOMPLETE = 0 INITIAL = 1 IN_PROGRESS = 2 DONE = 3 CHOICES = ( (INCOMPLETE, 'incomplete'), (INITIAL, 'initial'), (IN_PROGRESS, 'in progress'), (DONE, 'done'), ) i2s = dict(CHOICES) @classmethod def to_string(cls, i): return cls.i2s[i] @classmethod def from_string(cls, s): s2i = {s: i for i, s in cls.CHOICES} return s2i[s] # A 'revision' of a series. Resending a new version of a patch or a full new # iteration of a series will create a new revision. @python_2_unicode_compatible class SeriesRevision(models.Model): series = models.ForeignKey(Series, on_delete=models.CASCADE) version = models.IntegerField(default=1) root_msgid = models.CharField(max_length=255) cover_letter = models.TextField(null=True, blank=True) n_patches = models.IntegerField(default=0) patches = models.ManyToManyField(Patch, through='SeriesRevisionPatch') state = models.SmallIntegerField(choices=RevisionState.CHOICES, default=RevisionState.INCOMPLETE) state_summary = jsonfield.JSONField(null=True) test_state = models.SmallIntegerField(choices=TestState.STATE_CHOICES, null=True, blank=True) class Meta: unique_together = [('series', 'version')] ordering = ['version'] def ordered_patches(self): return self.patches.order_by('seriesrevisionpatch__order') def add_patch(self, patch, order): # see if the patch is already in this revision if SeriesRevisionPatch.objects.filter(revision=self, patch=patch).count(): raise Exception("patch is already in revision") sp = SeriesRevisionPatch.objects.create(revision=self, patch=patch, order=order) sp.save() revision_complete = self.patches.count() == self.n_patches if revision_complete: series_revision_complete.send(sender=self.__class__, revision=self) def duplicate_meta(self): new = SeriesRevision.objects.get(pk=self.pk) new.pk = None new.cover_letter = None new.version = self.version + 1 new.test_state = None new.save() return new 
def duplicate(self, exclude_patches=()): """Create a new revision based on 'self', incrementing the version and populating the new revision with all 'self' patches. exclude_patch (a list of 'order's) can be used to exclude patches from the operation""" new = self.duplicate_meta() order = 0 for p in self.ordered_patches(): order += 1 if order in exclude_patches: continue new.add_patch(p, order) return new def refresh_test_state(self): results = TestResult.objects.filter(revision=self) if results.count() > 0: self.test_state = max([r.state for r in results]) else: self.test_state = None self.save() self.series.save() def human_name(self): name = self.series.name if name == SERIES_DEFAULT_NAME: name = "series starting with " + self.ordered_patches()[0].name if self.version > 1: name += " (rev%d)" % self.version return name def __str__(self): return "Revision " + str(self.version) class SeriesRevisionPatch(models.Model): patch = models.ForeignKey(Patch, on_delete=models.CASCADE) revision = models.ForeignKey(SeriesRevision, on_delete=models.CASCADE) order = models.IntegerField() class Meta: unique_together = [('revision', 'patch'), ('revision', 'order')] ordering = ['order'] class Event(models.Model): name = models.CharField(max_length=20) class EventLog(models.Model): event = models.ForeignKey(Event, on_delete=models.CASCADE) event_time = models.DateTimeField(auto_now=True) series = models.ForeignKey(Series, null=True, on_delete=models.CASCADE) user = models.ForeignKey(User, null=True, on_delete=models.CASCADE) parameters = jsonfield.JSONField(null=True) patch = models.ForeignKey(Patch, null=True, on_delete=models.CASCADE) class Meta: ordering = ['-event_time'] @python_2_unicode_compatible class Test(models.Model): # no mail, default so test systems/scripts can have a grace period to # settle down and give useful results RECIPIENT_NONE = 0 # send mail only to submitter RECIPIENT_SUBMITTER = 1 # send mail to submitter and mailing-list in Cc RECIPIENT_MAILING_LIST = 2 # 
send mail to the addresses listed in the mail_to_list field only RECIPIENT_TO_LIST = 3 RECIPIENT_CHOICES = ( (RECIPIENT_NONE, 'none'), (RECIPIENT_SUBMITTER, 'submitter'), (RECIPIENT_MAILING_LIST, 'mailing list'), (RECIPIENT_TO_LIST, 'recipient list'), ) # send result mail on any state (but pending) CONDITION_ALWAYS = 0 # send result mail on warning or failure CONDITION_ON_WARNING = 1 # send result mail on error CONDITION_ON_FAILURE = 2 CONDITION_CHOICES = ( (CONDITION_ALWAYS, 'always'), (CONDITION_ON_WARNING, 'on warning/failure'), (CONDITION_ON_FAILURE, 'on failure'), ) project = models.ForeignKey(Project, on_delete=models.CASCADE) name = models.CharField(max_length=255) mail_recipient = models.SmallIntegerField(choices=RECIPIENT_CHOICES, default=RECIPIENT_NONE) # email addresses in these lists are always added to the To: and Cc:fields, # unless we don't want to send any email at all. mail_to_list = models.CharField(max_length=255, blank=True, null=True, help_text='Comma separated list of emails') mail_cc_list = models.CharField(max_length=255, blank=True, null=True, help_text='Comma separated list of emails') mail_condition = models.SmallIntegerField(choices=CONDITION_CHOICES, default=CONDITION_ALWAYS) class Meta: unique_together = [('project', 'name')] def get_to_list(self): return get_comma_separated_field(self.mail_to_list) def get_cc_list(self): return get_comma_separated_field(self.mail_cc_list) def __str__(self): return self.name @python_2_unicode_compatible class TestResult(models.Model): test = models.ForeignKey(Test, on_delete=models.CASCADE) revision = models.ForeignKey(SeriesRevision, blank=True, null=True, on_delete=models.CASCADE) patch = models.ForeignKey(Patch, blank=True, null=True, on_delete=models.CASCADE) user = models.ForeignKey(User, on_delete=models.CASCADE) date = models.DateTimeField(auto_now=True) state = models.SmallIntegerField(choices=TestState.STATE_CHOICES) url = models.URLField(blank=True, null=True) summary = 
models.TextField(blank=True, null=True) def __str__(self): return self.get_state_display() class Meta: unique_together = [('test', 'revision'), ('test', 'patch')] class EmailConfirmation(models.Model): validity = datetime.timedelta(days=settings.CONFIRMATION_VALIDITY_DAYS) type = models.CharField(max_length=20, choices=[ ('userperson', 'User-Person association'), ('registration', 'Registration'), ('optout', 'Email opt-out'), ]) email = models.CharField(max_length=200) user = models.ForeignKey(User, null=True, on_delete=models.CASCADE) key = HashField() date = models.DateTimeField(default=datetime.datetime.now) active = models.BooleanField(default=True) def deactivate(self): self.active = False self.save() def is_valid(self): return self.date + self.validity > datetime.datetime.now() def save(self): max = 1 << 32 if self.key == '': str = '%s%s%d' % (self.user, self.email, random.randint(0, max)) self.key = self._meta.get_field('key').construct(str).hexdigest() super(EmailConfirmation, self).save() @python_2_unicode_compatible class EmailOptout(models.Model): email = models.CharField(max_length=200, primary_key=True) @classmethod def is_optout(cls, email): email = email.lower().strip() return cls.objects.filter(email=email).count() > 0 def __str__(self): return self.email class PatchChangeNotification(models.Model): patch = models.OneToOneField(Patch, primary_key=True, on_delete=models.CASCADE) last_modified = models.DateTimeField(default=datetime.datetime.now) orig_state = models.ForeignKey(State, on_delete=models.CASCADE) def _patch_change_log_event(old_patch, new_patch): # If state changed, log the event event_state_change = Event.objects.get(name='patch-state-change') curr_user = threadlocalrequest.get_current_user() previous_state = str(old_patch.state) new_state = str(new_patch.state) # Do not log patch-state-change events for Patches that are not part of a # Series (ie patches older than the introduction of Series) series = old_patch.series() if series: log = 
EventLog(event=event_state_change, user=curr_user, series_id=series.id, patch=old_patch, parameters={'previous_state': previous_state, 'new_state': new_state, }) log.save() def _patch_change_send_notification(old_patch, new_patch): if not new_patch.project.send_notifications: return notification = None try: notification = PatchChangeNotification.objects.get(patch=new_patch) except PatchChangeNotification.DoesNotExist: pass if notification is None: notification = PatchChangeNotification(patch=new_patch, orig_state=old_patch.state) elif notification.orig_state == new_patch.state: # If we're back at the original state, there is no need to notify notification.delete() return notification.last_modified = datetime.datetime.now() notification.save() def _revision_is_done(revision, summary): for entry in summary: if entry[2]: # state__action_required return False return True def _revision_update_state(revision): # the order_by() clears the default ordering (from the Meta class) which # would be used in the GROUP BY clause otherwise. See: # https://docs.djangoproject.com/en/1.8/topics/db/aggregation/#interaction-with-default-ordering-or-order-by summary = revision.patches.values_list('state', 'state__name', 'state__action_required', 'state__ordering') \ .annotate(count=models.Count('state')) \ .order_by() summary = list(summary) summary.sort(key=lambda e: e[3]) revision.state_summary = [{ 'name': s[1], 'final': not s[2], 'count': s[4], } for s in summary] # revision not yet complete revision_complete = revision.patches.count() == revision.n_patches if not revision_complete: revision.state = RevisionState.INCOMPLETE # initial state elif len(summary) == 1 and \ summary[0][0] == get_default_initial_patch_state().pk: revision.state = RevisionState.INITIAL # done: all patches are in a 'final' state, ie. 
a state that doesn't # require any more action elif _revision_is_done(revision, summary): revision.state = RevisionState.DONE # in progress else: revision.state = RevisionState.IN_PROGRESS revision.save() def _patch_change_update_revision_state(new_patch): # gather all the revisions we need to update (a patch can be part of more # than one revision) revisions = new_patch.seriesrevision_set.all() # we shouldn't hit this since we're careful to not call this function on # brand new patches that haven't been linked to a revision yet if len(revisions) == 0: return for rev in revisions: _revision_update_state(rev) def _patch_pre_change_callback(sender, instance, **kwargs): # we only want notification of modified patches if instance.pk is None: return if instance.project is None: return try: orig_patch = Patch.objects.get(pk=instance.pk) except Patch.DoesNotExist: return # If there's no interesting changes, abort without creating the # notification or log if orig_patch.state == instance.state: return _patch_change_log_event(orig_patch, instance) _patch_change_send_notification(orig_patch, instance) def _patch_post_change_callback(sender, instance, created, **kwargs): # We filter out brand new patches because the SeriesRevisionPatch m2m table # isn't populated at that point and so we can't query for the # SeriesRevision <-> Patch relationship. 
if created: return _patch_change_update_revision_state(instance) def _patch_pull_request_log_event(instance, created, **kwargs): if not instance.pull_url or not created: return event_pull_req = Event.objects.get(name='pull-request-new') curr_user = threadlocalrequest.get_current_user() log = EventLog(event=event_pull_req, user=curr_user, patch=instance, parameters={'pull_url': instance.pull_url}) log.save() def _series_revision_patch_post_change_callback(sender, instance, created, **kwargs): # We only hook into that many to many table to cover the case when the # patches are first inserted and the SeriesRevision <-> Patch link wasn't # established until now. if not created: return _revision_update_state(instance.revision) models.signals.pre_save.connect(_patch_pre_change_callback, sender=Patch) models.signals.post_save.connect(_patch_post_change_callback, sender=Patch) models.signals.post_save.connect(_patch_pull_request_log_event, sender=Patch) models.signals.post_save.connect(_series_revision_patch_post_change_callback, sender=SeriesRevisionPatch) def _on_revision_complete(sender, revision, **kwargs): series = revision.series # update series.last_revision series.last_revision = series.latest_revision() series.save() # log event new_revision = Event.objects.get(name='series-new-revision') log = EventLog(event=new_revision, series=series, user=series.submitter.user, parameters={'revision': revision.version}) log.save() series_revision_complete.connect(_on_revision_complete)
ivyl/patchwork
patchwork/models.py
Python
gpl-2.0
33,987
0.000382
# -*- coding: utf-8 -*- # Define here the models for your scraped items # # See documentation in: # http://doc.scrapy.org/en/latest/topics/items.html import scrapy import scrapy.log import datetime def now(): return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") class JmProductItem(scrapy.Item): # define the fields for your item here like: # name = scrapy.Field() name = scrapy.Field() #品名 count = scrapy.Field() #品牌商品数 regular_price = scrapy.Field() #原价 deal_price = scrapy.Field() #折扣价 saled_volumn = scrapy.Field() #销量 discount = scrapy.Field() #折扣 capacity = scrapy.Field() #容量. eg:30ml is_sample = scrapy.Field() # 是否是小样 is_jumei = scrapy.Field() # 是否是聚美优品自营 vendor = scrapy.Field() #供货商 stored_date = scrapy.Field() #抓取时间 is_top3 = scrapy.Field() #是否是top3商品 brand = scrapy.Field() #品牌 favorite = scrapy.Field() #收藏数 brand_id = scrapy.Field() #品牌主键id is_hot_sale = scrapy.Field() # 是否热卖 def record_exist(self, db_connection): cur = db_connection.cursor() count = cur.execute("SELECT * FROM product WHERE name = %s AND stored_date = %s", (self['name'], self['stored_date'])) cur.close() return count > 0 def save2mysql(self, db_connection): cur = db_connection.cursor() ts = now() is_jumei = (self['vendor'].find("聚美优品") >= 0) cur.execute("INSERT INTO product (brand_id, name, count, regular_price, deal_price, saled_volumn, discount, capacity, is_sample, is_jumei, vendor, stored_date, brand_name, insert_time, is_hot_sale) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", (self['brand_id'], self['name'], self['count'], self['regular_price'], self['deal_price'], self['saled_volumn'], self['discount'], self['capacity'], self['is_sample'], is_jumei, self['vendor'], self['stored_date'], self['brand'], ts, self['is_hot_sale'])) db_connection.commit() cur.close() def save2file(self, f): # test function f.write("<JmProductItem> name:{NAME}, count:{COUNT}, regular_price:{RPRICE}, deal_price:{DPRICE}, saled_volumn:{SVOLUMN}, discount:{DISCOUNT}, 
capacity:{CAPACITY}, is_sample:{SAMPLE}, vendor:{JUMEI}, stored_date:{SDATE}, is_top3:{TOP3}, brand:{BRAND}, favorite:{FAVORITE, is_hot_sale:{HOT_SALE}}".format(NAME = self['name'], COUNT = self['count'], RPRICE = self['regular_price'], DPRICE = self['deal_price'], SVOLUMN = self['saled_volumn'], DISCOUNT = self['discount'], CAPACITY = self['capacity'], SAMPLE = self['is_sample'], JUMEI = self['vendor'], SDATE = self['stored_date'], TOP3 = self['is_top3'], BRAND = self['brand'], FAVORITE = self['favorite'], HOT_SALE = self['is_hot_sale'])) def log_self(self, loglevel = scrapy.log.DEBUG): scrapy.log.msg("<JmProductItem> name:{NAME}, count:{COUNT}, regular_price:{RPRICE}, deal_price:{DPRICE}, saled_volumn:{SVOLUMN}, discount:{DISCOUNT}, capacity:{CAPACITY}, is_sample:{SAMPLE}, vendor:{JUMEI}, stored_date:{SDATE}, is_top3:{TOP3}, brand:{BRAND}, favorite:{FAVORITE}, is_hot_sale:{HOT_SALE}".format(NAME = self['name'], COUNT = self['count'], RPRICE = self['regular_price'], DPRICE = self['deal_price'], SVOLUMN = self['saled_volumn'], DISCOUNT = self['discount'], CAPACITY = self['capacity'], SAMPLE = self['is_sample'], JUMEI = self['vendor'], SDATE = self['stored_date'], TOP3 = self['is_top3'], BRAND = self['brand'], FAVORITE = self['favorite'], HOT_SALE = self['is_hot_sale']), loglevel) def to_str(self): print "name:%s, count:%s, regular_price:%s, deal_price:%s, saled_volumn:%s, discount:%s, capacity:%s, is_sample:%s, vendor:%s, stored_date:%s, is_top3:%s, brand:%s, favorite:%s, is_hot_sale:%s" % (self.name, self.count, self.regular_price, self.deal_price, self.saled_volumn, self.discount, self.capacity, self.is_sample, self.vendor, self.stored_date, self.is_top3, self.brand, self.favorite, self.is_hot_sale) class JmPromotionItem(scrapy.Item): name = scrapy.Field() # 专场名称 promotion_count = scrapy.Field() # 场次 stored_date = scrapy.Field() def record_exist(self, db_connection): cur = db_connection.cursor() count = cur.execute("SELECT * FROM promotion where name = %s AND 
stored_date = %s", (self['name'], self['stored_date'])) cur.close() return count > 0 def save2mysql(self, db_connection): cur = db_connection.cursor() ts = now() cur.execute("INSERT INTO promotion (name, promotion_count, stored_date, insert_time) VALUES (%s, %s, %s, %s)", (self['name'], self['promotion_count'], self['stored_date'], ts)) db_connection.commit() cur.close() def to_str(self): return "name:{NAME}, promotion_count:{PCOUNT}, stored_date:{SDATE}".format(NAME = self['name'], PCOUNT = self['promotion_count'], SDATE = self['stored_date']) def log_self(self, loglevel = scrapy.log.DEBUG): scrapy.log.msg("<JmPromotionItem> name:%s, promotion_count:%s, stored_date:%s" % (self['name'], self['promotion_count'], self['stored_date']), loglevel) class VipPromotionItem(scrapy.Item): name = scrapy.Field() location = scrapy.Field() chs_brand = scrapy.Field() eng_brand = scrapy.Field() sku_count = scrapy.Field() discount = scrapy.Field() category_id = scrapy.Field() stored_date = scrapy.Field() is_hot_sale = scrapy.Field() def is_crawled(self, db_connection): cur = db_connection.cursor() count = cur.execute("SELECT id FROM promotion WHERE name=%s AND stored_date=%s AND sku_count IS NOT NULL", (self['name'], self['stored_date'])) cur.close() return count > 0 def record_exist(self, db_connection): cur = db_connection.cursor() count = cur.execute("SELECT id FROM promotion WHERE name=%s AND stored_date=%s AND location_in_page=%s", (self['name'], self['stored_date'], self['location'])) cur.close() return count > 0 def save2mysql(self, db_connection): cur = db_connection.cursor() ts = now() cur.execute("INSERT INTO promotion (name, location_in_page, chs_brand, eng_brand, discount, stored_date, insert_time, category_id, is_hot_sale) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", (self['name'], self['location'], self['chs_brand'], self['eng_brand'], self['discount'], self['stored_date'], ts, self['category_id'], self['is_hot_sale'])) db_connection.commit() cur.close() def 
update_sku_count_in_mysql(self, db_connection): cur = db_connection.cursor() cur.execute("UPDATE promotion SET sku_count=%s WHERE name=%s AND location_in_page=%s AND stored_date=%s", (self['sku_count'], self['name'], self['location'], self['stored_date'])) db_connection.commit() cur.close() def to_str(self): return "name:%s, location:%s, chs_brand:%s, eng_brand:%s, sku_count:%s, discount:%s, category:%s, is_hot_sale:%s" % (self['name'], self['location'], self['chs_brand'], self['eng_brand'], self['sku_count'], self['discount'], self['category'], self['is_hot_sale']) def log_self(self, loglevel = scrapy.log.DEBUG): scrapy.log.msg("name:%s, location:%s, chs_brand:%s, eng_brand:%s, sku_count:%s, discount:%s, category:%s, is_hot_sale:%s" % (self['name'], self['location'], self['chs_brand'], self['eng_brand'], self['sku_count'], self['discount'], self['category'], self['is_hot_sale']), loglevel)
GordonWang/JM-VIP
JMVIPCrawler/items.py
Python
apache-2.0
7,573
0.015325
howmany = input("How many numbers are you using?: ") count = howmany num = [] while count > 0: for i in howmany: x = input("Insert number ",i,": ") num.append(x) count -= 1 def sort(num): size = len(num) for i in range(size): for j in range(size-i-1): if(num[j] > num[j+1]): tmp = num[j] num[j] = num[j+1] num[j+1] = tmp return num srted = sort(num) def mean (num): mean = sum(num)/howmany return mean def median (srted): return srted((howmany + 1)/2) def mode (num): for i in howmany: if
cheesyc/basicpython
mmm.py
Python
mit
628
0.017516
import os from record import Record from timeutils import isodate from git import Git import applib import re class XmlStorage: """ XML storage engine for the record """ @staticmethod def setup(dataDir): engineDir = os.path.join(dataDir, 'xml') os.makedirs(engineDir, exist_ok=True) XmlStorage.dataDir = engineDir XmlStorage.git = Git(engineDir) return XmlStorage.git @staticmethod def sourceToDom(code): """ Parse the raw record data which is XML code, return a Xml DOM object. """ from xml.dom.minidom import parseString return parseString(code) @staticmethod def load(id, path=None): """ Load the content of the record from disk, parse it, and return a record instance. """ if not path: path = XmlStorage.idToPath(id) try: code = open(path).read() doc = XmlStorage.sourceToDom(code) except: return None # collect all fields' data fields = {} for node in doc.firstChild.childNodes: if node.nodeType == node.ELEMENT_NODE: name = node.localName textNode = node.firstChild data = textNode.data if textNode else '' fields[name] = data fields = Record.convertFields(fields.items()) return Record(**fields) @staticmethod def idToPath(id): """ Find and return the absolute path of a record """ cmd = 'find %s -name %s' % (XmlStorage.dataDir, id) stat, lines = applib.get_status_text_output(cmd) if stat and lines: return lines[0] else: return None @staticmethod def matchId(id): """ Return all IDs that starts with 'id' """ cmd = 'find %s -name .git -prune -o -name "%s*" -type f -print' cmd = cmd % (XmlStorage.dataDir, id) stat, lines = applib.get_status_text_output(cmd) ids = list(map(os.path.basename, lines)) return ids @staticmethod def createNode(root, nodeName, nodeText): """ Add an element node with nodeText to the 'root' element """ from xml.dom.minidom import Element, Text ele = Element(nodeName) text = Text() text.data = nodeText ele.appendChild(text) root.appendChild(ele) @staticmethod def recordToSource(recordData): """ Compose Xml source code from a record's data which is a dict 
object. """ from xml.dom.minidom import Document, Text import re doc = Document() root = doc.createElement("log") doc.appendChild(root) items = dict(recordData).items() fields = Record.convertFields(items, False) # sort the fields data according to the definition order orders = {k: v['order'] for k, v in Record.fields.items()} sortKey = lambda x: orders[x[0]] fields = sorted(fields.items(), key=sortKey) for name, value in fields: XmlStorage.createNode(root, name, value) xmlCode = doc.toprettyxml() xmlCode = re.sub('\t', ' ' * 4, xmlCode) # replace tabs with spaces return xmlCode @staticmethod def save(record, oldRecord=None): """ Convert the record to Xml code, and Write the code to the disk, record id is the basename of the record file. If the oldRecord is provided, this is to change an existing record. When to change an existing log, the new log may be saved to a new directory if its timestamp been changed, in such case the old log will be deleted. """ paths = [] if not getattr(record, 'id', None): record.id = applib.genId(record.time) if not oldRecord: # add new record commitMsg = 'Add log\n\n%s' % record.id else: commitMsg = 'Change log\n\n%s' % record.id if record != oldRecord: path = XmlStorage.idToPath(oldRecord.id) paths.append(path) XmlStorage.__delete(None, path=path) else: return path = XmlStorage.saveRecord(record.elements()) paths.append(path) # create a git commit XmlStorage.git.commit(paths, commitMsg) return record @staticmethod def saveRecord(recordData, dir=None): if not dir: dir = XmlStorage.dataDir dateEle = isodate(recordData['time']).split('-') absDirPath = os.path.join(dir, *dateEle) os.makedirs(absDirPath, exist_ok=True) path = os.path.join(absDirPath, recordData['id']) code = XmlStorage.recordToSource(recordData) open(path, 'w').write(code) return path @staticmethod def allIds(): """ Return a generator which yields IDs of all log records. 
""" dataDir = XmlStorage.dataDir cmd = ['find', dataDir, '-name', '.git', '-prune', '-o', '-type', 'f', '-print0'] res = applib.get_status_byte_output(cmd) if not res[0]: print('find command failed:', file=sys.stderr) print(res[2].decode(), file=sys.stderr, end='') return lines = res[1].split(b'\x00')[:-1] # remove the last empty one for path in lines: yield os.path.basename(path.decode()) @staticmethod def __delete(id, path=None): """ Delete a record, either by id or by path """ if not path: path = XmlStorage.idToPath(id) os.unlink(path) @staticmethod def delete(ids, preAction=(lambda x:False), postAction=(lambda x:0)): """ Delete multiple records, create a commit """ paths = list(map(XmlStorage.idToPath, ids)) deletedPaths = [] deletedBNames = [] for path in paths: record = XmlStorage.load(None, path) if not preAction(record): continue XmlStorage.__delete(None, path) postAction(record) deletedPaths.append(path) deletedBNames.append(record.id) if deletedPaths: message = 'Delete log\n\n%s' % '\n'.join(deletedBNames) XmlStorage.git.commit(deletedPaths, message) return True @staticmethod def lastLog(): """ Fetch the last added/changed log record """ logs = XmlStorage.lastLogs() if logs: return logs[0] else: return None @staticmethod def lastLogs(count=1): """ Fetch the last 'count' logs record The paths returned by the git.last may contain paths that been deleted, it shall be ignored. When a single record was collected multiple times, the redundant shall be removed. It can happen when the record was changed multiple times (maybe plus added action) within the range. 
""" vCount = count while True: ps = XmlStorage.git.last(vCount) if len(set(ps)) == count: break else: vCount += 1 paths = [] for p in ps: if p not in paths: paths.append(p) records = [] for path in paths: path = os.path.join(XmlStorage.dataDir, path) if os.path.exists(path): record = XmlStorage.load(None, path=path) records.append(record) return records @staticmethod def makeFilter(tmField, tmPoints, regexps, allMatch=False): """ Create a filter function for filtering the record with the given regular expression, and the time points. The filter function expects a Record instance object. """ def logFilter(record, regexps=regexps, allMatch=allMatch, tmField=tmField, tmPoints=tmPoints): """ timeMatch is True if the time of the record is within any pair of the tmPoints, regMatch is True if any of the provided regular expressions matches any field of a record, or all of them match any field of a record when allMatch is True. Return True only when both timeMatch and regMatch are True. """ timeMatch = regMatch = True # match time if tmPoints: t = getattr(record, tmField) x = [True for t1, t2 in tmPoints if t1 <= t <= t2] timeMatch = bool(x) # match regular expressions if regexps: texts = [record.author, record.subject, record.scene, record.people, record.tag] if not record.binary: texts.append(record.data) if allMatch: def matcher(patterns, inTexts, record): for pat, flag, field in patterns: if field: # match on a specific field texts = [getattr(record, field)] else: # match on input fields texts = inTexts match = False for text in texts: # if the pattern matches any of # the text, the pattern is match if re.search(pat, text, flag): match = True break # if any pattern is not match # the whole failed. 
if not match: return False return True else: def matcher(patterns, inTexts, record): for pat, flag, field in patterns: if field: # match on a specific field texts = [getattr(record, field)] else: # match on input fields texts = inTexts match = False for text in texts: # if the pattern matches any of # the text, the pattern is match if re.search(pat, text, flag): match = True break # if any pattern is match, # the whole is match. if match: return True return False regMatch = matcher(regexps, texts, record) return timeMatch and regMatch return logFilter @staticmethod def searchLogs(fields, criteria, order=None): """ Walk through all log records, collect those that match the criteria. Return a generator which yields a dict for all requested fields. """ def sortRecords(by, records, reverse=False): key = lambda record: getattr(record, by) records.sort(key=key, reverse=reverse) def transRecords(records, fields): for r in records: d = dict([i for i in r.elements().items() if i[0] in fields]) yield d # do a git assisted search if the limit is the only criteria if criteria.get('limit'): ids = criteria.get('ids') times = criteria.get('times') tpnts = times.get('points') if times else None regxs = criteria.get('regxs') patns = regxs.get('patterns') if regxs else None if not tpnts and not patns and not ids: records = XmlStorage.lastLogs(criteria['limit']) if order: sortRecords(order['by'], records, reverse=(not order['ascending'])) return transRecords(records, fields) # create the filter function if criteria and (criteria.get('times') or criteria.get('regxs')): times = criteria.get('times') tmField = times.get('field') if times else None tmPoints = times.get('points', []) if times else [] regxs = criteria.get('regxs') allMatch = regxs.get('allMatch', False) if regxs else False patterns = regxs.get('patterns') if regxs else None filter = XmlStorage.makeFilter(tmField, tmPoints, patterns, allMatch) else: filter = lambda record: True # the IDs ids = criteria.get('ids') if not 
ids: ids = XmlStorage.allIds() else: completeIds = [] for id in ids: completeIds.extend(XmlStorage.matchId(id)) ids = completeIds # collect the records records = [] for id in ids: x = XmlStorage.load(id) if not x: continue if filter(x): records.append(x) if order: sortRecords(order['by'], records, reverse=(not order['ascending'])) return transRecords(records, fields)
iesugrace/log-with-git
xmlstorage.py
Python
gpl-2.0
13,529
0.001626
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function from glob import glob import os import sys from setuptools import setup, Extension from Cython.Build import cythonize if sys.version_info[:2] < (2, 7): print( 'nxcpy requires Python version 2.7 or later' + ' ({}.{} detected).'.format(*sys.version_info[:2])) # Because networkx does sys.exit(-1) libraries = [ ('nxcpy', {'sources': glob('src/*.c') + glob('src/*/*.c'), 'depends': glob('src/*.h') + glob('src/*/*.h'), 'include_dirs': ['src']})] ext_modules = cythonize([ Extension('*.*', ['*/*.pyx'], include_dirs=['src'], libraries=['nxcpy']), Extension('*.*.*', ['*/*/*.pyx'], include_dirs=['src'], libraries=['nxcpy'])] ) install_requires = ['networkx', 'decorator'] if __name__ == "__main__": setup( name = 'nxcpy', packages = ['nxcpy'], libraries = libraries, ext_modules = ext_modules, install_requires = install_requires, test_suite = 'nose.collector', tests_require = ['nose>=0.10.1'] )
OrkoHunter/nxcpy
setup.py
Python
bsd-3-clause
1,193
0.020117
from django.core.management.base import BaseCommand, CommandError from django.db import transaction from operator import itemgetter import face.models.models as regis import random, math class Command(BaseCommand): args = 'none' help = 'Analyze user performance and modify individual question orderings.' @transaction.commit_manually def handle(self, *args, **options): users = regis.User.objects.filter(is_active=True) print 'Examining %d user(s)...' % len(users) # TODO: None of these measures currently include adjustments using the so-called # confidence parameter (theta): (#correct / (#guesses + sqrt(#guesses - avg(#guesses)))) # This list contains a difficulty estimation for this question across # all users. Indexed by question template ID. global_diffs = self.get_global_diffs() # The list containing global correctness rates for users. Indexed by user ID. pqs = self.get_pqs() # This list contains a difficulty estimation for a class of questions. # Indexed by class. local_diff = self.get_local_diffs() # The list containing global correctness rates for users by class. Indexed first # by class, then by users. pcs = self.get_pcs() # For each user, find the relevance scores for each question and # probabilistically order them. for user in users: relevance = {} # print 'User "%s"' % user.username for template in regis.QuestionTemplate.objects.all(): global_score = abs(global_diffs[template.id] - pqs[user.id]) local_score = 0 for tag in template.categories.all(): local_score += abs(local_diff[tag.id] - pcs[tag.id][user.id]) # Divide by the total number of categories. 
if len(template.categories.all()) > 0: local_score /= len(template.categories.all()) relevance[template.id] = 2 - (global_score + local_score) relevance[template.id] /= 2 # print ' #%d %s: %.3f' % (template.id, template.title, relevance[template.id]) questions = [(key, relevance[key]) for key in relevance.keys()] questions = sorted(questions, key=itemgetter(1), reverse=True) order = [] while len(questions) > 0: # Weigh the relevance so that higher values have a significantly higher probability # of being drawn. Each value [0, 1] is currently being raise to the third power but # this parameter can be tweaked. Higher values will make it more probable that # highly relevant questions will be drawn. If the value gets too high then some of # the relevance numbers will approach zero, which is not good. Don't do that. total = math.floor(sum([math.pow(question[1], 3) for question in questions]) * 1000) count = random.randint(0, total) current = 0 while count > 0: count -= questions[current][1] * questions[current][1] * 1000 current += 1 order.append(questions[current-1][0]) del questions[current-1] user_q = regis.Question.objects.exclude(status='retired').filter(user=user) for question in user_q: question.order = order.index(question.template.id) question.save() # Commit the new ordering to the database as a single transaction. transaction.commit() print 'Complete!' # For each user, compute their personal question score. This score # represents the ratio of questions that they get correct vs. those that they # have unlocked. Higher scores indicate a higher level of completion. # # Note that this score is normalized to [0, 1] across all active users. 
def get_pqs(self): diffs = {} for user in regis.User.objects.filter(is_active=True): solved = regis.Question.objects.filter(user=user, status='solved') unsolved = regis.Question.objects.filter(user=user, status='released') diffs[user.id] = (len(solved) * 1.0) / (len(solved) + len(unsolved)) peak_pgs = max(diffs.values()) for key in diffs.keys(): diffs[key] /= peak_pgs return diffs # For each class (tag) AND user, compute the personal class score. This # represents the ratio of questions that the user has answered correctly, # similarly to the PQS score, but this is on a per-class basis. # # This score is also normalized to [0, 1] across all active users. def get_pcs(self): users = regis.User.objects.filter(is_active=True) diffs = {} for tag in regis.QuestionTag.objects.all(): diffs[tag.id] = {} for user in users: solved = regis.Question.objects.filter(status='solved') unsolved = regis.Question.objects.filter(status='released') diffs[tag.id][user.id] = (len(solved) * 1.0) / (len(solved) + len(unsolved)) peak_pcs = max(diffs[tag.id]) for uid in diffs[tag.id].keys(): diffs[tag.id][uid] /= peak_pcs return diffs # For each class, compute the global difficulty score. Similar to # the global difficulty scores but are per-class (tag) instead of # per-template. def get_local_diffs(self): tags = regis.QuestionTag.objects.all() templates = regis.QuestionTemplate.objects.all() solved = regis.Question.objects.filter(status='solved') unsolved = regis.Question.objects.filter(status='released') correct = {} available = {} for tag in tags: correct[tag.id] = 0 available[tag.id] = 0 # Tally everything up. for tag in tags: for template in templates: for question in solved: # If the question is solved and pulled from the correct # template then count it. 
if question.template.id == template.id: correct[tag.id] += 1 available[tag.id] += 1 for question in unsolved: if question.template.id == template.id: available[tag.id] += 1 diffs = [] for tag in tags: if available[tag.id] > 0: diffs.append(1 - (correct[tag.id] * 1.0 / available[tag.id])) else: diffs.append(0) return diffs # For each question, compute the global difficulty score. The difficulty # score is a number in the range [0, 1] and is based on how many user have # unlocked the question vs. how many have solved it. The score increases # as the # of users to successfully solve the question decreases (they have # an inverse relationship). def get_global_diffs(self): unanswered = regis.Question.objects.exclude(user=None).filter(status='released') answered = regis.Question.objects.exclude(user=None).filter(status='solved') correct = {} available = {} templates = regis.QuestionTemplate.objects.all() for template in templates: correct[template.id] = 0 available[template.id] = 0 # Count all of the questions that are available but unanswered. for question in unanswered: available[question.template.id] += 1 # Count all of the questions that are answered. for question in answered: correct[question.template.id] += 1 available[question.template.id] += 1 diffs = {} for template in templates: if available[template.id] == 0: perc = 0 else: perc = 1 - (correct[template.id] * 1.0 / available[template.id]) diffs[template.id] = perc return diffs
anyweez/regis
face/management/commands/personalize.py
Python
gpl-2.0
8,409
0.006541
import unittest import os.path import numpy as np import pandas as pd from pandas.util.testing import assert_frame_equal import test_helper import copy from operator import lt, le, eq, ne, ge, gt from pandas.core.index import Index __index_symbol__ = { Index.union: ',', Index.intersection: '&', Index.difference: '~', Index.sym_diff: '^' } from collections import defaultdict, OrderedDict from quantipy.core.stack import Stack from quantipy.core.chain import Chain from quantipy.core.link import Link from quantipy.core.view_generators.view_mapper import ViewMapper from quantipy.core.view_generators.view_maps import QuantipyViews from quantipy.core.view import View from quantipy.core.helpers import functions from quantipy.core.helpers.functions import load_json from quantipy.core.tools.dp.prep import ( frange, frequency, crosstab ) from quantipy.core.tools.view.query import get_dataframe from quantipy.core.dataset import DataSet EXTENDED_TESTS = False COUNTER = 0 class TestRules(unittest.TestCase): def setUp(self): self.path = './tests/' project_name = 'Example Data (A)' # Load Example Data (A) data and meta into self name_data = '%s.csv' % (project_name) path_data = '%s%s' % (self.path, name_data) self.example_data_A_data = pd.DataFrame.from_csv(path_data) name_meta = '%s.json' % (project_name) path_meta = '%s%s' % (self.path, name_meta) self.example_data_A_meta = load_json(path_meta) # Variables by type for Example Data A self.dk = 'Example Data (A)' self.fk = 'no_filter' self.single = ['gender', 'locality', 'ethnicity', 'religion', 'q1'] self.delimited_set = ['q2', 'q3', 'q8', 'q9'] self.q5 = ['q5_1', 'q5_2', 'q5_3'] def test_slicex(self): meta = self.example_data_A_meta data = self.example_data_A_data col_x = 'religion' col_y = 'ethnicity' ################## values meta['columns'][col_x]['rules'] = { 'x': {'slicex': {'values': [1, 3, 5, 7, 9, 11, 13, 15]}}} meta['columns'][col_y]['rules'] = { 'y': {'slicex': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}}} rules_values_x 
= { 'unwtd': index_items(col_x, all=True, values=[1, 3, 5, 7, 9, 11, 13, 15]), 'iswtd': index_items(col_x, all=True, values=[1, 3, 5, 7, 9, 11, 13, 15])} rules_values_y = { 'unwtd': index_items(col_y, all=True, values=[2, 4, 6, 8, 10, 12, 14, 16]), 'iswtd': index_items(col_y, all=True, values=[2, 4, 6, 8, 10, 12, 14, 16])} confirm_crosstabs( self, meta, data, [None, 'weight_a'], col_x, col_y, rules_values_x, rules_values_y) def _get_dataset(self): meta = self.example_data_A_meta data = self.example_data_A_data dataset = DataSet('rules_test') dataset.set_verbose_infomsg(False) dataset.from_components(data, meta) return dataset def _get_stack_with_links(self, dataset, x=None, y=None, w=None): stack = Stack() stack.add_data(dataset.name, dataset._data, dataset._meta) if not x: x = '@' if not y: y = '@' stack.add_link(x=x, y=y, weights=w) return stack def test_sortx_summaries_mean(self): dataset = self._get_dataset() x = 'q5' y = '@' dataset.sorting(x, on='mean') stack = self._get_stack_with_links(dataset, x) stack.add_link(x=x, y=y, views=['cbase', 'counts', 'c%', 'mean']) vks = ['x|f|x:|||cbase', 'x|f|:|||counts', 'x|f|:|y||c%', 'x|d.mean|x:|||mean'] chains = stack.get_chain(data_keys=dataset.name, filters='no_filter', x=[x], y=[y], rules=True, views=vks, orient_on='x') chain = chains[0] for vk in vks: v = chain['rules_test']['no_filter'][x][y][vk] l = stack['rules_test']['no_filter'][x][y][vk] check_chain_view_dataframe = v.dataframe.reindex_like(l.dataframe) self.assertTrue(check_chain_view_dataframe.equals(l.dataframe)) actual_order = v.dataframe.index.get_level_values(1).tolist() expected_order = ['q5_4', 'q5_6', 'q5_1', 'q5_3', 'q5_5', 'q5_2'] self.assertEqual(actual_order, expected_order) def test_sortx_summaries_value(self): dataset = self._get_dataset() x = 'q5' y = '@' dataset.sorting(x, on=3, ascending=True) stack = self._get_stack_with_links(dataset, x) stack.add_link(x=x, y=y, views=['cbase', 'counts', 'c%', 'mean']) vks = ['x|f|x:|||cbase', 
'x|f|:|||counts', 'x|f|:|y||c%', 'x|d.mean|x:|||mean'] chains = stack.get_chain(data_keys=dataset.name, filters='no_filter', x=[x], y=[y], rules=True, views=vks, orient_on='x') chain = chains[0] for vk in vks: v = chain['rules_test']['no_filter'][x][y][vk] l = stack['rules_test']['no_filter'][x][y][vk] check_chain_view_dataframe = v.dataframe.reindex_like(l.dataframe) self.assertTrue(check_chain_view_dataframe.equals(l.dataframe)) actual_order = v.dataframe.index.get_level_values(1).tolist() expected_order = ['q5_4', 'q5_5', 'q5_6', 'q5_1', 'q5_3', 'q5_2'] self.assertEqual(actual_order, expected_order) def test_sortx_summaries_items(self): dataset = self._get_dataset() x = '@' y = 'q5' dataset.sorting(y, on='q5_2', ascending=False) stack = self._get_stack_with_links(dataset, y=y) stack.add_link(x=x, y=y, views=['cbase', 'counts', 'c%', 'mean']) vks = ['x|f|x:|||cbase', 'x|f|:|||counts', 'x|f|:|y||c%', 'x|d.mean|x:|||mean'] chains = stack.get_chain(data_keys=dataset.name, filters='no_filter', x=[x], y=[y], rules=True, views=vks, orient_on='x') chain = chains[0] for vk in vks: v = chain['rules_test']['no_filter'][x][y][vk] l = stack['rules_test']['no_filter'][x][y][vk] if not 'd.mean' in vk and not 'cbase' in vk: check_chain_view_dataframe = v.dataframe.reindex_like(l.dataframe) self.assertTrue(check_chain_view_dataframe.equals(l.dataframe)) actual_order = v.dataframe.index.get_level_values(1).tolist() expected_order = [3, 5, 98, 2, 1, 97, 4] self.assertEqual(actual_order, expected_order) def test_sortx_expand_net_within(self): dataset = self._get_dataset() x = 'q2' y = ['@', 'gender'] dataset.sorting(x, on='@', within=True, between=False, fix=98) stack = self._get_stack_with_links(dataset, x=x, y=y) net = [{'test A': [1, 2, 3], 'text': {'en-GB': 'Lab1'}}, {'test B': [5, 6, 97], 'text': {'en-GB': 'Lab2'}}] net_view = ViewMapper().make_template('frequency') view_name = 'expandnet' options = {'logic': net, 'expand': 'after', 'complete': True, 'axis': 'x', 'iterators': 
{'rel_to': [None, 'y']}} net_view.add_method(view_name, kwargs=options) stack.add_link(x=x, y=y, views=net_view) vks = ['x|f|x[{1,2,3}+],x[{5,6,97}+]*:|||expandnet', 'x|f|x[{1,2,3}+],x[{5,6,97}+]*:|y||expandnet'] chains = stack.get_chain(data_keys=dataset.name, filters='no_filter', x=[x], y=y, rules=True, views=vks, orient_on='x') chain = chains[0] for yk in y: for vk in vks: v = chain['rules_test']['no_filter'][x][yk][vk] l = stack['rules_test']['no_filter'][x][yk][vk] check_chain_view_dataframe = v.dataframe.reindex_like(l.dataframe) self.assertTrue(check_chain_view_dataframe.equals(l.dataframe)) actual_order = v.dataframe.index.get_level_values(1).tolist() expected_order = ['test A', 3, 2, 1, 4, 'test B', 97, 5, 6, 98] self.assertEqual(actual_order, expected_order) def test_sortx_expand_net_between(self): dataset = self._get_dataset() x = 'q2' y = ['@', 'gender'] dataset.sorting(x, on='@', within=False, between=True, ascending=True, fix=98) stack = self._get_stack_with_links(dataset, x=x, y=y) net = [{'test A': [1, 2, 3], 'text': {'en-GB': 'Lab1'}}, {'test B': [5, 6, 97], 'text': {'en-GB': 'Lab2'}}] net_view = ViewMapper().make_template('frequency') view_name = 'expandnet' options = {'logic': net, 'expand': 'after', 'complete': True, 'axis': 'x', 'iterators': {'rel_to': [None, 'y']}} net_view.add_method(view_name, kwargs=options) stack.add_link(x=x, y=y, views=net_view) vks = ['x|f|x[{1,2,3}+],x[{5,6,97}+]*:|||expandnet', 'x|f|x[{1,2,3}+],x[{5,6,97}+]*:|y||expandnet'] chains = stack.get_chain(data_keys=dataset.name, filters='no_filter', x=[x], y=y, rules=True, views=vks, orient_on='x') chain = chains[0] for yk in y: for vk in vks: v = chain['rules_test']['no_filter'][x][yk][vk] l = stack['rules_test']['no_filter'][x][yk][vk] check_chain_view_dataframe = v.dataframe.reindex_like(l.dataframe) self.assertTrue(check_chain_view_dataframe.equals(l.dataframe)) actual_order = v.dataframe.index.get_level_values(1).tolist() expected_order = [4, 'test B', 5, 6, 97, 'test 
A', 1, 2, 3, 98] self.assertEqual(actual_order, expected_order) def test_sortx_expand_net_within_between(self): dataset = self._get_dataset() x = 'q2' y = ['@', 'gender'] dataset.sorting(x, on='@', within=True, between=True, ascending=False, fix=98) stack = self._get_stack_with_links(dataset, x=x, y=y) net = [{'test A': [1, 2, 3], 'text': {'en-GB': 'Lab1'}}, {'test B': [5, 6, 97], 'text': {'en-GB': 'Lab2'}}] net_view = ViewMapper().make_template('frequency') view_name = 'expandnet' options = {'logic': net, 'expand': 'after', 'complete': True, 'axis': 'x', 'iterators': {'rel_to': [None, 'y']}} net_view.add_method(view_name, kwargs=options) stack.add_link(x=x, y=y, views=net_view) test_view = ViewMapper().make_template('coltests') view_name = 'test' options = {'level': 0.2} test_view.add_method(view_name, kwargs=options) stack.add_link(x=x, y=y, views=test_view) vks = ['x|f|x[{1,2,3}+],x[{5,6,97}+]*:|||expandnet', 'x|f|x[{1,2,3}+],x[{5,6,97}+]*:|y||expandnet', 'x|t.props.Dim.20|x[{1,2,3}+],x[{5,6,97}+]*:|||test'] chains = stack.get_chain(data_keys=dataset.name, filters='no_filter', x=[x], y=y, rules=True, views=vks, orient_on='x') chain = chains[0] for yk in y: for vk in vks: v = chain['rules_test']['no_filter'][x][yk][vk] l = stack['rules_test']['no_filter'][x][yk][vk] check_chain_view_dataframe = v.dataframe.reindex_like(l.dataframe) self.assertTrue(check_chain_view_dataframe.equals(l.dataframe)) actual_order = v.dataframe.index.get_level_values(1).tolist() expected_order = ['test A', 3, 2, 1, 'test B', 97, 5, 6, 4, 98] self.assertEqual(actual_order, expected_order) def test_sortx(self): meta = self.example_data_A_meta data = self.example_data_A_data col_x = 'religion' col_y = 'ethnicity' ################## sort_on - default meta['columns'][col_x]['rules'] = {'x': {'sortx': {}}} meta['columns'][col_y]['rules'] = {'y': {'sortx': {}}} rules_values_x = { 'unwtd': index_items(col_x, all=True, values=[2, 1, 3, 15, 4, 5, 16, 6, 10, 12, 14, 11, 7, 13, 8, 9]), 'iswtd': 
index_items(col_x, all=True, values=[2, 1, 3, 15, 4, 5, 16, 6, 12, 10, 14, 11, 7, 13, 9, 8])} rules_values_y = { 'unwtd': index_items(col_y, all=True, values=[1, 2, 16, 7, 15, 12, 3, 11, 14, 6, 8, 10, 9, 5, 4, 13]), 'iswtd': index_items(col_y, all=True, values=[1, 2, 16, 7, 12, 11, 3, 15, 8, 9, 10, 14, 5, 6, 4, 13])} confirm_crosstabs( self, meta, data, [None, 'weight_a'], col_x, col_y, rules_values_x, rules_values_y) ################## sort_on - '@' meta['columns'][col_x]['rules'] = { 'x': {'sortx': {'sort_on': '@'}}} meta['columns'][col_y]['rules'] = { 'y': {'sortx': {'sort_on': '@'}}} rules_values_x = { 'unwtd': index_items(col_x, all=True, values=[2, 1, 3, 15, 4, 5, 16, 6, 10, 12, 14, 11, 7, 13, 8, 9]), 'iswtd': index_items(col_x, all=True, values=[2, 1, 3, 15, 4, 5, 16, 6, 12, 10, 14, 11, 7, 13, 9, 8])} rules_values_y = { 'unwtd': index_items(col_y, all=True, values=[1, 2, 16, 7, 15, 12, 3, 11, 14, 6, 8, 10, 9, 5, 4, 13]), 'iswtd': index_items(col_y, all=True, values=[1, 2, 16, 7, 12, 11, 3, 15, 8, 9, 10, 14, 5, 6, 4, 13])} confirm_crosstabs( self, meta, data, [None, 'weight_a'], col_x, col_y, rules_values_x, rules_values_y) ################## fixed meta['columns'][col_x]['rules'] = { 'x': {'sortx': {'fixed': [5, 1, 3]}}} meta['columns'][col_y]['rules'] = { 'y': {'sortx': {'fixed': [6, 2, 4]}}} rules_values_x = { 'unwtd': index_items(col_x, all=True, values=[2, 15, 4, 16, 6, 10, 12, 14, 11, 7, 13, 8, 9, 5, 1, 3]), 'iswtd': index_items(col_x, all=True, values=[2, 15, 4, 16, 6, 12, 10, 14, 11, 7, 13, 9, 8, 5, 1, 3])} rules_values_y = { 'unwtd': index_items(col_y, all=True, values=[1, 16, 7, 15, 12, 3, 11, 14, 8, 10, 9, 5, 13, 6, 2, 4]), 'iswtd': index_items(col_y, all=True, values=[1, 16, 7, 12, 11, 3, 15, 8, 9, 10, 14, 5, 13, 6, 2, 4])} confirm_crosstabs( self, meta, data, [None, 'weight_a'], col_x, col_y, rules_values_x, rules_values_y) ################## with_weight meta['columns'][col_x]['rules'] = { 'x': {'sortx': {'with_weight': 'weight_b'}}} 
meta['columns'][col_y]['rules'] = { 'y': {'sortx': {'with_weight': 'weight_b'}}} rules_values_x = { 'unwtd': index_items(col_x, all=True, values=[2, 1, 3, 15, 4, 5, 16, 12, 6, 10, 14, 11, 7, 13, 9, 8]), 'iswtd': index_items(col_x, all=True, values=[2, 1, 3, 15, 4, 5, 16, 12, 6, 10, 14, 11, 7, 13, 9, 8])} rules_values_y = { 'unwtd': index_items(col_y, all=True, values=[1, 2, 16, 7, 11, 3, 12, 15, 8, 9, 10, 5, 14, 6, 4, 13]), 'iswtd': index_items(col_y, all=True, values=[1, 2, 16, 7, 11, 3, 12, 15, 8, 9, 10, 5, 14, 6, 4, 13])} confirm_crosstabs( self, meta, data, [None, 'weight_a'], col_x, col_y, rules_values_x, rules_values_y) def test_dropx(self): meta = self.example_data_A_meta data = self.example_data_A_data col_x = 'religion' col_y = 'ethnicity' ################## values meta['columns'][col_x]['rules'] = { 'x': {'dropx': {'values': [1, 3, 5, 7, 9, 11, 13, 15]}}} meta['columns'][col_y]['rules'] = { 'y': {'dropx': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}}} rules_values_x = { 'unwtd': index_items(col_x, all=True, values=[2, 4, 6, 8, 10, 12, 14, 16]), 'iswtd': index_items(col_x, all=True, values=[2, 4, 6, 8, 10, 12, 14, 16])} rules_values_y = { 'unwtd': index_items(col_y, all=True, values=[1, 3, 5, 7, 9, 11, 13, 15]), 'iswtd': index_items(col_y, all=True, values=[1, 3, 5, 7, 9, 11, 13, 15])} confirm_crosstabs( self, meta, data, [None, 'weight_a'], col_x, col_y, rules_values_x, rules_values_y) def test_rules_frequency(self): meta = self.example_data_A_meta data = self.example_data_A_data col = 'religion' ################## slicex meta['columns'][col]['rules'] = { 'x': {'slicex': {'values': [1, 3, 5, 7, 9, 10, 11, 13, 15]}}, 'y': {'slicex': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}}} rules_values_x = { 'unwtd': index_items(col, all=True, values=[1, 3, 5, 7, 9, 10, 11, 13, 15]), 'iswtd': index_items(col, all=True, values=[1, 3, 5, 7, 9, 10, 11, 13, 15])} rules_values_y = { 'unwtd': index_items(col, all=True, values=[2, 4, 6, 8, 10, 12, 14, 16]), 'iswtd': 
index_items(col, all=True, values=[2, 4, 6, 8, 10, 12, 14, 16])} confirm_frequencies( self, meta, data, [None, 'weight_a'], col, rules_values_x, rules_values_y) ################## sortx meta['columns'][col]['rules'] = { 'x': {'sortx': {'fixed': [5, 1, 3]}}, 'y': {'sortx': {'fixed': [6, 2, 4]}}} rules_values_x = { 'unwtd': index_items(col, all=True, values=[2, 15, 4, 16, 6, 10, 12, 14, 11, 7, 13, 8, 9, 5, 1, 3]), 'iswtd': index_items(col, all=True, values=[2, 15, 4, 16, 6, 12, 10, 14, 11, 7, 13, 9, 8, 5, 1, 3])} rules_values_y = { 'unwtd': index_items(col, all=True, values=[1, 3, 15, 5, 16, 10, 12, 14, 11, 7, 13, 8, 9, 6, 2, 4]), 'iswtd': index_items(col, all=True, values=[1, 3, 15, 5, 16, 12, 10, 14, 11, 7, 13, 9, 8, 6, 2, 4])} confirm_frequencies( self, meta, data, [None, 'weight_a'], col, rules_values_x, rules_values_y) ################## dropx meta['columns'][col]['rules'] = { 'x': {'dropx': {'values': [1, 3, 5, 7, 9, 11, 13, 15]}}, 'y': {'dropx': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}}} rules_values_x = { 'unwtd': index_items(col, all=True, values=[2, 4, 6, 8, 10, 12, 14, 16]), 'iswtd': index_items(col, all=True, values=[2, 4, 6, 8, 10, 12, 14, 16])} rules_values_y = { 'unwtd': index_items(col, all=True, values=[1, 3, 5, 7, 9, 11, 13, 15]), 'iswtd': index_items(col, all=True, values=[1, 3, 5, 7, 9, 11, 13, 15])} confirm_frequencies( self, meta, data, [None, 'weight_a'], col, rules_values_x, rules_values_y) ################## slicex + sortx meta['columns'][col]['rules'] = { 'x': { 'slicex': {'values': frange('4-13')}, 'sortx': {'fixed': [1, 2]}}, 'y': { 'slicex': {'values': frange('7-16')}, 'sortx': {'fixed': [15, 16]}}} rules_values_x = { 'unwtd': index_items(col, all=True, values=[4, 5, 6, 10, 12, 11, 7, 13, 8, 9, 1, 2]), 'iswtd': index_items(col, all=True, values=[4, 5, 6, 12, 10, 11, 7, 13, 9, 8, 1, 2])} rules_values_y = { 'unwtd': index_items(col, all=True, values=[10, 12, 14, 11, 7, 13, 8, 9, 15, 16]), 'iswtd': index_items(col, all=True, values=[12, 10, 
14, 11, 7, 13, 9, 8, 15, 16])} confirm_frequencies( self, meta, data, [None, 'weight_a'], col, rules_values_x, rules_values_y) ################## slicex + dropx meta['columns'][col]['rules'] = { 'x': { 'slicex': {'values': [1, 3, 5, 7, 9, 11, 13, 15]}, 'dropx': {'values': [3, 7, 11, 15]}}, 'y': { 'slicex': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}, 'dropx': {'values': [2, 6, 10, 14]}}} rules_values_x = { 'unwtd': index_items(col, all=True, values=[1, 5, 9, 13]), 'iswtd': index_items(col, all=True, values=[1, 5, 9, 13])} rules_values_y = { 'unwtd': index_items(col, all=True, values=[4, 8, 12, 16]), 'iswtd': index_items(col, all=True, values=[4, 8, 12, 16])} confirm_frequencies( self, meta, data, [None, 'weight_a'], col, rules_values_x, rules_values_y) ################## sortx + dropx meta['columns'][col]['rules'] = { 'x': { 'sortx': {'fixed': [1, 2]}, 'dropx': {'values': [5, 11, 13]}}, 'y': { 'sortx': {'fixed': [15, 16]}, 'dropx': {'values': [7, 13, 14]}}} rules_values_x = { 'unwtd': index_items(col, all=True, values=[3, 15, 4, 16, 6, 10, 12, 14, 7, 8, 9, 1, 2]), 'iswtd': index_items(col, all=True, values=[3, 15, 4, 16, 6, 12, 10, 14, 7, 9, 8, 1, 2])} rules_values_y = { 'unwtd': index_items(col, all=True, values=[2, 1, 3, 4, 5, 6, 10, 12, 11, 8, 9, 15, 16]), 'iswtd': index_items(col, all=True, values=[2, 1, 3, 4, 5, 6, 12, 10, 11, 9, 8, 15, 16])} confirm_frequencies( self, meta, data, [None, 'weight_a'], col, rules_values_x, rules_values_y) ################## slicex + sortx + dropx meta['columns'][col]['rules'] = { 'x': { 'slicex': {'values': frange('4-13')}, 'sortx': {'fixed': [11, 13]}, 'dropx': {'values': [7]}}, 'y': { 'slicex': {'values': frange('7-16')}, 'sortx': {'fixed': [15, 16]}, 'dropx': {'values': [7, 13]}}} rules_values_x = { 'unwtd': index_items(col, all=True, values=[4, 5, 6, 10, 12, 8, 9, 11, 13]), 'iswtd': index_items(col, all=True, values=[4, 5, 6, 12, 10, 9, 8, 11, 13])} rules_values_y = { 'unwtd': index_items(col, all=True, values=[10, 12, 14, 11, 
8, 9, 15, 16]), 'iswtd': index_items(col, all=True, values=[12, 10, 14, 11, 9, 8, 15, 16])} confirm_frequencies( self, meta, data, [None, 'weight_a'], col, rules_values_x, rules_values_y) def test_rules_crosstab(self): meta = self.example_data_A_meta data = self.example_data_A_data col_x = 'religion' col_y = 'ethnicity' ################## slicex + sortx + dropx meta['columns'][col_x]['rules'] = { 'x': { 'slicex': {'values': frange('4-13')}, 'sortx': {'fixed': [4, 7, 3]}, 'dropx': {'values': [6, 11]}}} meta['columns'][col_y]['rules'] = { 'y': { 'slicex': {'values': frange('7-16')}, 'sortx': {'fixed': [7, 11, 13]}, 'dropx': {'values': [11, 16]}}} rules_values_x = { 'unwtd': index_items(col_x, all=True, values=[5, 10, 12, 13, 8, 9, 4, 7, 3]), 'iswtd': index_items(col_x, all=True, values=[5, 12, 10, 13, 9, 8, 4, 7, 3])} rules_values_y = { 'unwtd': index_items(col_y, all=True, values=[15, 12, 14, 8, 10, 9, 7, 13]), 'iswtd': index_items(col_y, all=True, values=[12, 15, 8, 9, 10, 14, 7, 13])} confirm_crosstabs( self, meta, data, [None, 'weight_a'], col_x, col_y, rules_values_x, rules_values_y) if EXTENDED_TESTS: ################## slicex meta['columns'][col_x]['rules'] = { 'x': {'slicex': {'values': [1, 3, 5, 7, 9, 10, 11, 13, 15]}}} meta['columns'][col_y]['rules'] = { 'y': {'slicex': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}}} rules_values_x = { 'unwtd': index_items(col_x, all=True, values=[1, 3, 5, 7, 9, 10, 11, 13, 15]), 'iswtd': index_items(col_x, all=True, values=[1, 3, 5, 7, 9, 10, 11, 13, 15])} rules_values_y = { 'unwtd': index_items(col_y, all=True, values=[2, 4, 6, 8, 10, 12, 14, 16]), 'iswtd': index_items(col_y, all=True, values=[2, 4, 6, 8, 10, 12, 14, 16])} confirm_crosstabs( self, meta, data, [None, 'weight_a'], col_x, col_y, rules_values_x, rules_values_y) ################## sortx meta['columns'][col_x]['rules'] = { 'x': {'sortx': {'fixed': [5, 1, 3]}}} meta['columns'][col_y]['rules'] = { 'y': {'sortx': {'fixed': [6, 2, 4]}}} rules_values_x = { 'unwtd': 
index_items(col_x, all=True, values=[2, 15, 4, 16, 6, 10, 12, 14, 11, 7, 13, 8, 9, 5, 1, 3]), 'iswtd': index_items(col_x, all=True, values=[2, 15, 4, 16, 6, 12, 10, 14, 11, 7, 13, 9, 8, 5, 1, 3])} rules_values_y = { 'unwtd': index_items(col_y, all=True, values=[1, 16, 7, 15, 12, 3, 11, 14, 8, 10, 9, 5, 13, 6, 2, 4]), 'iswtd': index_items(col_y, all=True, values=[1, 16, 7, 12, 11, 3, 15, 8, 9, 10, 14, 5, 13, 6, 2, 4])} confirm_crosstabs( self, meta, data, [None, 'weight_a'], col_x, col_y, rules_values_x, rules_values_y) ################## dropx meta['columns'][col_x]['rules'] = { 'x': {'dropx': {'values': [1, 3, 5, 7, 9, 11, 13, 15]}}} meta['columns'][col_y]['rules'] = { 'y': {'dropx': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}}} rules_values_x = { 'unwtd': index_items(col_x, all=True, values=[2, 4, 6, 8, 10, 12, 14, 16]), 'iswtd': index_items(col_x, all=True, values=[2, 4, 6, 8, 10, 12, 14, 16])} rules_values_y = { 'unwtd': index_items(col_y, all=True, values=[1, 3, 5, 7, 9, 11, 13, 15]), 'iswtd': index_items(col_y, all=True, values=[1, 3, 5, 7, 9, 11, 13, 15])} confirm_crosstabs( self, meta, data, [None, 'weight_a'], col_x, col_y, rules_values_x, rules_values_y) ################## slicex + sortx meta['columns'][col_x]['rules'] = { 'x': { 'slicex': {'values': frange('4-13')}, 'sortx': {'fixed': [4, 7, 3]}}} meta['columns'][col_y]['rules'] = { 'y': { 'slicex': {'values': frange('7-16')}, 'sortx': {'fixed': [7, 11, 13]}}} rules_values_x = { 'unwtd': index_items(col_x, all=True, values=[5, 6, 10, 12, 11, 13, 8, 9, 4, 7, 3]), 'iswtd': index_items(col_x, all=True, values=[5, 6, 12, 10, 11, 13, 9, 8, 4, 7, 3])} rules_values_y = { 'unwtd': index_items(col_y, all=True, values=[16, 15, 12, 14, 8, 10, 9, 7, 11, 13]), 'iswtd': index_items(col_y, all=True, values=[16, 12, 15, 8, 9, 10, 14, 7, 11, 13])} confirm_crosstabs( self, meta, data, [None, 'weight_a'], col_x, col_y, rules_values_x, rules_values_y) ################## slicex + dropx meta['columns'][col_x]['rules'] = { 'x': { 
'slicex': {'values': [1, 3, 5, 7, 9, 11, 13, 15]}, 'dropx': {'values': [3, 7, 11, 15]}}} meta['columns'][col_y]['rules'] = { 'y': { 'slicex': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}, 'dropx': {'values': [2, 6, 10, 14]}}} rules_values_x = { 'unwtd': index_items(col_x, all=True, values=[1, 5, 9, 13]), 'iswtd': index_items(col_x, all=True, values=[1, 5, 9, 13])} rules_values_y = { 'unwtd': index_items(col_y, all=True, values=[4, 8, 12, 16]), 'iswtd': index_items(col_y, all=True, values=[4, 8, 12, 16])} confirm_crosstabs( self, meta, data, [None, 'weight_a'], col_x, col_y, rules_values_x, rules_values_y) ################## sortx + dropx meta['columns'][col_x]['rules'] = { 'x': { 'sortx': {'fixed': [4, 7, 3]}, 'dropx': {'values': [5, 10]}}} meta['columns'][col_y]['rules'] = { 'y': { 'sortx': {'fixed': [7, 11, 13]}, 'dropx': {'values': [4, 12]}}} rules_values_x = { 'unwtd': index_items(col_x, all=True, values=[2, 1, 15, 16, 6, 12, 14, 11, 13, 8, 9, 4, 7, 3]), 'iswtd': index_items(col_x, all=True, values=[2, 1, 15, 16, 6, 12, 14, 11, 13, 9, 8, 4, 7, 3])} rules_values_y = { 'unwtd': index_items(col_y, all=True, values=[1, 2, 16, 15, 3, 14, 6, 8, 10, 9, 5, 7, 11, 13]), 'iswtd': index_items(col_y, all=True, values=[1, 2, 16, 3, 15, 8, 9, 10, 14, 5, 6, 7, 11, 13])} confirm_crosstabs( self, meta, data, [None, 'weight_a'], col_x, col_y, rules_values_x, rules_values_y) def test_rules_get_dataframe(self): meta = self.example_data_A_meta data = self.example_data_A_data col_x = 'religion' col_y = 'ethnicity' xks = [col_x] yks = ['@', col_y] test_views = [ 'cbase', 'rbase', # 'ebase', 'counts', 'c%', 'r%', 'mean'] weights = [None, 'weight_a'] ################## slicex + sortx + dropx meta['columns'][col_x]['rules'] = { 'x': { 'slicex': {'values': frange('4-13')}, 'sortx': {'fixed': [4, 7, 3]}, 'dropx': {'values': [6, 11]}}} meta['columns'][col_y]['rules'] = { 'y': { 'slicex': {'values': frange('7-16')}, 'sortx': {'fixed': [7, 11, 13]}, 'dropx': {'values': [11, 16]}}} rules_values_x 
= { 'unwtd': index_items(col_x, all=False, values=[5, 10, 12, 13, 8, 9, 4, 7, 3]), 'iswtd': index_items(col_x, all=False, values=[5, 12, 10, 13, 9, 8, 4, 7, 3])} rules_values_y = { 'unwtd': index_items(col_y, all=False, values=[15, 12, 14, 8, 10, 9, 7, 13]), 'iswtd': index_items(col_y, all=False, values=[12, 15, 8, 9, 10, 14, 7, 13])} stack = get_stack(self, meta, data, xks, yks, test_views, weights, extras=True) confirm_get_dataframe( self, stack, col_x, col_y, rules_values_x, rules_values_y) if EXTENDED_TESTS: ################## slicex meta['columns'][col_x]['rules'] = { 'x': {'slicex': {'values': [1, 3, 5, 7, 9, 10, 11, 13, 15]}}} meta['columns'][col_y]['rules'] = { 'y': {'slicex': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}}} rules_values_x = { 'unwtd': index_items(col_x, all=False, values=[1, 3, 5, 7, 9, 10, 11, 13, 15]), 'iswtd': index_items(col_x, all=False, values=[1, 3, 5, 7, 9, 10, 11, 13, 15])} rules_values_y = { 'unwtd': index_items(col_y, all=False, values=[2, 4, 6, 8, 10, 12, 14, 16]), 'iswtd': index_items(col_y, all=False, values=[2, 4, 6, 8, 10, 12, 14, 16])} stack = get_stack(self, meta, data, xks, yks, test_views, weights, extras=True) confirm_get_dataframe( self, stack, col_x, col_y, rules_values_x, rules_values_y) ################## sortx meta['columns'][col_x]['rules'] = { 'x': {'sortx': {'fixed': [5, 1, 3]}}} meta['columns'][col_y]['rules'] = { 'y': {'sortx': {'fixed': [6, 2, 4]}}} rules_values_x = { 'unwtd': index_items(col_x, all=False, values=[2, 15, 4, 16, 6, 10, 12, 14, 11, 7, 13, 8, 9, 5, 1, 3]), 'iswtd': index_items(col_x, all=False, values=[2, 15, 4, 16, 6, 12, 10, 14, 11, 7, 13, 9, 8, 5, 1, 3])} rules_values_y = { 'unwtd': index_items(col_y, all=False, values=[1, 16, 7, 15, 12, 3, 11, 14, 8, 10, 9, 5, 13, 6, 2, 4]), 'iswtd': index_items(col_y, all=False, values=[1, 16, 7, 12, 11, 3, 15, 8, 9, 10, 14, 5, 13, 6, 2, 4])} stack = get_stack(self, meta, data, xks, yks, test_views, weights, extras=True) confirm_get_dataframe( self, stack, 
col_x, col_y, rules_values_x, rules_values_y) ################## dropx meta['columns'][col_x]['rules'] = { 'x': {'dropx': {'values': [1, 3, 5, 7, 9, 11, 13, 15]}}} meta['columns'][col_y]['rules'] = { 'y': {'dropx': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}}} rules_values_x = { 'unwtd': index_items(col_x, all=False, values=[2, 4, 6, 8, 10, 12, 14, 16]), 'iswtd': index_items(col_x, all=False, values=[2, 4, 6, 8, 10, 12, 14, 16])} rules_values_y = { 'unwtd': index_items(col_y, all=False, values=[1, 3, 5, 7, 9, 11, 13, 15]), 'iswtd': index_items(col_y, all=False, values=[1, 3, 5, 7, 9, 11, 13, 15])} stack = get_stack(self, meta, data, xks, yks, test_views, weights, extras=True) confirm_get_dataframe( self, stack, col_x, col_y, rules_values_x, rules_values_y) ################## slicex + sortx meta['columns'][col_x]['rules'] = { 'x': { 'slicex': {'values': frange('4-13')}, 'sortx': {'fixed': [4, 7, 3]}}} meta['columns'][col_y]['rules'] = { 'y': { 'slicex': {'values': frange('7-16')}, 'sortx': {'fixed': [7, 11, 13]}}} rules_values_x = { 'unwtd': index_items(col_x, all=False, values=[5, 6, 10, 12, 11, 13, 8, 9, 4, 7, 3]), 'iswtd': index_items(col_x, all=False, values=[5, 6, 12, 10, 11, 13, 9, 8, 4, 7, 3])} rules_values_y = { 'unwtd': index_items(col_y, all=False, values=[16, 15, 12, 14, 8, 10, 9, 7, 11, 13]), 'iswtd': index_items(col_y, all=False, values=[16, 12, 15, 8, 9, 10, 14, 7, 11, 13])} stack = get_stack(self, meta, data, xks, yks, test_views, weights, extras=True) confirm_get_dataframe( self, stack, col_x, col_y, rules_values_x, rules_values_y) ################## slicex + dropx meta['columns'][col_x]['rules'] = { 'x': { 'slicex': {'values': [1, 3, 5, 7, 9, 11, 13, 15]}, 'dropx': {'values': [3, 7, 11, 15]}}} meta['columns'][col_y]['rules'] = { 'y': { 'slicex': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}, 'dropx': {'values': [2, 6, 10, 14]}}} rules_values_x = { 'unwtd': index_items(col_x, all=False, values=[1, 5, 9, 13]), 'iswtd': index_items(col_x, all=False, values=[1, 
5, 9, 13])} rules_values_y = { 'unwtd': index_items(col_y, all=False, values=[4, 8, 12, 16]), 'iswtd': index_items(col_y, all=False, values=[4, 8, 12, 16])} stack = get_stack(self, meta, data, xks, yks, test_views, weights, extras=True) confirm_get_dataframe( self, stack, col_x, col_y, rules_values_x, rules_values_y) ################## sortx + dropx meta['columns'][col_x]['rules'] = { 'x': { 'sortx': {'fixed': [4, 7, 3]}, 'dropx': {'values': [5, 10]}}} meta['columns'][col_y]['rules'] = { 'y': { 'sortx': {'fixed': [7, 11, 13]}, 'dropx': {'values': [4, 12]}}} rules_values_x = { 'unwtd': index_items(col_x, all=False, values=[2, 1, 15, 16, 6, 12, 14, 11, 13, 8, 9, 4, 7, 3]), 'iswtd': index_items(col_x, all=False, values=[2, 1, 15, 16, 6, 12, 14, 11, 13, 9, 8, 4, 7, 3])} rules_values_y = { 'unwtd': index_items(col_y, all=False, values=[1, 2, 16, 15, 3, 14, 6, 8, 10, 9, 5, 7, 11, 13]), 'iswtd': index_items(col_y, all=False, values=[1, 2, 16, 3, 15, 8, 9, 10, 14, 5, 6, 7, 11, 13])} stack = get_stack(self, meta, data, xks, yks, test_views, weights, extras=True) confirm_get_dataframe( self, stack, col_x, col_y, rules_values_x, rules_values_y) def test_rules_get_chain(self): meta = self.example_data_A_meta data = self.example_data_A_data col_x = 'religion' col_y = 'ethnicity' others = ['q5_1'] xks = [col_x] yks = ['@', col_y] + others test_views = [ 'cbase', 'rbase', # 'ebase', 'counts', 'c%', 'r%', 'mean'] weights = [None, 'weight_a'] ################## slicex + sortx + dropx meta['columns'][col_x]['rules'] = { 'x': { 'slicex': {'values': frange('4-13')}, 'sortx': {'fixed': [4, 7, 3]}, 'dropx': {'values': [6, 11]}}} meta['columns'][col_y]['rules'] = { 'y': { 'slicex': {'values': frange('7-16')}, 'sortx': {'fixed': [7, 11, 13]}, 'dropx': {'values': [11, 16]}}} rules_values_x = { 'unwtd': index_items(col_x, all=False, values=[5, 10, 12, 13, 8, 9, 4, 7, 3]), 'iswtd': index_items(col_x, all=False, values=[5, 12, 10, 13, 9, 8, 4, 7, 3])} rules_values_y = { 'unwtd': 
index_items(col_y, all=False, values=[15, 12, 14, 8, 10, 9, 7, 13]), 'iswtd': index_items(col_y, all=False, values=[12, 15, 8, 9, 10, 14, 7, 13])} confirm_xy_chains( self, meta, data, col_x, col_y, others, test_views, weights, rules_values_x, rules_values_y) if EXTENDED_TESTS: ################## slicex meta['columns'][col_x]['rules'] = { 'x': {'slicex': {'values': [1, 3, 5, 7, 9, 10, 11, 13, 15]}}} meta['columns'][col_y]['rules'] = { 'y': {'slicex': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}}} rules_values_x = { 'unwtd': index_items(col_x, all=False, values=[1, 3, 5, 7, 9, 10, 11, 13, 15]), 'iswtd': index_items(col_x, all=False, values=[1, 3, 5, 7, 9, 10, 11, 13, 15])} rules_values_y = { 'unwtd': index_items(col_y, all=False, values=[2, 4, 6, 8, 10, 12, 14, 16]), 'iswtd': index_items(col_y, all=False, values=[2, 4, 6, 8, 10, 12, 14, 16])} confirm_xy_chains( self, meta, data, col_x, col_y, others, test_views, weights, rules_values_x, rules_values_y) ################## sortx meta['columns'][col_x]['rules'] = { 'x': {'sortx': {'fixed': [5, 1, 3]}}} meta['columns'][col_y]['rules'] = { 'y': {'sortx': {'fixed': [6, 2, 4]}}} rules_values_x = { 'unwtd': index_items(col_x, all=False, values=[2, 15, 4, 16, 6, 10, 12, 14, 11, 7, 13, 8, 9, 5, 1, 3]), 'iswtd': index_items(col_x, all=False, values=[2, 15, 4, 16, 6, 12, 10, 14, 11, 7, 13, 9, 8, 5, 1, 3])} rules_values_y = { 'unwtd': index_items(col_y, all=False, values=[1, 16, 7, 15, 12, 3, 11, 14, 8, 10, 9, 5, 13, 6, 2, 4]), 'iswtd': index_items(col_y, all=False, values=[1, 16, 7, 12, 11, 3, 15, 8, 9, 10, 14, 5, 13, 6, 2, 4])} confirm_xy_chains( self, meta, data, col_x, col_y, others, test_views, weights, rules_values_x, rules_values_y) ################## dropx meta['columns'][col_x]['rules'] = { 'x': {'dropx': {'values': [1, 3, 5, 7, 9, 11, 13, 15]}}} meta['columns'][col_y]['rules'] = { 'y': {'dropx': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}}} rules_values_x = { 'unwtd': index_items(col_x, all=False, values=[2, 4, 6, 8, 10, 12, 
14, 16]), 'iswtd': index_items(col_x, all=False, values=[2, 4, 6, 8, 10, 12, 14, 16])} rules_values_y = { 'unwtd': index_items(col_y, all=False, values=[1, 3, 5, 7, 9, 11, 13, 15]), 'iswtd': index_items(col_y, all=False, values=[1, 3, 5, 7, 9, 11, 13, 15])} confirm_xy_chains( self, meta, data, col_x, col_y, others, test_views, weights, rules_values_x, rules_values_y) ################## slicex + sortx meta['columns'][col_x]['rules'] = { 'x': { 'slicex': {'values': frange('4-13')}, 'sortx': {'fixed': [4, 7, 3]}}} meta['columns'][col_y]['rules'] = { 'y': { 'slicex': {'values': frange('7-16')}, 'sortx': {'fixed': [7, 11, 13]}}} rules_values_x = { 'unwtd': index_items(col_x, all=False, values=[5, 6, 10, 12, 11, 13, 8, 9, 4, 7, 3]), 'iswtd': index_items(col_x, all=False, values=[5, 6, 12, 10, 11, 13, 9, 8, 4, 7, 3])} rules_values_y = { 'unwtd': index_items(col_y, all=False, values=[16, 15, 12, 14, 8, 10, 9, 7, 11, 13]), 'iswtd': index_items(col_y, all=False, values=[16, 12, 15, 8, 9, 10, 14, 7, 11, 13])} stack = get_stack(self, meta, data, xks, yks, test_views, weights, extras=True) confirm_xy_chains( self, meta, data, col_x, col_y, others, test_views, weights, rules_values_x, rules_values_y) ################## slicex + dropx meta['columns'][col_x]['rules'] = { 'x': { 'slicex': {'values': [1, 3, 5, 7, 9, 11, 13, 15]}, 'dropx': {'values': [3, 7, 11, 15]}}} meta['columns'][col_y]['rules'] = { 'y': { 'slicex': {'values': [2, 4, 6, 8, 10, 12, 14, 16]}, 'dropx': {'values': [2, 6, 10, 14]}}} rules_values_x = { 'unwtd': index_items(col_x, all=False, values=[1, 5, 9, 13]), 'iswtd': index_items(col_x, all=False, values=[1, 5, 9, 13])} rules_values_y = { 'unwtd': index_items(col_y, all=False, values=[4, 8, 12, 16]), 'iswtd': index_items(col_y, all=False, values=[4, 8, 12, 16])} confirm_xy_chains( self, meta, data, col_x, col_y, others, test_views, weights, rules_values_x, rules_values_y) ################## sortx + dropx meta['columns'][col_x]['rules'] = { 'x': { 'sortx': {'fixed': 
[4, 7, 3]}, 'dropx': {'values': [5, 10]}}} meta['columns'][col_y]['rules'] = { 'y': { 'sortx': {'fixed': [7, 11, 13]}, 'dropx': {'values': [4, 12]}}} rules_values_x = { 'unwtd': index_items(col_x, all=False, values=[2, 1, 15, 16, 6, 12, 14, 11, 13, 8, 9, 4, 7, 3]), 'iswtd': index_items(col_x, all=False, values=[2, 1, 15, 16, 6, 12, 14, 11, 13, 9, 8, 4, 7, 3])} rules_values_y = { 'unwtd': index_items(col_y, all=False, values=[1, 2, 16, 15, 3, 14, 6, 8, 10, 9, 5, 7, 11, 13]), 'iswtd': index_items(col_y, all=False, values=[1, 2, 16, 3, 15, 8, 9, 10, 14, 5, 6, 7, 11, 13])} confirm_xy_chains( self, meta, data, col_x, col_y, others, test_views, weights, rules_values_x, rules_values_y) def test_rules_coltests(self): meta = self.example_data_A_meta data = self.example_data_A_data col_x = 'q5_1' col_y = 'locality' xks = [col_x] yks = ['@', col_y] test_views = [ 'cbase', 'counts', 'mean'] weights = [None] dk = 'test' fk = 'no_filter' xk = col_x yk = col_y stack = get_stack( self, meta, data, xks, yks, test_views, weights, extras=True, coltests=True) ################## slicex ######### counts meta['columns'][col_y]['rules'] = { 'y': {'slicex': {'values': [5, 2, 3]}}} vk = 'x|t.props.askia.01|:|||askia tests' rules_values_df = pd.DataFrame([ [np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN], ['[2]', np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN]]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) actual = df.fillna(0).values.tolist() expected = rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) ######### net meta['columns'][col_y]['rules'] = { 'y': {'slicex': {'values': [3, 1, 5]}}} vk = 'x|t.props.askia.10|x[{1,2,3}]:|||askia tests' rules_values_df = pd.DataFrame([ [np.NaN, '[5]', np.NaN]]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) actual = df.fillna(0).values.tolist() expected = 
rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) ######### block net meta['columns'][col_y]['rules'] = { 'y': {'slicex': {'values': [4, 1, 3]}}} vk = 'x|t.props.askia.10|x[{1,2}],x[{2,3}],x[{1,3}]:|||askia tests' rules_values_df = pd.DataFrame([ [np.NaN, np.NaN, np.NaN], [np.NaN, '[3, 4]', np.NaN], [np.NaN, '[4]', np.NaN]]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) actual = df.fillna(0).values.tolist() expected = rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) ######### mean meta['columns'][col_y]['rules'] = { 'y': {'slicex': {'values': [5, 2, 4]}}} vk = 'x|t.means.askia.10|x:|||askia tests' rules_values_df = pd.DataFrame([ ['[2, 4]', np.NaN, '[2]']]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) actual = df.fillna(0).values.tolist() expected = rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) ################## sortx ######### counts meta['columns'][col_y]['rules'] = { 'y': {'sortx': {'fixed': [1, 2]}}} vk = 'x|t.props.askia.01|:|||askia tests' rules_values_df = pd.DataFrame([ [np.NaN, np.NaN, np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN, '[5]', np.NaN], [np.NaN, np.NaN, np.NaN, np.NaN, np.NaN], ['[1]', np.NaN, np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN, np.NaN, np.NaN], [np.NaN, '[1, 2]', np.NaN, np.NaN, np.NaN], [np.NaN, '[1]', np.NaN, np.NaN, np.NaN]]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) actual = df.fillna(0).values.tolist() expected = rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) ######### net meta['columns'][col_y]['rules'] = { 'y': {'sortx': {'fixed': [1, 2]}}} vk = 'x|t.props.askia.10|x[{1,2,3}]:|||askia tests' rules_values_df = pd.DataFrame([ [np.NaN, np.NaN, np.NaN, '[4, 5]', '[4]']]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) actual = 
df.fillna(0).values.tolist() expected = rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) ######### block net meta['columns'][col_y]['rules'] = { 'y': {'sortx': {'fixed': [1, 2]}}} vk = 'x|t.props.askia.10|x[{1,2}],x[{2,3}],x[{1,3}]:|||askia tests' rules_values_df = pd.DataFrame([ ['[5]', np.NaN, np.NaN, '[2, 5]', np.NaN], [np.NaN, np.NaN, np.NaN, '[3, 4, 5]', '[4, 5]'], [np.NaN, np.NaN, np.NaN, '[4]', np.NaN]]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) actual = df.fillna(0).values.tolist() expected = rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) ######### mean meta['columns'][col_y]['rules'] = { 'y': {'sortx': {'fixed': [1, 2]}}} vk = 'x|t.means.askia.10|x:|||askia tests' rules_values_df = pd.DataFrame([ ['[1]', '[1, 2, 3, 4]', '[1, 2, 3]', np.NaN, '[1]']]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) actual = df.fillna(0).values.tolist() expected = rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) ################## dropx ######### counts meta['columns'][col_y]['rules'] = { 'y': {'dropx': {'values': [1, 4]}}} vk = 'x|t.props.askia.01|:|||askia tests' rules_values_df = pd.DataFrame([ [np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, '[2]'], [np.NaN, np.NaN, np.NaN]]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) actual = df.fillna(0).values.tolist() expected = rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) ######### net meta['columns'][col_y]['rules'] = { 'y': {'dropx': {'values': [1, 3]}}} vk = 'x|t.props.askia.10|x[{1,2,3}]:|||askia tests' rules_values_df = pd.DataFrame([ ['[4]', np.NaN, np.NaN]]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) actual = df.fillna(0).values.tolist() expected = 
rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) ######### block net meta['columns'][col_y]['rules'] = { 'y': {'dropx': {'values': [2, 4]}}} vk = 'x|t.props.askia.10|x[{1,2}],x[{2,3}],x[{1,3}]:|||askia tests' rules_values_df = pd.DataFrame([ ['[5]', '[5]', np.NaN], ['[3, 5]', np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN]]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) actual = df.fillna(0).values.tolist() expected = rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) ######### mean meta['columns'][col_y]['rules'] = { 'y': {'dropx': {'values': [1, 3]}}} vk = 'x|t.means.askia.10|x:|||askia tests' rules_values_df = pd.DataFrame([ [np.NaN, '[2]', '[2, 4]']]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) actual = df.fillna(0).values.tolist() expected = rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) def test_rules_coltests_flag_bases(self): meta = self.example_data_A_meta data = self.example_data_A_data col_x = 'q5_1' col_y = 'locality' xks = [col_x] yks = ['@', col_y] test_views = [ 'cbase', 'counts', 'mean'] weights = [None] dk = 'test' fk = 'no_filter' xk = col_x yk = col_y minimum = 1000 small = 2000 stack = get_stack( self, meta, data, xks, yks, test_views, weights, extras=True, coltests=True, flag_bases=[minimum, small]) ################## slicex ######### counts meta['columns'][col_y]['rules'] = { 'y': {'slicex': {'values': [5, 2, 3]}}} vk = 'x|t.props.Dim.05|:|||askia tests' rules_values_df = pd.DataFrame([ ['**', np.NaN, '[2]*'], ['**', np.NaN, '*'], ['**', np.NaN, '*'], ['**', np.NaN, '*'], ['**', np.NaN, '*'], ['**', np.NaN, '*'], ['**', np.NaN, '*']]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) cbase = 'x|f|x:|||cbase' keys_cbase = [dk, fk, xk, yk, cbase] df_cbase = get_dataframe(stack, keys=keys_cbase, rules=True) is_minimum = [c<=minimum for c in 
df_cbase.values[0]] is_small = [c>minimum and c<=small for c in df_cbase.values[0]] actual = is_minimum expected = [True, False, False] self.assertSequenceEqual(actual, expected) actual = is_small expected = [False, False, True] self.assertSequenceEqual(actual, expected) actual = df.fillna(0).values.tolist() expected = rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) ################## sortx ######### counts meta['columns'][col_y]['rules'] = { 'y': {'sortx': {'fixed': [1, 2]}}} vk = 'x|t.props.Dim.05|:|||askia tests' rules_values_df = pd.DataFrame([ ['[1, 2]*', '**', '**', np.NaN, np.NaN], ['*', '**', '**', '[2, 3]', np.NaN], ['*', '**', '**', np.NaN, np.NaN], ['[1]*', '**', '**', np.NaN, '[1]'], ['*', '**', '**', np.NaN, np.NaN], ['*', '**', '**', np.NaN, np.NaN], ['*', '**', '**', np.NaN, np.NaN]]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) cbase = 'x|f|x:|||cbase' keys_cbase = [dk, fk, xk, yk, cbase] df_cbase = get_dataframe(stack, keys=keys_cbase, rules=True) is_minimum = [c<=minimum for c in df_cbase.values[0]] is_small = [c>minimum and c<=small for c in df_cbase.values[0]] actual = is_minimum expected = [False, True, True, False, False] self.assertSequenceEqual(actual, expected) actual = is_small expected = [True, False, False, False, False] self.assertSequenceEqual(actual, expected) actual = df.fillna(0).values.tolist() expected = rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) ################## dropx ######### counts meta['columns'][col_y]['rules'] = { 'y': {'dropx': {'values': [1, 4]}}} vk = 'x|t.props.Dim.05|:|||askia tests' rules_values_df = pd.DataFrame([ [np.NaN, '[2]*', '**'], [np.NaN, '*', '**'], [np.NaN, '*', '**'], [np.NaN, '*', '**'], [np.NaN, '*', '**'], [np.NaN, '*', '**'], [np.NaN, '*', '**']]) keys = [dk, fk, xk, yk, vk] df = get_dataframe(stack, keys=keys, rules=True) cbase = 'x|f|x:|||cbase' keys_cbase = [dk, fk, xk, yk, cbase] 
df_cbase = get_dataframe(stack, keys=keys_cbase, rules=True) is_minimum = [c<=minimum for c in df_cbase.values[0]] is_small = [c>minimum and c<=small for c in df_cbase.values[0]] actual = is_minimum expected = [False, False, True] self.assertSequenceEqual(actual, expected) actual = is_small expected = [False, True, False] self.assertSequenceEqual(actual, expected) actual = df.fillna(0).values.tolist() expected = rules_values_df.fillna(0).values.tolist() self.assertSequenceEqual(actual, expected) # ##################### Helper functions ##################### def index_items(col, values, all=False): """ Return a correctly formed list of tuples to matching an index. """ items = [ (col, i) for i in values ] if all: items = [(col, 'All')] + items return items def confirm_frequencies(self, meta, data, weights, col, rules_values_x, rules_values_y): """ Confirms all variations of rules applied with frequency. """ df = frequency(meta, data, x=col) natural_x = df.index.values.tolist() natural_y = natural_x frequ_x = [(col, '@')] frequ_y = frequ_x for weight in weights: if weight is None: rules_x = rules_values_x['unwtd'] rules_y = rules_values_y['unwtd'] else: rules_x = rules_values_x['iswtd'] rules_y = rules_values_y['iswtd'] # rules=True fx = frequency(meta, data, x=col, weight=weight, rules=True) fy = frequency(meta, data, y=col, weight=weight, rules=True) # print fx # print zip(*rules_x)[1] # print zip(*rules_y)[1] confirm_index_columns(self, fx, rules_x, frequ_x) confirm_index_columns(self, fy, frequ_x, rules_y) # rules=False fx = frequency(meta, data, x=col, weight=weight, rules=False) fy = frequency(meta, data, y=col, weight=weight, rules=False) confirm_index_columns(self, fx, natural_x, frequ_x) confirm_index_columns(self, fy, frequ_x, natural_y) # rules=x fx = frequency(meta, data, x=col, weight=weight, rules=['x']) fy = frequency(meta, data, y=col, weight=weight, rules=['x']) confirm_index_columns(self, fx, rules_x, frequ_x) confirm_index_columns(self, fy, frequ_x, 
natural_y) # rules=y fx = frequency(meta, data, x=col, weight=weight, rules=['y']) fy = frequency(meta, data, y=col, weight=weight, rules=['y']) confirm_index_columns(self, fx, natural_x, frequ_x) confirm_index_columns(self, fy, frequ_x, rules_y) # rules=xy fx = frequency(meta, data, x=col, weight=weight, rules=['x', 'y']) fy = frequency(meta, data, y=col, weight=weight, rules=['x', 'y']) confirm_index_columns(self, fx, rules_x, frequ_x) confirm_index_columns(self, fy, frequ_x, rules_y) def confirm_crosstabs(self, meta, data, weights, col_x, col_y, rules_values_x, rules_values_y): """ Confirms all variations of rules applied with frequency. """ fx = frequency(meta, data, x=col_x) natural_x = fx.index.values.tolist() fy = frequency(meta, data, y=col_y) natural_y = fy.columns.values.tolist() for weight in weights: if weight is None: rules_x = rules_values_x['unwtd'] rules_y = rules_values_y['unwtd'] else: rules_x = rules_values_x['iswtd'] rules_y = rules_values_y['iswtd'] for xtotal in [False, True]: # rules=True df = crosstab(meta, data, col_x, col_y, weight=weight, rules=True, xtotal=xtotal) confirm_index_columns(self, df, rules_x, rules_y) # print df # print df.index # print df.columns # print zip(*rules_x)[1] # print zip(*rules_y)[1] # rules=False df = crosstab(meta, data, col_x, col_y, weight=weight, rules=False, xtotal=xtotal) confirm_index_columns(self, df, natural_x, natural_y) # rules=x df = crosstab(meta, data, col_x, col_y, weight=weight, rules=['x'], xtotal=xtotal) confirm_index_columns(self, df, rules_x, natural_y) # rules=y df = crosstab(meta, data, col_x, col_y, weight=weight, rules=['y'], xtotal=xtotal) confirm_index_columns(self, df, natural_x, rules_y) # rules=xy df = crosstab(meta, data, col_x, col_y, weight=weight, rules=['x', 'y'], xtotal=xtotal) confirm_index_columns(self, df, rules_x, rules_y) def confirm_get_dataframe(self, stack, col_x, col_y, rules_values_x, rules_values_y): """ Confirms all variations of rules applied with frequency. 
""" keys = ['dk', 'fk', 'xk', 'yk', 'vk'] keys[0] = dk = 'test' keys[1] = fk = 'no_filter' keys[2] = xk = col_x keys[3] = yk = col_y meta = stack[dk].meta data = stack[dk].data vks = stack.describe()['view'].values.tolist() for xk in [col_x]: keys[2] = xk for yk in ['@', col_y]: if xk=='@' and yk=='@': continue keys[3] = yk for vk in vks: keys[4] = vk # if 'mean' in vk: # print vk rules_x, natural_x, rules_y, natural_y = get_xy_values( meta, data, col_x, col_y, xk, yk, vk, rules_values_x, rules_values_y ) # rules=True df = get_dataframe(stack, keys=keys, rules=True) # print df # print df.index # print df.columns # print zip(*rules_x)[1] # print zip(*rules_y)[1] confirm_index_columns(self, df, rules_x, rules_y) # rules=False df = get_dataframe(stack, keys=keys, rules=False) confirm_index_columns(self, df, natural_x, natural_y) # rules=x df = get_dataframe(stack, keys=keys, rules=['x']) confirm_index_columns(self, df, rules_x, natural_y) # rules=y df = get_dataframe(stack, keys=keys, rules=['y']) confirm_index_columns(self, df, natural_x, rules_y) # rules=xy df = get_dataframe(stack, keys=keys, rules=['x', 'y']) confirm_index_columns(self, df, rules_x, rules_y) def confirm_xy_chains(self, meta, data, col_x, col_y, others, views, weights, rules_values_x, rules_values_y): stack = get_stack( self, meta, data, [col_x], ['@', col_y] + others, views, weights, extras=True) confirm_get_xchain( self, stack, col_x, col_y, others, rules_values_x, rules_values_y) stack = get_stack( self, meta, data, [col_x] + others, [col_y], views, weights, extras=True) confirm_get_ychain( self, stack, col_x, col_y, others, rules_values_x, rules_values_y) def confirm_get_xchain(self, stack, col_x, col_y, others, rules_values_x, rules_values_y): """ Confirms all variations of rules applied with frequency. 
""" keys = ['dk', 'fk', 'xk', 'yk', 'vk'] keys[0] = dk = 'test' keys[1] = fk = 'no_filter' keys[2] = xk = col_x keys[3] = yk = col_y meta = stack[dk].meta data = stack[dk].data xks = [col_x] yks = ['@', col_y] + others confirm_get_chain( self, meta, data, stack, keys, col_x, col_y, xks, yks, rules_values_x, rules_values_y, others) def confirm_get_ychain(self, stack, col_x, col_y, others, rules_values_x, rules_values_y): """ Confirms all variations of rules applied with frequency. """ keys = ['dk', 'fk', 'xk', 'yk', 'vk'] keys[0] = dk = 'test' keys[1] = fk = 'no_filter' keys[2] = xk = col_x keys[3] = yk = col_y meta = stack[dk].meta data = stack[dk].data xks = [col_x] + others yks = [col_y] confirm_get_chain( self, meta, data, stack, keys, col_x, col_y, xks, yks, rules_values_x, rules_values_y, others) def confirm_get_chain(self, meta, data, stack, keys, col_x, col_y, xks, yks, rules_values_x, rules_values_y, others=[]): vks = stack.describe()['view'].values.tolist() weight = None chain_true_unwtd = stack.get_chain(x=xks, y=yks, views=vks, rules=True, rules_weight=weight) chain_false_unwtd = stack.get_chain(x=xks, y=yks, views=vks, rules=False, rules_weight=weight) chain_x_unwtd = stack.get_chain(x=xks, y=yks, views=vks, rules=['x'], rules_weight=weight) chain_y_unwtd = stack.get_chain(x=xks, y=yks, views=vks, rules=['y'], rules_weight=weight) chain_xy_unwtd = stack.get_chain(x=xks, y=yks, views=vks, rules=['x', 'y'], rules_weight=weight) weight = 'weight_a' chain_true_wtd = stack.get_chain(x=xks, y=yks, views=vks, rules=True, rules_weight=weight) chain_false_wtd = stack.get_chain(x=xks, y=yks, views=vks, rules=False, rules_weight=weight) chain_x_wtd = stack.get_chain(x=xks, y=yks, views=vks, rules=['x'], rules_weight=weight) chain_y_wtd = stack.get_chain(x=xks, y=yks, views=vks, rules=['y'], rules_weight=weight) chain_xy_wtd = stack.get_chain(x=xks, y=yks, views=vks, rules=['x', 'y'], rules_weight=weight) for xk in xks: keys[2] = xk for yk in yks: if xk=='@' and 
yk=='@': continue keys[3] = yk for vk in vks: keys[4] = vk for weight in [None, 'weight_a']: # if xk=='q5_1' and yk=='ethnicity' and vk=='x|f|x:|||ebase': # print xk, yk, vk # if vk=='x|f|:y|||rbase' and yk=='q5_1': # print vk rules_x, natural_x, rules_y, natural_y = get_xy_values( meta, data, col_x, col_y, xk, yk, vk, rules_values_x, rules_values_y, others, rules_weight=weight ) # rules=True if weight is None: df = get_dataframe(chain_true_unwtd, keys=keys, rules=False) # print df # print df.index # print df.columns # print zip(*rules_x)[1] # print zip(*rules_y)[1] confirm_index_columns(self, df, rules_x, rules_y) # rules=False df = get_dataframe(chain_false_unwtd, keys=keys, rules=False) confirm_index_columns(self, df, natural_x, natural_y) # rules=x df = get_dataframe(chain_x_unwtd, keys=keys, rules=False) confirm_index_columns(self, df, rules_x, natural_y) # rules=y df = get_dataframe(chain_y_unwtd, keys=keys, rules=False) confirm_index_columns(self, df, natural_x, rules_y) # rules=xy df = get_dataframe(chain_xy_unwtd, keys=keys, rules=False) confirm_index_columns(self, df, rules_x, rules_y) else: df = get_dataframe(chain_true_wtd, keys=keys, rules=False) # print df # print df.index # print df.columns # print zip(*rules_x)[1] # print zip(*rules_y)[1] confirm_index_columns(self, df, rules_x, rules_y) # rules=False df = get_dataframe(chain_false_wtd, keys=keys, rules=False) confirm_index_columns(self, df, natural_x, natural_y) # rules=x df = get_dataframe(chain_x_wtd, keys=keys, rules=False) confirm_index_columns(self, df, rules_x, natural_y) # rules=y df = get_dataframe(chain_y_wtd, keys=keys, rules=False) confirm_index_columns(self, df, natural_x, rules_y) # rules=xy df = get_dataframe(chain_xy_wtd, keys=keys, rules=False) confirm_index_columns(self, df, rules_x, rules_y) def get_xy_values(meta, data, col_x, col_y, xk, yk, vk, rules_values_x, rules_values_y, others=[], rules_weight='auto'): v_method = vk.split('|')[1] relation = vk.split('|')[2] relative = 
vk.split('|')[3] weight = vk.split('|')[4] shortnam = vk.split('|')[5] condensed_x = relation.split(":")[0].startswith('x') or v_method.startswith('d.') condensed_y = relation.split(":")[1].startswith('y') if rules_weight=='auto': rules_weight = None if weight=='' else weight if rules_weight is None: rules_x = rules_values_x['unwtd'] rules_y = rules_values_y['unwtd'] else: rules_x = rules_values_x['iswtd'] rules_y = rules_values_y['iswtd'] if xk in others: fx = frequency(meta, data, x=xk) natural_x = fx.index.values.tolist() natural_x.remove((xk, 'All')) rules_x = natural_x if condensed_x: if shortnam=='Block net': rules_x = natural_x = [ (xk, 'bn1'), (xk, 'bn2'), (xk, 'bn3')] elif shortnam in ['cbase', 'ebase']: rules_x = natural_x = [(xk, 'All')] else: rules_x = natural_x = [(xk, shortnam)] elif xk=='@': if condensed_x: if shortnam=='Block net': rules_x = natural_x = [ (col_x, 'bn1'), (col_x, 'bn2'), (col_x, 'bn3')] elif shortnam in ['cbase', 'ebase']: rules_x = natural_x = [(col_y, 'All')] else: rules_x = natural_x = [(col_y, shortnam)] else: rules_x = natural_x = [(col_y, '@')] elif condensed_x: if shortnam=='Block net': rules_x = natural_x = [ (col_x, 'bn1'), (col_x, 'bn2'), (col_x, 'bn3')] elif shortnam in ['cbase', 'ebase']: rules_x = natural_x = [(xk, 'All')] else: rules_x = natural_x = [(xk, shortnam)] else: fx = frequency(meta, data, x=col_x) natural_x = fx.index.values.tolist() natural_x.remove((col_x, 'All')) if yk in others: fy = frequency(meta, data, y=yk) natural_y = fy.columns.values.tolist() natural_y.remove((yk, 'All')) rules_y = natural_y if condensed_y: if shortnam=='Block net': rules_y = natural_y = [ (yk, 'bn1'), (yk, 'bn2'), (yk, 'bn3')] elif shortnam in ['rbase']: rules_y = natural_y = [(yk, 'All')] else: rules_y = natural_y = [(yk, shortnam)] elif yk=='@': if condensed_y: if shortnam=='Block net': rules_y = natural_y = [ (col_y, 'bn1'), (col_y, 'bn2'), (col_y, 'bn3')] elif shortnam in ['rbase']: rules_y = natural_y = [(col_x, 'All')] else: 
rules_y = natural_y = [(col_x, shortnam)] else: rules_y = natural_y = [(col_x, '@')] elif condensed_y: if shortnam=='Block net': rules_y = natural_y = [ (col_y, 'bn1'), (col_y, 'bn2'), (col_y, 'bn3')] elif shortnam in ['rbase']: rules_y = natural_y = [(col_y, 'All')] else: rules_y = natural_y = [(col_y, shortnam)] else: fy = frequency(meta, data, y=col_y) natural_y = fy.columns.values.tolist() natural_y.remove((col_y, 'All')) return rules_x, natural_x, rules_y, natural_y def str_index_values(index): """ Make sure level 1 of the multiindex are all strings """ values = index.values.tolist() values = zip(*[zip(*values)[0], [str(i) for i in zip(*values)[1]]]) return values def confirm_index_columns(self, df, expected_x, expected_y): """ Confirms index and columns are as expected. """ # global COUNTER # actual_x = str_index_values(df.index) # actual_y = str_index_values(df.columns) actual_x = df.index.values.tolist() actual_y = df.columns.values.tolist() # print # print actual_x # print expected_x # print actual_y # print expected_y # Remove xtotal from columns if present if len(df.columns.levels[0])>1: actual_y = actual_y[1:] self.assertEqual(actual_x, expected_x) self.assertEqual(actual_y, expected_y) # COUNTER = COUNTER + 2 # print COUNTER def get_stack(self, meta, data, xks, yks, views, weights, extras=False, coltests=False, flag_bases=None): stack = Stack('test') stack.add_data('test', data, meta) stack.add_link(x=xks, y=yks, views=views, weights=weights) if extras or coltests: # Add a basic net net_views = ViewMapper( template={ 'method': QuantipyViews().frequency, 'kwargs': {'iterators': {'rel_to': [None, 'y']}}}) net_views.add_method( name='Net 1-3', kwargs={'logic': [1, 2, 3], 'axis': 'x', 'text': {'en-GB': '1-3'}}) stack.add_link(x=xks, y=yks, views=net_views, weights=weights) # Add block net net_views.add_method( name='Block net', kwargs={ 'logic': [ {'bn1': [1, 2]}, {'bn2': [2, 3]}, {'bn3': [1, 3]}], 'axis': 'x'}) stack.add_link(x=xks, y=yks, 
views=net_views.subset(['Block net']), weights=weights) # Add NPS ## TO DO # Add standard deviation stddev_views = ViewMapper( template = { 'method': QuantipyViews().descriptives, 'kwargs': {'stats': 'stddev'}}) stddev_views.add_method(name='stddev') stack.add_link(x=xks, y=yks, views=stddev_views, weights=weights) if coltests: if flag_bases is None: test_views = ViewMapper( template={ 'method': QuantipyViews().coltests, 'kwargs': { 'mimic': 'askia', 'iterators': { 'metric': ['props', 'means'], 'level': ['low', 'mid', 'high']}}}) else: test_views = ViewMapper( template={ 'method': QuantipyViews().coltests, 'kwargs': { 'mimic': 'Dim', 'flag_bases': flag_bases, 'iterators': { 'metric': ['props', 'means'], 'level': ['low', 'mid', 'high']}}}) test_views.add_method('askia tests') stack.add_link(x=xks, y=yks, views=test_views) return stack
Quantipy/quantipy
tests/test_rules.py
Python
mit
81,385
0.003883
#!/usr/bin/env python # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2018 # Leandro Toledo de Souza <devs@python-telegram-bot.org> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # # You should have received a copy of the GNU Lesser Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. """This module contains the ConversationHandler.""" import logging from telegram import Update from telegram.ext import (Handler, CallbackQueryHandler, InlineQueryHandler, ChosenInlineResultHandler) from telegram.utils.promise import Promise from MongoDict import MongoDict class ConversationHandler(Handler): """ A handler to hold a conversation with a single user by managing four collections of other handlers. Note that neither posts in Telegram Channels, nor group interactions with multiple users are managed by instances of this class. The first collection, a ``list`` named :attr:`entry_points`, is used to initiate the conversation, for example with a :class:`telegram.ext.CommandHandler` or :class:`telegram.ext.RegexHandler`. The second collection, a ``dict`` named :attr:`states`, contains the different conversation steps and one or more associated handlers that should be used if the user sends a message when the conversation with them is currently in that state. You will probably use mostly :class:`telegram.ext.MessageHandler` and :class:`telegram.ext.RegexHandler` here. 
The third collection, a ``list`` named :attr:`fallbacks`, is used if the user is currently in a conversation but the state has either no associated handler or the handler that is associated to the state is inappropriate for the update, for example if the update contains a command, but a regular text message is expected. You could use this for a ``/cancel`` command or to let the user know their message was not recognized. The fourth, optional collection of handlers, a ``list`` named :attr:`timed_out_behavior` is used if the wait for ``run_async`` takes longer than defined in :attr:`run_async_timeout`. For example, you can let the user know that they should wait for a bit before they can continue. To change the state of conversation, the callback function of a handler must return the new state after responding to the user. If it does not return anything (returning ``None`` by default), the state will not change. To end the conversation, the callback function must return :attr:`END` or ``-1``. Attributes: entry_points (List[:class:`telegram.ext.Handler`]): A list of ``Handler`` objects that can trigger the start of the conversation. states (Dict[:obj:`object`, List[:class:`telegram.ext.Handler`]]): A :obj:`dict` that defines the different states of conversation a user can be in and one or more associated ``Handler`` objects that should be used in that state. fallbacks (List[:class:`telegram.ext.Handler`]): A list of handlers that might be used if the user is in a conversation, but every handler for their current state returned ``False`` on :attr:`check_update`. allow_reentry (:obj:`bool`): Optional. Determines if a user can restart a conversation with an entry point. run_async_timeout (:obj:`float`): Optional. The time-out for ``run_async`` decorated Handlers. timed_out_behavior (List[:class:`telegram.ext.Handler`]): Optional. A list of handlers that might be used if the wait for ``run_async`` timed out. per_chat (:obj:`bool`): Optional. 
If the conversationkey should contain the Chat's ID. per_user (:obj:`bool`): Optional. If the conversationkey should contain the User's ID. per_message (:obj:`bool`): Optional. If the conversationkey should contain the Message's ID. conversation_timeout (:obj:`float`|:obj:`datetime.timedelta`): Optional. When this handler is inactive more than this timeout (in seconds), it will be automatically ended. If this value is 0 (default), there will be no timeout. Args: entry_points (List[:class:`telegram.ext.Handler`]): A list of ``Handler`` objects that can trigger the start of the conversation. The first handler which :attr:`check_update` method returns ``True`` will be used. If all return ``False``, the update is not handled. states (Dict[:obj:`object`, List[:class:`telegram.ext.Handler`]]): A :obj:`dict` that defines the different states of conversation a user can be in and one or more associated ``Handler`` objects that should be used in that state. The first handler which :attr:`check_update` method returns ``True`` will be used. fallbacks (List[:class:`telegram.ext.Handler`]): A list of handlers that might be used if the user is in a conversation, but every handler for their current state returned ``False`` on :attr:`check_update`. The first handler which :attr:`check_update` method returns ``True`` will be used. If all return ``False``, the update is not handled. allow_reentry (:obj:`bool`, optional): If set to ``True``, a user that is currently in a conversation can restart the conversation by triggering one of the entry points. run_async_timeout (:obj:`float`, optional): If the previous handler for this user was running asynchronously using the ``run_async`` decorator, it might not be finished when the next message arrives. This timeout defines how long the conversation handler should wait for the next state to be computed. The default is ``None`` which means it will wait indefinitely. 
timed_out_behavior (List[:class:`telegram.ext.Handler`], optional): A list of handlers that might be used if the wait for ``run_async`` timed out. The first handler which :attr:`check_update` method returns ``True`` will be used. If all return ``False``, the update is not handled. per_chat (:obj:`bool`, optional): If the conversationkey should contain the Chat's ID. Default is ``True``. per_user (:obj:`bool`, optional): If the conversationkey should contain the User's ID. Default is ``True``. per_message (:obj:`bool`, optional): If the conversationkey should contain the Message's ID. Default is ``False``. conversation_timeout (:obj:`float`|:obj:`datetime.timedelta`, optional): When this handler is inactive more than this timeout (in seconds), it will be automatically ended. If this value is 0 or None (default), there will be no timeout. Raises: ValueError """ END = -1 """:obj:`int`: Used as a constant to return when a conversation is ended.""" def __init__(self, entry_points, states, fallbacks, allow_reentry=False, run_async_timeout=None, timed_out_behavior=None, per_chat=True, per_user=True, per_message=False, conversation_timeout=None, collection=None): self.logger = logging.getLogger(__name__) self.entry_points = entry_points self.states = states self.fallbacks = fallbacks self.allow_reentry = allow_reentry self.run_async_timeout = run_async_timeout self.timed_out_behavior = timed_out_behavior self.per_user = per_user self.per_chat = per_chat self.per_message = per_message self.conversation_timeout = conversation_timeout self.timeout_jobs = dict() self.conversations = MongoDict(collection=collection, warm_cache=True) self.logger.info("Conversations: %s", self.conversations.idb) self.current_conversation = None self.current_handler = None if not any((self.per_user, self.per_chat, self.per_message)): raise ValueError("'per_user', 'per_chat' and 'per_message' can't all be 'False'") if self.per_message and not self.per_chat: logging.warning("If 'per_message=True' is 
used, 'per_chat=True' should also be used, " "since message IDs are not globally unique.") all_handlers = list() all_handlers.extend(entry_points) all_handlers.extend(fallbacks) for state_handlers in states.values(): all_handlers.extend(state_handlers) if self.per_message: for handler in all_handlers: if not isinstance(handler, CallbackQueryHandler): logging.warning("If 'per_message=True', all entry points and state handlers" " must be 'CallbackQueryHandler', since no other handlers " "have a message context.") else: for handler in all_handlers: if isinstance(handler, CallbackQueryHandler): logging.warning("If 'per_message=False', 'CallbackQueryHandler' will not be " "tracked for every message.") if self.per_chat: for handler in all_handlers: if isinstance(handler, (InlineQueryHandler, ChosenInlineResultHandler)): logging.warning("If 'per_chat=True', 'InlineQueryHandler' can not be used, " "since inline queries have no chat context.") def _get_key(self, update): chat = update.effective_chat user = update.effective_user key = list() if self.per_chat: key.append(chat.id) if self.per_user and user is not None: key.append(user.id) if self.per_message: key.append(update.callback_query.inline_message_id or update.callback_query.message.message_id) return tuple(key) def check_update(self, update): """ Determines whether an update should be handled by this conversationhandler, and if so in which state the conversation currently is. Args: update (:class:`telegram.Update`): Incoming telegram update. 
Returns: :obj:`bool` """ # Ignore messages in channels if (not isinstance(update, Update) or update.channel_post or self.per_chat and not update.effective_chat or self.per_message and not update.callback_query or update.callback_query and self.per_chat and not update.callback_query.message): return False key = self._get_key(update) state = self.conversations.get(key) # Resolve promises if isinstance(state, tuple) and len(state) is 2 and isinstance(state[1], Promise): self.logger.debug('waiting for promise...') old_state, new_state = state error = False try: res = new_state.result(timeout=self.run_async_timeout) except Exception as exc: self.logger.exception("Promise function raised exception") self.logger.exception("{}".format(exc)) error = True if not error and new_state.done.is_set(): self.update_state(res, key) state = self.conversations.get(key) else: for candidate in (self.timed_out_behavior or []): if candidate.check_update(update): # Save the current user and the selected handler for handle_update self.current_conversation = key self.current_handler = candidate return True else: return False self.logger.debug('selecting conversation %s with state %s' % (str(key), str(state))) handler = None # Search entry points for a match if state is None or self.allow_reentry: for entry_point in self.entry_points: if entry_point.check_update(update): handler = entry_point break else: if state is None: return False # Get the handler list for current state, if we didn't find one yet and we're still here if state is not None and not handler: handlers = self.states.get(state) for candidate in (handlers or []): if candidate.check_update(update): handler = candidate break # Find a fallback handler if all other handlers fail else: for fallback in self.fallbacks: if fallback.check_update(update): handler = fallback break else: return False # Save the current user and the selected handler for handle_update self.current_conversation = key self.current_handler = handler return True 
def handle_update(self, update, dispatcher): """Send the update to the callback for the current state and Handler Args: update (:class:`telegram.Update`): Incoming telegram update. dispatcher (:class:`telegram.ext.Dispatcher`): Dispatcher that originated the Update. """ new_state = self.current_handler.handle_update(update, dispatcher) timeout_job = self.timeout_jobs.pop(self.current_conversation, None) if timeout_job is not None: timeout_job.schedule_removal() if self.conversation_timeout and new_state != self.END: self.timeout_jobs[self.current_conversation] = dispatcher.job_queue.run_once( self._trigger_timeout, self.conversation_timeout, context=self.current_conversation ) self.update_state(new_state, self.current_conversation) def update_state(self, new_state, key): if new_state == self.END: if key in self.conversations: del self.conversations[key] else: pass elif isinstance(new_state, Promise): self.conversations[key] = (self.conversations.get(key), new_state) elif new_state is not None: self.conversations[key] = new_state def _trigger_timeout(self, bot, job): del self.timeout_jobs[job.context] self.update_state(self.END, job.context)
d-qoi/TelegramBots
RoseAssassins/cust_handlers/conversationhandler.py
Python
lgpl-3.0
15,351
0.004755
# -*- coding: utf-8 -*- """ Consolidate any user interface rgw calls for Wolffish and openATTIC. All operations will happen using the rest-api of RadosGW. The one execption is getting the credentials for an administrative user which is implemented here. """ import logging import os import json import re import glob import salt.client import salt.utils.minions log = logging.getLogger(__name__) class Radosgw(object): """ Return a structure containing S3 keys and urls """ def __init__(self, canned=None, cluster='ceph', pathname='/srv/salt/ceph/rgw/cache'): """ Initialize and call routines """ if canned: self._canned(int(canned)) else: self.cluster = cluster self.credentials = {'access_key': None, 'secret_key': None, 'urls': [], 'success': False} self.pathname = pathname self._admin() self._urls() def _canned(self, canned): """ Return examples for debugging without a working Ceph cluster """ if canned == 1: self.credentials = {'access_key': "ABCDEFGHIJKLMNOPQRST", 'secret_key': "0123456789012345678901234567890123456789", 'urls': ["http://rgw1"]} elif canned == 2: self.credentials = {'access_key': "ABCDEFGHIJKLMNOPQRST", 'secret_key': "0123456789012345678901234567890123456789", 'urls': ["http://red1", "http://red2", "http://blue1:8000", "http://blue2:8000"]} def _admin(self, filename="user.admin.json"): """ Expect admin user file; otherwise, search for first system user. 
Update access_key, secret_key """ filepath = "{}/{}".format(self.pathname, filename) if os.path.exists(filepath): user = json.loads(open(filepath).read()) else: user = None for user_file in glob.glob("{}/user.*".format(self.pathname)): user = json.loads(open(user_file).read()) if 'system' in user and user['system'] == "true": break user = None if not user: # No system user log.error("No system user for radosgw found") return self.credentials['access_key'] = user['keys'][0]['access_key'] self.credentials['secret_key'] = user['keys'][0]['secret_key'] self.credentials['success'] = True def _urls(self): """ Check for user defined endpoint; otherwise, return list of gateways as urls. """ search = "I@cluster:{}".format(self.cluster) __opts__ = salt.config.client_config('/etc/salt/master') pillar_util = salt.utils.master.MasterPillarUtil(search, "compound", use_cached_grains=True, grains_fallback=False, opts=__opts__) cached = pillar_util.get_minion_pillar() for minion in cached: if 'rgw_endpoint' in cached[minion]: self.credentials['urls'].append(cached[minion]['rgw_endpoint']) return port = '7480' # civetweb default port ssl = '' found = False for rgw_conf_file_path in glob.glob("/srv/salt/ceph/configuration/files/ceph.conf.*"): if os.path.exists(rgw_conf_file_path) and os.path.isfile(rgw_conf_file_path): with open(rgw_conf_file_path) as rgw_conf_file: for line in rgw_conf_file: if line: match = re.search(r'rgw.*frontends.*=.*port=(\d+)(s?)', line) if match: port = match.group(1) ssl = match.group(2) found = True if found: break for client_file in glob.glob("{}/client.*".format(self.pathname)): parts = client_file.split('.') resource = '' # dedicated keys - use host part if len(parts) == 4: resource = parts[2] # shared keys - use role part if len(parts) == 3: resource = parts[1] if resource and port: resource += ":{}".format(port) if resource: self.credentials['urls'].append("http{}://{}".format(ssl, resource)) def credentials(canned=None, **kwargs): """ Return the 
administrative credentials for the RadosGW """ radosgw = Radosgw(canned) return radosgw.credentials
supriti/DeepSea
srv/modules/runners/ui_rgw.py
Python
gpl-3.0
5,055
0.001583
from re import compile as regex def matches(patts, filename): for p in patts: if not p.match(filename) is None: return True return False class SpecialSection(): def __init__(self, name, pathPatterns, filePatterns, all_conditions = False): self.name = name self.allcond = all_conditions self.fpat = [] self.ppat = [] for pat in filePatterns: self.fpat.append(regex(pat)) for pat in pathPatterns: self.ppat.append(regex(pat)) def match(self, filename, path): fmatch = matches(self.fpat, filename) if fmatch and not self.allcond: return True pmatch = matches(self.ppat, path) if pmatch and (fmatch or not self.allcond): return True return False specialSections = [ SpecialSection("Прилагательное", [], ["^.*quests/generated/pools/guardthemes\.config$"]), SpecialSection("Винительный падеж", [], ["^.*quests/generated/pools/weapon\.config$"]), SpecialSection("Имена персонажей", [], ["^.*namegen\.config$", "^.*\.namesource$"]), SpecialSection("Наречие", [], ["^.*pools/hatadjectives.config$"]), SpecialSection("Регулярное выражение (не для перевода, а для поддержки названий на кирилице)", ["^.*/regex$"], ["^.*\.config$"], True), SpecialSection("Привязанное к полу прилагательное", ["^.*generatedText/fluff/2/.*$"], ["^.*quests/generated/templates/spread_rumors.questtemplate$"], True), SpecialSection("Предложный падеж", ["^.*generatedText/fluff/3/.*$"], ["^.*quests/generated/templates/escort\.questtemplate$"], True), SpecialSection("Предложный падеж", [".*generatedText/fluff/5/.*$"], ["^.*quests/generated/templates/kidnapping\.questtemplate$"], True), SpecialSection("Множественное число", ["^.*generatedText/fluff/3/.*$"], ["^.*kill_monster_group\.questtemplate$"], True), SpecialSection("Родительный падеж", ["^.+/name$"], ["^.*pools/monsterthreats\.config$"], True), SpecialSection("Префикс названия банды", ["^.*Prefix/.*"], ["^.*quests/bounty/gang\.config"], True), SpecialSection("Основная часть названия банды", ["^.*Mid/.*"], ["^.*quests/bounty/gang\.config"], True), 
SpecialSection("Окончание названия банды", ["^.*suffix/.*"], ["^.*quests/bounty/gang\.config"], True), SpecialSection("Префикс главаря банды", ["^.*prefix/.*"], ["^.*quests/bounty/bounty\.config"], True), SpecialSection("Окончание главаря банды", ["^.*suffix/.*"], ["^.*quests/bounty/bounty\.config"], True), ]
SBT-community/Starbound_RU
tools/special_cases.py
Python
apache-2.0
2,754
0.017234
import pytest from cinp.common import URI # TODO: test mutli-object setting def test_splituri_builduri(): # TODO: test invlid URIs, mabey remove some tests from client_test that are just checking the URI uri = URI( '/api/v1/' ) ( ns, model, action, id_list, multi ) = uri.split( '/api/v1/' ) assert ns == [] assert model is None assert id_list is None assert action is None assert multi is False assert uri.build( ns, model, action, id_list ) == '/api/v1/' ns = None assert uri.build( ns, model, action, id_list ) == '/api/v1/' ( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/' ) assert ns == [ 'ns' ] assert model is None assert id_list is None assert action is None assert multi is False assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/' ns = 'ns' assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/' ( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model' ) assert ns == [ 'ns' ] assert model == 'model' assert id_list is None assert action is None assert multi is False assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model' id_list = [] assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model' ( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/ns2/' ) assert ns == [ 'ns', 'ns2' ] assert model is None assert id_list is None assert action is None assert multi is False assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/ns2/' ( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/ns2/model' ) assert ns == [ 'ns', 'ns2' ] assert model == 'model' assert id_list is None assert action is None assert multi is False assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/ns2/model' ( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model::' ) assert ns == [ 'ns' ] assert model == 'model' assert id_list == [ '' ] assert action is None assert multi is False assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model::' id_list = '' assert uri.build( ns, 
model, action, id_list ) == '/api/v1/ns/model::' ( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model:ghj:' ) assert ns == [ 'ns' ] assert model == 'model' assert id_list == [ 'ghj' ] assert action is None assert multi is False assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model:ghj:' id_list = 'ghj' assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model:ghj:' ( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model:ghj:dsf:sfe:' ) assert ns == [ 'ns' ] assert model == 'model' assert id_list == [ 'ghj', 'dsf', 'sfe' ] assert action is None assert multi is True assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model:ghj:dsf:sfe:' ( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model(action)' ) assert ns == [ 'ns' ] assert model == 'model' assert id_list is None assert action == 'action' assert multi is False assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model(action)' ( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model:sdf:(action)' ) assert ns == [ 'ns' ] assert model == 'model' assert id_list == [ 'sdf' ] assert action == 'action' assert multi is False assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model:sdf:(action)' ( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/model:sdf:eed:(action)' ) assert ns == [ 'ns' ] assert model == 'model' assert id_list == [ 'sdf', 'eed' ] assert action == 'action' assert multi is True assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/model:sdf:eed:(action)' ( ns, model, action, id_list, multi ) = uri.split( '/api/v1/', root_optional=True ) assert ns == [] assert model is None assert id_list is None assert action is None assert multi is False assert uri.build( ns, model, action, id_list ) == '/api/v1/' assert uri.build( ns, model, action, id_list, in_root=False ) == '/' with pytest.raises( ValueError ): ( ns, model, action, id_list, multi ) = uri.split( '/', root_optional=False 
) ( ns, model, action, id_list, multi ) = uri.split( '/', root_optional=True ) assert ns == [] assert model is None assert id_list is None assert action is None assert multi is False assert uri.build( ns, model, action, id_list ) == '/api/v1/' assert uri.build( ns, model, action, id_list, in_root=False ) == '/' ( ns, model, action, id_list, multi ) = uri.split( '/api/v1/ns/', root_optional=True ) assert ns == [ 'ns' ] assert model is None assert id_list is None assert action is None assert multi is False assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/' assert uri.build( ns, model, action, id_list, in_root=False ) == '/ns/' with pytest.raises( ValueError ): ( ns, model, action, id_list, multi ) = uri.split( '/ns/', root_optional=False ) ( ns, model, action, id_list, multi ) = uri.split( '/ns/', root_optional=True ) assert ns == [ 'ns' ] assert model is None assert id_list is None assert action is None assert multi is False assert uri.build( ns, model, action, id_list ) == '/api/v1/ns/' assert uri.build( ns, model, action, id_list, in_root=False ) == '/ns/' def test_extract_ids(): uri = URI( '/api/v1/' ) id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model:234:', '/api/v1/ns/model:rfv:' ] assert uri.extractIds( id_list ) == [ 'sdf', '234', 'rfv' ] id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model:234:www:', '/api/v1/ns/model:rfv:' ] assert uri.extractIds( id_list ) == [ 'sdf', '234', 'www', 'rfv' ] id_list = [ '/api/v1/ns/model:234:www:' ] assert uri.extractIds( id_list ) == [ '234', 'www' ] id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model:234:www', '/api/v1/ns/model:rfv:' ] with pytest.raises( ValueError ): uri.extractIds( id_list ) id_list = [ '/api/v1/ns/model:sdf' ] with pytest.raises( ValueError ): uri.extractIds( id_list ) id_list = [ '/api/v1/ns/model' ] uri.extractIds( id_list ) == [] id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model', '/api/v1/ns/model:rfv:' ] uri.extractIds( id_list ) == [ 'sdf', 'rfv' ] assert 
uri.extractIds( [] ) == [] def test_urilist_to_uri(): uri = URI( '/api/v1/' ) id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model:234:', '/api/v1/ns/model:rfv:' ] assert uri.uriListToMultiURI( id_list ) == '/api/v1/ns/model:sdf:234:rfv:' id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model:234:www:', '/api/v1/ns/model:rfv:' ] assert uri.uriListToMultiURI( id_list ) == '/api/v1/ns/model:sdf:234:www:rfv:' id_list = [ '/api/v1/ns/model:234:www:' ] assert uri.uriListToMultiURI( id_list ) == '/api/v1/ns/model:234:www:' id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model:234:www', '/api/v1/ns/model:rfv:' ] with pytest.raises( ValueError ): uri.uriListToMultiURI( id_list ) id_list = [ '/api/v1/ns/model' ] uri.uriListToMultiURI( id_list ) == [] id_list = [ '/api/v1/ns/model:sdf:', '/api/v1/ns/model', '/api/v1/ns/model:rfv:' ] uri.uriListToMultiURI( id_list ) == '/api/v1/ns/model:sdf:rfv:' assert uri.uriListToMultiURI( [] ) == []
cinp/python
cinp/common_test.py
Python
apache-2.0
7,291
0.055411
#!/usr/bin/python # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This example creates new teams. To determine which teams exist, run get_all_teams.py. The LoadFromStorage method is pulling credentials and properties from a "googleads.yaml" file. By default, it looks for this file in your home directory. For more information, see the "Caching authentication information" section of our README. """ import uuid # Import appropriate modules from the client library. from googleads import dfp def main(client): # Initialize appropriate service. team_service = client.GetService('TeamService', version='v201411') # Create team objects. teams = [] for i in xrange(5): team = { 'name': 'Team %s' % uuid.uuid4(), 'hasAllCompanies': 'false', 'hasAllInventory': 'false', 'teamAccessType': 'READ_WRITE' } teams.append(team) # Add Teams. teams = team_service.createTeams(teams) # Display results. for team in teams: print ('Team with ID \'%s\' and name \'%s\' was created.' % (team['id'], team['name'])) if __name__ == '__main__': # Initialize client object. dfp_client = dfp.DfpClient.LoadFromStorage() main(dfp_client)
wubr2000/googleads-python-lib
examples/dfp/v201411/team_service/create_teams.py
Python
apache-2.0
1,762
0.007946
# sqlalchemy/events.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Core event interfaces.""" from . import event, exc from .pool import Pool from .engine import Connectable, Engine, Dialect from .sql.base import SchemaEventTarget class DDLEvents(event.Events): """ Define event listeners for schema objects, that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget` subclasses, including :class:`.MetaData`, :class:`.Table`, :class:`.Column`. :class:`.MetaData` and :class:`.Table` support events specifically regarding when CREATE and DROP DDL is emitted to the database. Attachment events are also provided to customize behavior whenever a child schema element is associated with a parent, such as, when a :class:`.Column` is associated with its :class:`.Table`, when a :class:`.ForeignKeyConstraint` is associated with a :class:`.Table`, etc. Example using the ``after_create`` event:: from sqlalchemy import event from sqlalchemy import Table, Column, Metadata, Integer m = MetaData() some_table = Table('some_table', m, Column('data', Integer)) def after_create(target, connection, **kw): connection.execute("ALTER TABLE %s SET name=foo_%s" % (target.name, target.name)) event.listen(some_table, "after_create", after_create) DDL events integrate closely with the :class:`.DDL` class and the :class:`.DDLElement` hierarchy of DDL clause constructs, which are themselves appropriate as listener callables:: from sqlalchemy import DDL event.listen( some_table, "after_create", DDL("ALTER TABLE %(table)s SET name=foo_%(table)s") ) The methods here define the name of an event as well as the names of members that are passed to listener functions. 
See also: :ref:`event_toplevel` :class:`.DDLElement` :class:`.DDL` :ref:`schema_ddl_sequences` """ _target_class_doc = "SomeSchemaClassOrObject" _dispatch_target = SchemaEventTarget def before_create(self, target, connection, **kw): """Called before CREATE statements are emitted. :param target: the :class:`.MetaData` or :class:`.Table` object which is the target of the event. :param connection: the :class:`.Connection` where the CREATE statement or statements will be emitted. :param \**kw: additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events. """ def after_create(self, target, connection, **kw): """Called after CREATE statements are emitted. :param target: the :class:`.MetaData` or :class:`.Table` object which is the target of the event. :param connection: the :class:`.Connection` where the CREATE statement or statements have been emitted. :param \**kw: additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events. """ def before_drop(self, target, connection, **kw): """Called before DROP statements are emitted. :param target: the :class:`.MetaData` or :class:`.Table` object which is the target of the event. :param connection: the :class:`.Connection` where the DROP statement or statements will be emitted. :param \**kw: additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events. """ def after_drop(self, target, connection, **kw): """Called after DROP statements are emitted. 
:param target: the :class:`.MetaData` or :class:`.Table` object which is the target of the event. :param connection: the :class:`.Connection` where the DROP statement or statements have been emitted. :param \**kw: additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events. """ def before_parent_attach(self, target, parent): """Called before a :class:`.SchemaItem` is associated with a parent :class:`.SchemaItem`. :param target: the target object :param parent: the parent to which the target is being attached. :func:`.event.listen` also accepts a modifier for this event: :param propagate=False: When True, the listener function will be established for any copies made of the target object, i.e. those copies that are generated when :meth:`.Table.tometadata` is used. """ def after_parent_attach(self, target, parent): """Called after a :class:`.SchemaItem` is associated with a parent :class:`.SchemaItem`. :param target: the target object :param parent: the parent to which the target is being attached. :func:`.event.listen` also accepts a modifier for this event: :param propagate=False: When True, the listener function will be established for any copies made of the target object, i.e. those copies that are generated when :meth:`.Table.tometadata` is used. """ def column_reflect(self, inspector, table, column_info): """Called for each unit of 'column info' retrieved when a :class:`.Table` is being reflected. The dictionary of column information as returned by the dialect is passed, and can be modified. The dictionary is that returned in each element of the list returned by :meth:`.reflection.Inspector.get_columns`. The event is called before any action is taken against this dictionary, and the contents can be modified. 
The :class:`.Column` specific arguments ``info``, ``key``, and ``quote`` can also be added to the dictionary and will be passed to the constructor of :class:`.Column`. Note that this event is only meaningful if either associated with the :class:`.Table` class across the board, e.g.:: from sqlalchemy.schema import Table from sqlalchemy import event def listen_for_reflect(inspector, table, column_info): "receive a column_reflect event" # ... event.listen( Table, 'column_reflect', listen_for_reflect) ...or with a specific :class:`.Table` instance using the ``listeners`` argument:: def listen_for_reflect(inspector, table, column_info): "receive a column_reflect event" # ... t = Table( 'sometable', autoload=True, listeners=[ ('column_reflect', listen_for_reflect) ]) This because the reflection process initiated by ``autoload=True`` completes within the scope of the constructor for :class:`.Table`. """ class PoolEvents(event.Events): """Available events for :class:`.Pool`. The methods here define the name of an event as well as the names of members that are passed to listener functions. 
e.g.:: from sqlalchemy import event def my_on_checkout(dbapi_conn, connection_rec, connection_proxy): "handle an on checkout event" event.listen(Pool, 'checkout', my_on_checkout) In addition to accepting the :class:`.Pool` class and :class:`.Pool` instances, :class:`.PoolEvents` also accepts :class:`.Engine` objects and the :class:`.Engine` class as targets, which will be resolved to the ``.pool`` attribute of the given engine or the :class:`.Pool` class:: engine = create_engine("postgresql://scott:tiger@localhost/test") # will associate with engine.pool event.listen(engine, 'checkout', my_on_checkout) """ _target_class_doc = "SomeEngineOrPool" _dispatch_target = Pool @classmethod def _accept_with(cls, target): if isinstance(target, type): if issubclass(target, Engine): return Pool elif issubclass(target, Pool): return target elif isinstance(target, Engine): return target.pool else: return target def connect(self, dbapi_connection, connection_record): """Called at the moment a particular DBAPI connection is first created for a given :class:`.Pool`. This event allows one to capture the point directly after which the DBAPI module-level ``.connect()`` method has been used in order to produce a new DBAPI connection. :param dbapi_connection: a DBAPI connection. :param connection_record: the :class:`._ConnectionRecord` managing the DBAPI connection. """ def first_connect(self, dbapi_connection, connection_record): """Called exactly once for the first time a DBAPI connection is checked out from a particular :class:`.Pool`. The rationale for :meth:`.PoolEvents.first_connect` is to determine information about a particular series of database connections based on the settings used for all connections. 
Since a particular :class:`.Pool` refers to a single "creator" function (which in terms of a :class:`.Engine` refers to the URL and connection options used), it is typically valid to make observations about a single connection that can be safely assumed to be valid about all subsequent connections, such as the database version, the server and client encoding settings, collation settings, and many others. :param dbapi_connection: a DBAPI connection. :param connection_record: the :class:`._ConnectionRecord` managing the DBAPI connection. """ def checkout(self, dbapi_connection, connection_record, connection_proxy): """Called when a connection is retrieved from the Pool. :param dbapi_connection: a DBAPI connection. :param connection_record: the :class:`._ConnectionRecord` managing the DBAPI connection. :param connection_proxy: the :class:`._ConnectionFairy` object which will proxy the public interface of the DBAPI connection for the lifespan of the checkout. If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current connection will be disposed and a fresh connection retrieved. Processing of all checkout listeners will abort and restart using the new connection. .. seealso:: :meth:`.ConnectionEvents.engine_connect` - a similar event which occurs upon creation of a new :class:`.Connection`. """ def checkin(self, dbapi_connection, connection_record): """Called when a connection returns to the pool. Note that the connection may be closed, and may be None if the connection has been invalidated. ``checkin`` will not be called for detached connections. (They do not return to the pool.) :param dbapi_connection: a DBAPI connection. :param connection_record: the :class:`._ConnectionRecord` managing the DBAPI connection. """ def reset(self, dbapi_connnection, connection_record): """Called before the "reset" action occurs for a pooled connection. This event represents when the ``rollback()`` method is called on the DBAPI connection before it is returned to the pool. 
The behavior of "reset" can be controlled, including disabled, using the ``reset_on_return`` pool argument. The :meth:`.PoolEvents.reset` event is usually followed by the :meth:`.PoolEvents.checkin` event is called, except in those cases where the connection is discarded immediately after reset. :param dbapi_connection: a DBAPI connection. :param connection_record: the :class:`._ConnectionRecord` managing the DBAPI connection. .. versionadded:: 0.8 .. seealso:: :meth:`.ConnectionEvents.rollback` :meth:`.ConnectionEvents.commit` """ def invalidate(self, dbapi_connection, connection_record, exception): """Called when a DBAPI connection is to be "invalidated". This event is called any time the :meth:`._ConnectionRecord.invalidate` method is invoked, either from API usage or via "auto-invalidation". The event occurs before a final attempt to call ``.close()`` on the connection occurs. :param dbapi_connection: a DBAPI connection. :param connection_record: the :class:`._ConnectionRecord` managing the DBAPI connection. :param exception: the exception object corresponding to the reason for this invalidation, if any. May be ``None``. .. versionadded:: 0.9.2 Added support for connection invalidation listening. .. seealso:: :ref:`pool_connection_invalidation` """ class ConnectionEvents(event.Events): """Available events for :class:`.Connectable`, which includes :class:`.Connection` and :class:`.Engine`. The methods here define the name of an event as well as the names of members that are passed to listener functions. 
An event listener can be associated with any :class:`.Connectable` class or instance, such as an :class:`.Engine`, e.g.:: from sqlalchemy import event, create_engine def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): log.info("Received statement: %s" % statement) engine = create_engine('postgresql://scott:tiger@localhost/test') event.listen(engine, "before_cursor_execute", before_cursor_execute) or with a specific :class:`.Connection`:: with engine.begin() as conn: @event.listens_for(conn, 'before_cursor_execute') def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): log.info("Received statement: %s" % statement) When the methods are called with a `statement` parameter, such as in :meth:`.after_cursor_execute`, :meth:`.before_cursor_execute` and :meth:`.dbapi_error`, the statement is the exact SQL string that was prepared for transmission to the DBAPI ``cursor`` in the connection's :class:`.Dialect`. The :meth:`.before_execute` and :meth:`.before_cursor_execute` events can also be established with the ``retval=True`` flag, which allows modification of the statement and parameters to be sent to the database. The :meth:`.before_cursor_execute` event is particularly useful here to add ad-hoc string transformations, such as comments, to all executions:: from sqlalchemy.engine import Engine from sqlalchemy import event @event.listens_for(Engine, "before_cursor_execute", retval=True) def comment_sql_calls(conn, cursor, statement, parameters, context, executemany): statement = statement + " -- some comment" return statement, parameters .. note:: :class:`.ConnectionEvents` can be established on any combination of :class:`.Engine`, :class:`.Connection`, as well as instances of each of those classes. Events across all four scopes will fire off for a given instance of :class:`.Connection`. 
However, for performance reasons, the :class:`.Connection` object determines at instantiation time whether or not its parent :class:`.Engine` has event listeners established. Event listeners added to the :class:`.Engine` class or to an instance of :class:`.Engine` *after* the instantiation of a dependent :class:`.Connection` instance will usually *not* be available on that :class:`.Connection` instance. The newly added listeners will instead take effect for :class:`.Connection` instances created subsequent to those event listeners being established on the parent :class:`.Engine` class or instance. :param retval=False: Applies to the :meth:`.before_execute` and :meth:`.before_cursor_execute` events only. When True, the user-defined event function must have a return value, which is a tuple of parameters that replace the given statement and parameters. See those methods for a description of specific return arguments. .. versionchanged:: 0.8 :class:`.ConnectionEvents` can now be associated with any :class:`.Connectable` including :class:`.Connection`, in addition to the existing support for :class:`.Engine`. 
""" _target_class_doc = "SomeEngine" _dispatch_target = Connectable @classmethod def _listen(cls, event_key, retval=False): target, identifier, fn = \ event_key.dispatch_target, event_key.identifier, \ event_key._listen_fn target._has_events = True if not retval: if identifier == 'before_execute': orig_fn = fn def wrap_before_execute(conn, clauseelement, multiparams, params): orig_fn(conn, clauseelement, multiparams, params) return clauseelement, multiparams, params fn = wrap_before_execute elif identifier == 'before_cursor_execute': orig_fn = fn def wrap_before_cursor_execute(conn, cursor, statement, parameters, context, executemany): orig_fn(conn, cursor, statement, parameters, context, executemany) return statement, parameters fn = wrap_before_cursor_execute elif retval and \ identifier not in ('before_execute', 'before_cursor_execute', 'handle_error'): raise exc.ArgumentError( "Only the 'before_execute', " "'before_cursor_execute' and 'handle_error' engine " "event listeners accept the 'retval=True' " "argument.") event_key.with_wrapper(fn).base_listen() def before_execute(self, conn, clauseelement, multiparams, params): """Intercept high level execute() events, receiving uncompiled SQL constructs and other objects prior to rendering into SQL. This event is good for debugging SQL compilation issues as well as early manipulation of the parameters being sent to the database, as the parameter lists will be in a consistent format here. This event can be optionally established with the ``retval=True`` flag. 
The ``clauseelement``, ``multiparams``, and ``params`` arguments should be returned as a three-tuple in this case:: @event.listens_for(Engine, "before_execute", retval=True) def before_execute(conn, conn, clauseelement, multiparams, params): # do something with clauseelement, multiparams, params return clauseelement, multiparams, params :param conn: :class:`.Connection` object :param clauseelement: SQL expression construct, :class:`.Compiled` instance, or string statement passed to :meth:`.Connection.execute`. :param multiparams: Multiple parameter sets, a list of dictionaries. :param params: Single parameter set, a single dictionary. See also: :meth:`.before_cursor_execute` """ def after_execute(self, conn, clauseelement, multiparams, params, result): """Intercept high level execute() events after execute. :param conn: :class:`.Connection` object :param clauseelement: SQL expression construct, :class:`.Compiled` instance, or string statement passed to :meth:`.Connection.execute`. :param multiparams: Multiple parameter sets, a list of dictionaries. :param params: Single parameter set, a single dictionary. :param result: :class:`.ResultProxy` generated by the execution. """ def before_cursor_execute(self, conn, cursor, statement, parameters, context, executemany): """Intercept low-level cursor execute() events before execution, receiving the string SQL statement and DBAPI-specific parameter list to be invoked against a cursor. This event is a good choice for logging as well as late modifications to the SQL string. It's less ideal for parameter modifications except for those which are specific to a target backend. This event can be optionally established with the ``retval=True`` flag. 
The ``statement`` and ``parameters`` arguments should be returned as a two-tuple in this case:: @event.listens_for(Engine, "before_cursor_execute", retval=True) def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): # do something with statement, parameters return statement, parameters See the example at :class:`.ConnectionEvents`. :param conn: :class:`.Connection` object :param cursor: DBAPI cursor object :param statement: string SQL statement, as to be passed to the DBAPI :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. :param context: :class:`.ExecutionContext` object in use. May be ``None``. :param executemany: boolean, if ``True``, this is an ``executemany()`` call, if ``False``, this is an ``execute()`` call. See also: :meth:`.before_execute` :meth:`.after_cursor_execute` """ def after_cursor_execute(self, conn, cursor, statement, parameters, context, executemany): """Intercept low-level cursor execute() events after execution. :param conn: :class:`.Connection` object :param cursor: DBAPI cursor object. Will have results pending if the statement was a SELECT, but these should not be consumed as they will be needed by the :class:`.ResultProxy`. :param statement: string SQL statement, as passed to the DBAPI :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. :param context: :class:`.ExecutionContext` object in use. May be ``None``. :param executemany: boolean, if ``True``, this is an ``executemany()`` call, if ``False``, this is an ``execute()`` call. """ def dbapi_error(self, conn, cursor, statement, parameters, context, exception): """Intercept a raw DBAPI error. 
This event is called with the DBAPI exception instance received from the DBAPI itself, *before* SQLAlchemy wraps the exception with it's own exception wrappers, and before any other operations are performed on the DBAPI cursor; the existing transaction remains in effect as well as any state on the cursor. The use case here is to inject low-level exception handling into an :class:`.Engine`, typically for logging and debugging purposes. .. warning:: Code should **not** modify any state or throw any exceptions here as this will interfere with SQLAlchemy's cleanup and error handling routines. For exception modification, please refer to the new :meth:`.ConnectionEvents.handle_error` event. Subsequent to this hook, SQLAlchemy may attempt any number of operations on the connection/cursor, including closing the cursor, rolling back of the transaction in the case of connectionless execution, and disposing of the entire connection pool if a "disconnect" was detected. The exception is then wrapped in a SQLAlchemy DBAPI exception wrapper and re-thrown. :param conn: :class:`.Connection` object :param cursor: DBAPI cursor object :param statement: string SQL statement, as passed to the DBAPI :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. :param context: :class:`.ExecutionContext` object in use. May be ``None``. :param exception: The **unwrapped** exception emitted directly from the DBAPI. The class here is specific to the DBAPI module in use. .. deprecated:: 0.9.7 - replaced by :meth:`.ConnectionEvents.handle_error` """ def handle_error(self, exception_context): """Intercept all exceptions processed by the :class:`.Connection`. This includes all exceptions emitted by the DBAPI as well as within SQLAlchemy's statement invocation process, including encoding errors and other statement validation errors. 
Other areas in which the event is invoked include transaction begin and end, result row fetching, cursor creation. Note that :meth:`.handle_error` may support new kinds of exceptions and new calling scenarios at *any time*. Code which uses this event must expect new calling patterns to be present in minor releases. To support the wide variety of members that correspond to an exception, as well as to allow extensibility of the event without backwards incompatibility, the sole argument received is an instance of :class:`.ExceptionContext`. This object contains data members representing detail about the exception. Use cases supported by this hook include: * read-only, low-level exception handling for logging and debugging purposes * exception re-writing The hook is called while the cursor from the failed operation (if any) is still open and accessible. Special cleanup operations can be called on this cursor; SQLAlchemy will attempt to close this cursor subsequent to this hook being invoked. If the connection is in "autocommit" mode, the transaction also remains open within the scope of this hook; the rollback of the per-statement transaction also occurs after the hook is called. The user-defined event handler has two options for replacing the SQLAlchemy-constructed exception into one that is user defined. It can either raise this new exception directly, in which case all further event listeners are bypassed and the exception will be raised, after appropriate cleanup as taken place:: @event.listens_for(Engine, "handle_error") def handle_exception(context): if isinstance(context.original_exception, psycopg2.OperationalError) and \\ "failed" in str(context.original_exception): raise MySpecialException("failed operation") Alternatively, a "chained" style of event handling can be used, by configuring the handler with the ``retval=True`` modifier and returning the new exception instance from the function. In this case, event handling will continue onto the next handler. 
The "chained" exception is available using :attr:`.ExceptionContext.chained_exception`:: @event.listens_for(Engine, "handle_error", retval=True) def handle_exception(context): if context.chained_exception is not None and \\ "special" in context.chained_exception.message: return MySpecialException("failed", cause=context.chained_exception) Handlers that return ``None`` may remain within this chain; the last non-``None`` return value is the one that continues to be passed to the next handler. When a custom exception is raised or returned, SQLAlchemy raises this new exception as-is, it is not wrapped by any SQLAlchemy object. If the exception is not a subclass of :class:`sqlalchemy.exc.StatementError`, certain features may not be available; currently this includes the ORM's feature of adding a detail hint about "autoflush" to exceptions raised within the autoflush process. :param context: an :class:`.ExceptionContext` object. See this class for details on all available members. .. versionadded:: 0.9.7 Added the :meth:`.ConnectionEvents.handle_error` hook. """ def engine_connect(self, conn, branch): """Intercept the creation of a new :class:`.Connection`. This event is called typically as the direct result of calling the :meth:`.Engine.connect` method. It differs from the :meth:`.PoolEvents.connect` method, which refers to the actual connection to a database at the DBAPI level; a DBAPI connection may be pooled and reused for many operations. In contrast, this event refers only to the production of a higher level :class:`.Connection` wrapper around such a DBAPI connection. It also differs from the :meth:`.PoolEvents.checkout` event in that it is specific to the :class:`.Connection` object, not the DBAPI connection that :meth:`.PoolEvents.checkout` deals with, although this DBAPI connection is available here via the :attr:`.Connection.connection` attribute. 
But note there can in fact be multiple :meth:`.PoolEvents.checkout` events within the lifespan of a single :class:`.Connection` object, if that :class:`.Connection` is invalidated and re-established. There can also be multiple :class:`.Connection` objects generated for the same already-checked-out DBAPI connection, in the case that a "branch" of a :class:`.Connection` is produced. :param conn: :class:`.Connection` object. :param branch: if True, this is a "branch" of an existing :class:`.Connection`. A branch is generated within the course of a statement execution to invoke supplemental statements, most typically to pre-execute a SELECT of a default value for the purposes of an INSERT statement. .. versionadded:: 0.9.0 .. seealso:: :meth:`.PoolEvents.checkout` the lower-level pool checkout event for an individual DBAPI connection :meth:`.ConnectionEvents.set_connection_execution_options` - a copy of a :class:`.Connection` is also made when the :meth:`.Connection.execution_options` method is called. """ def set_connection_execution_options(self, conn, opts): """Intercept when the :meth:`.Connection.execution_options` method is called. This method is called after the new :class:`.Connection` has been produced, with the newly updated execution options collection, but before the :class:`.Dialect` has acted upon any of those new options. Note that this method is not called when a new :class:`.Connection` is produced which is inheriting execution options from its parent :class:`.Engine`; to intercept this condition, use the :meth:`.ConnectionEvents.engine_connect` event. :param conn: The newly copied :class:`.Connection` object :param opts: dictionary of options that were passed to the :meth:`.Connection.execution_options` method. .. versionadded:: 0.9.0 .. seealso:: :meth:`.ConnectionEvents.set_engine_execution_options` - event which is called when :meth:`.Engine.execution_options` is called. 
""" def set_engine_execution_options(self, engine, opts): """Intercept when the :meth:`.Engine.execution_options` method is called. The :meth:`.Engine.execution_options` method produces a shallow copy of the :class:`.Engine` which stores the new options. That new :class:`.Engine` is passed here. A particular application of this method is to add a :meth:`.ConnectionEvents.engine_connect` event handler to the given :class:`.Engine` which will perform some per- :class:`.Connection` task specific to these execution options. :param conn: The newly copied :class:`.Engine` object :param opts: dictionary of options that were passed to the :meth:`.Connection.execution_options` method. .. versionadded:: 0.9.0 .. seealso:: :meth:`.ConnectionEvents.set_connection_execution_options` - event which is called when :meth:`.Connection.execution_options` is called. """ def begin(self, conn): """Intercept begin() events. :param conn: :class:`.Connection` object """ def rollback(self, conn): """Intercept rollback() events, as initiated by a :class:`.Transaction`. Note that the :class:`.Pool` also "auto-rolls back" a DBAPI connection upon checkin, if the ``reset_on_return`` flag is set to its default value of ``'rollback'``. To intercept this rollback, use the :meth:`.PoolEvents.reset` hook. :param conn: :class:`.Connection` object .. seealso:: :meth:`.PoolEvents.reset` """ def commit(self, conn): """Intercept commit() events, as initiated by a :class:`.Transaction`. Note that the :class:`.Pool` may also "auto-commit" a DBAPI connection upon checkin, if the ``reset_on_return`` flag is set to the value ``'commit'``. To intercept this commit, use the :meth:`.PoolEvents.reset` hook. :param conn: :class:`.Connection` object """ def savepoint(self, conn, name): """Intercept savepoint() events. :param conn: :class:`.Connection` object :param name: specified name used for the savepoint. """ def rollback_savepoint(self, conn, name, context): """Intercept rollback_savepoint() events. 
:param conn: :class:`.Connection` object :param name: specified name used for the savepoint. :param context: :class:`.ExecutionContext` in use. May be ``None``. """ def release_savepoint(self, conn, name, context): """Intercept release_savepoint() events. :param conn: :class:`.Connection` object :param name: specified name used for the savepoint. :param context: :class:`.ExecutionContext` in use. May be ``None``. """ def begin_twophase(self, conn, xid): """Intercept begin_twophase() events. :param conn: :class:`.Connection` object :param xid: two-phase XID identifier """ def prepare_twophase(self, conn, xid): """Intercept prepare_twophase() events. :param conn: :class:`.Connection` object :param xid: two-phase XID identifier """ def rollback_twophase(self, conn, xid, is_prepared): """Intercept rollback_twophase() events. :param conn: :class:`.Connection` object :param xid: two-phase XID identifier :param is_prepared: boolean, indicates if :meth:`.TwoPhaseTransaction.prepare` was called. """ def commit_twophase(self, conn, xid, is_prepared): """Intercept commit_twophase() events. :param conn: :class:`.Connection` object :param xid: two-phase XID identifier :param is_prepared: boolean, indicates if :meth:`.TwoPhaseTransaction.prepare` was called. """ class DialectEvents(event.Events): """event interface for execution-replacement functions. These events allow direct instrumentation and replacement of key dialect functions which interact with the DBAPI. .. note:: :class:`.DialectEvents` hooks should be considered **semi-public** and experimental. These hooks are not for general use and are only for those situations where intricate re-statement of DBAPI mechanics must be injected onto an existing dialect. For general-use statement-interception events, please use the :class:`.ConnectionEvents` interface. .. 
seealso:: :meth:`.ConnectionEvents.before_cursor_execute` :meth:`.ConnectionEvents.before_execute` :meth:`.ConnectionEvents.after_cursor_execute` :meth:`.ConnectionEvents.after_execute` .. versionadded:: 0.9.4 """ _target_class_doc = "SomeEngine" _dispatch_target = Dialect @classmethod def _listen(cls, event_key, retval=False): target, identifier, fn = \ event_key.dispatch_target, event_key.identifier, event_key.fn target._has_events = True event_key.base_listen() @classmethod def _accept_with(cls, target): if isinstance(target, type): if issubclass(target, Engine): return Dialect elif issubclass(target, Dialect): return target elif isinstance(target, Engine): return target.dialect else: return target def do_executemany(self, cursor, statement, parameters, context): """Receive a cursor to have executemany() called. Return the value True to halt further events from invoking, and to indicate that the cursor execution has already taken place within the event handler. """ def do_execute_no_params(self, cursor, statement, context): """Receive a cursor to have execute() with no parameters called. Return the value True to halt further events from invoking, and to indicate that the cursor execution has already taken place within the event handler. """ def do_execute(self, cursor, statement, parameters, context): """Receive a cursor to have execute() called. Return the value True to halt further events from invoking, and to indicate that the cursor execution has already taken place within the event handler. """
adamwwt/chvac
venv/lib/python2.7/site-packages/sqlalchemy/events.py
Python
mit
40,130
0.0001
from django.conf.urls import include, url from . import views urlpatterns = [ url(r'^$', views.subform, name='subform'), url(r'^submit', views.submit, name='submit'), ]
jameskane05/final_helpstl
submit/urls.py
Python
gpl-2.0
177
0.00565
#!/usr/bin/env python # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2017 # Leandro Toledo de Souza <devs@python-telegram-bot.org> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. import json import pytest from telegram import (InputTextMessageContent, InlineQueryResultLocation, InlineKeyboardButton, InlineQueryResultVoice, InlineKeyboardMarkup) @pytest.fixture(scope='class') def inline_query_result_location(): return InlineQueryResultLocation(TestInlineQueryResultLocation.id, TestInlineQueryResultLocation.latitude, TestInlineQueryResultLocation.longitude, TestInlineQueryResultLocation.title, thumb_url=TestInlineQueryResultLocation.thumb_url, thumb_width=TestInlineQueryResultLocation.thumb_width, thumb_height=TestInlineQueryResultLocation.thumb_height, input_message_content=TestInlineQueryResultLocation.input_message_content, reply_markup=TestInlineQueryResultLocation.reply_markup) class TestInlineQueryResultLocation: id = 'id' type = 'location' latitude = 0.0 longitude = 1.0 title = 'title' thumb_url = 'thumb url' thumb_width = 10 thumb_height = 15 input_message_content = InputTextMessageContent('input_message_content') reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('reply_markup')]]) def test_expected_values(self, inline_query_result_location): assert inline_query_result_location.id == self.id assert inline_query_result_location.type 
== self.type assert inline_query_result_location.latitude == self.latitude assert inline_query_result_location.longitude == self.longitude assert inline_query_result_location.title == self.title assert inline_query_result_location.thumb_url == self.thumb_url assert inline_query_result_location.thumb_width == self.thumb_width assert inline_query_result_location.thumb_height == self.thumb_height assert inline_query_result_location.input_message_content.to_dict() == \ self.input_message_content.to_dict() assert inline_query_result_location.reply_markup.to_dict() == self.reply_markup.to_dict() def test_to_json(self, inline_query_result_location): json.loads(inline_query_result_location.to_json()) def test_to_dict(self, inline_query_result_location): inline_query_result_location_dict = inline_query_result_location.to_dict() assert isinstance(inline_query_result_location_dict, dict) assert inline_query_result_location_dict['id'] == inline_query_result_location.id assert inline_query_result_location_dict['type'] == inline_query_result_location.type assert inline_query_result_location_dict['latitude'] == \ inline_query_result_location.latitude assert inline_query_result_location_dict['longitude'] == \ inline_query_result_location.longitude assert inline_query_result_location_dict['title'] == inline_query_result_location.title assert inline_query_result_location_dict['thumb_url'] == \ inline_query_result_location.thumb_url assert inline_query_result_location_dict['thumb_width'] == \ inline_query_result_location.thumb_width assert inline_query_result_location_dict['thumb_height'] == \ inline_query_result_location.thumb_height assert inline_query_result_location_dict['input_message_content'] == \ inline_query_result_location.input_message_content.to_dict() assert inline_query_result_location_dict['reply_markup'] == \ inline_query_result_location.reply_markup.to_dict() def test_equality(self): a = InlineQueryResultLocation(self.id, self.longitude, self.latitude, self.title) b 
= InlineQueryResultLocation(self.id, self.longitude, self.latitude, self.title) c = InlineQueryResultLocation(self.id, 0, self.latitude, self.title) d = InlineQueryResultLocation("", self.longitude, self.latitude, self.title) e = InlineQueryResultVoice(self.id, "", "") assert a == b assert hash(a) == hash(b) assert a is not b assert a == c assert hash(a) == hash(c) assert a != d assert hash(a) != hash(d) assert a != e assert hash(a) != hash(e)
rogerscristo/BotFWD
env/lib/python3.6/site-packages/pytests/test_inlinequeryresultlocation.py
Python
mit
5,366
0.002795
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Tangible() result.template = "object/tangible/wearables/ithorian/shared_ith_backpack_s01.iff" result.attribute_template_id = 11 result.stfName("wearables_name","ith_backpack_s01") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
anhstudios/swganh
data/scripts/templates/object/tangible/wearables/ithorian/shared_ith_backpack_s01.py
Python
mit
470
0.046809
#!/usr/bin/python # coding=utf-8 import hashlib import os import re import subprocess import sys import tempfile from datetime import datetime from gtts import gTTS while 1: line = sys.stdin.readline().strip() if line == '': break key, data = line.split(':') if key[:4] != 'agi_': #skip input that doesn't begin with agi_ sys.stderr.write("Did not work!\n") sys.stderr.flush() continue key = key.strip() data = data.strip() if key != '': env[key] = data def _speak_espeak(text): base_file_name = tempfile.named_temporary_file().name raw_file_name = tempfile.named_temporary_file().name + '-raw.wav' subprocess.call(['espeak', text, '-vbrazil-mbrola-4', '-g0.5', '-p60', '-s130', '-w', raw_file_name]) subprocess.call(['sox', raw_file_name, base_file_name + '.wav', 'rate', '8k']) os.remove(raw_file_name) return base_file_name def _speak_gtts(text): try: text.decode('utf-8') except: text = text.encode('utf-8') digest = '/tmp/' + hashlib.sha224(text).hexdigest() file_name = digest + '.mp3' if os.path.isfile(file_name): return file_name raw_file_name = digest + '-raw.mp3' tts = gTTS(text=text, lang='pt-br') tts.save(raw_file_name) subprocess.call(['lame', '--scale', '10', raw_file_name, file_name]) os.remove(raw_file_name) return file_name def busy(timeout): sys.stdout.write("EXEC Busy %s\n %timeout ") sys.stdout.flush() sys.stderr.write("EXEC Busy %s\n %timeout ") sys.stderr.flush() line = sys.stdin.readline() result = line.strip() return int(checkresult(result)) - 48 def checkresult (params): sys.stderr.write("checkresult: %s\n" % params) params = params.rstrip() if re.search('^200', params): result = re.search('result=(\d+)', params) if (not result): sys.stderr.write("FAIL ('%s')\n" % params) sys.stderr.flush() return -1 else: result = result.group(1) sys.stderr.write("PASS (%s)\n" % result) sys.stderr.flush() return result else: sys.stderr.write("FAIL (unexpected result '%s')\n" % params) sys.stderr.flush() return -2 def hangup(): sys.stdout.write("EXEC Hangup") 
sys.stdout.flush() sys.stderr.write("EXEC Hangup") sys.stderr.flush() line = sys.stdin.readline() result = line.strip() return int(checkresult(result)) - 48 def read_digit(timeout): sys.stdout.write("WAIT FOR DIGIT %s\n" %timeout ) sys.stdout.flush() sys.stderr.write("WAIT FOR DIGIT %s\n" %timeout ) sys.stderr.flush() line = sys.stdin.readline() sys.stderr.write('wait_for_digit line: %s\n' % line) result = line.strip() return int(checkresult(result)) - 48 def record(filepath): sys.stdout.write("EXEC MixMonitor " + filepath) sys.stdout.flush() sys.stderr.write("MixMonitor(wav, " + filepath +", mb)\n") sys.stderr.flush() line = sys.stdin.readline() result = line.strip() return int(checkresult(result)) - 48 def speak(text): try: file_name = _speak_gtts(text) sys.stdout.write("EXEC MP3Player %s\n" % file_name) except: print(sys.exc_info()) file_name = _speak_espeak(text) sys.stdout.write("EXEC PLAYBACK %s\n" % file_name) sys.stdout.flush() result = sys.stdin.readline().strip() return checkresult(result) def transfer(tech, dest): sys.stdout.write("EXEC DIAL %s/%s\n" % (tech,dest)) sys.stdout.flush() result = sys.stdin.readline().strip() checkresult(result) monitor() def wait_exten(timeout): sys.stdout.write("EXEC WaitExten %s\n %timeout ") sys.stdout.flush() sys.stderr.write("EXEC WaitExten %s\n %timeout ") sys.stderr.flush() line = sys.stdin.readline() result = line.strip() return int(checkresult(result)) - 48 def write_digit(digit, timeout, duration): if timeout is None and duration is None: sys.stdout.write("EXEC SendDTMF %s\n" % digit ) sys.stdout.flush() elif duration is None: sys.stdout.write("EXEC SendDTMF %s/%s\n" % (digit, timeout) ) sys.stdout.flush() elif timeout is None: sys.stdout.write("EXEC SendDTMF %s/%s\n" % (digit, duration) ) sys.stdout.flush() else: sys.stdout.write("EXEC SendDTMF %s %s %s\n" % (digit, timeout, duration) ) sys.stdout.flush() sys.stderr.write("EXEC SendDTMF %s/%s\n" % (digit, duration)) sys.stderr.flush() line = sys.stdin.readline() 
result = line.strip() return int(checkresult(result)) - 48
lucascudo/pytherisk
pytherisk.py
Python
gpl-3.0
4,654
0.010314
import base64 import json from twisted.internet.defer import inlineCallbacks, DeferredQueue, returnValue from twisted.web.http_headers import Headers from twisted.web import http from twisted.web.server import NOT_DONE_YET from vumi.config import ConfigContext from vumi.message import TransportUserMessage, TransportEvent from vumi.tests.helpers import VumiTestCase from vumi.tests.utils import MockHttpServer, LogCatcher from vumi.transports.vumi_bridge.client import StreamingClient from vumi.utils import http_request_full from go.apps.http_api.resource import ( StreamResourceMixin, StreamingConversationResource) from go.apps.tests.helpers import AppWorkerHelper from go.apps.http_api.vumi_app import StreamingHTTPWorker class TestStreamingHTTPWorker(VumiTestCase): @inlineCallbacks def setUp(self): self.app_helper = self.add_helper(AppWorkerHelper(StreamingHTTPWorker)) self.config = { 'health_path': '/health/', 'web_path': '/foo', 'web_port': 0, 'metrics_prefix': 'metrics_prefix.', 'conversation_cache_ttl': 0, } self.app = yield self.app_helper.get_app_worker(self.config) self.addr = self.app.webserver.getHost() self.url = 'http://%s:%s%s' % ( self.addr.host, self.addr.port, self.config['web_path']) conv_config = { 'http_api': { 'api_tokens': [ 'token-1', 'token-2', 'token-3', ], 'metric_store': 'metric_store', } } conversation = yield self.app_helper.create_conversation( config=conv_config) yield self.app_helper.start_conversation(conversation) self.conversation = yield self.app_helper.get_conversation( conversation.key) self.auth_headers = { 'Authorization': ['Basic ' + base64.b64encode('%s:%s' % ( conversation.user_account.key, 'token-1'))], } self.client = StreamingClient() # Mock server to test HTTP posting of inbound messages & events self.mock_push_server = MockHttpServer(self.handle_request) yield self.mock_push_server.start() self.add_cleanup(self.mock_push_server.stop) self.push_calls = DeferredQueue() self._setup_wait_for_request() 
self.add_cleanup(self._wait_for_requests) def _setup_wait_for_request(self): # Hackery to wait for the request to finish self._req_state = { 'queue': DeferredQueue(), 'expected': 0, } orig_track = StreamingConversationResource.track_request orig_release = StreamingConversationResource.release_request def track_wrapper(*args, **kw): self._req_state['expected'] += 1 return orig_track(*args, **kw) def release_wrapper(*args, **kw): return orig_release(*args, **kw).addCallback( self._req_state['queue'].put) self.patch( StreamingConversationResource, 'track_request', track_wrapper) self.patch( StreamingConversationResource, 'release_request', release_wrapper) @inlineCallbacks def _wait_for_requests(self): while self._req_state['expected'] > 0: yield self._req_state['queue'].get() self._req_state['expected'] -= 1 def handle_request(self, request): self.push_calls.put(request) return NOT_DONE_YET @inlineCallbacks def pull_message(self, count=1): url = '%s/%s/messages.json' % (self.url, self.conversation.key) messages = DeferredQueue() errors = DeferredQueue() receiver = self.client.stream( TransportUserMessage, messages.put, errors.put, url, Headers(self.auth_headers)) received_messages = [] for msg_id in range(count): yield self.app_helper.make_dispatch_inbound( 'in %s' % (msg_id,), message_id=str(msg_id), conv=self.conversation) recv_msg = yield messages.get() received_messages.append(recv_msg) receiver.disconnect() returnValue((receiver, received_messages)) def assert_bad_request(self, response, reason): self.assertEqual(response.code, http.BAD_REQUEST) self.assertEqual( response.headers.getRawHeaders('content-type'), ['application/json; charset=utf-8']) data = json.loads(response.delivered_body) self.assertEqual(data, { "success": False, "reason": reason, }) @inlineCallbacks def test_proxy_buffering_headers_off(self): # This is the default, but we patch it anyway to make sure we're # testing the right thing should the default change. 
self.patch(StreamResourceMixin, 'proxy_buffering', False) receiver, received_messages = yield self.pull_message() headers = receiver._response.headers self.assertEqual(headers.getRawHeaders('x-accel-buffering'), ['no']) @inlineCallbacks def test_proxy_buffering_headers_on(self): self.patch(StreamResourceMixin, 'proxy_buffering', True) receiver, received_messages = yield self.pull_message() headers = receiver._response.headers self.assertEqual(headers.getRawHeaders('x-accel-buffering'), ['yes']) @inlineCallbacks def test_content_type(self): receiver, received_messages = yield self.pull_message() headers = receiver._response.headers self.assertEqual( headers.getRawHeaders('content-type'), ['application/json; charset=utf-8']) @inlineCallbacks def test_messages_stream(self): url = '%s/%s/messages.json' % (self.url, self.conversation.key) messages = DeferredQueue() errors = DeferredQueue() receiver = self.client.stream( TransportUserMessage, messages.put, errors.put, url, Headers(self.auth_headers)) msg1 = yield self.app_helper.make_dispatch_inbound( 'in 1', message_id='1', conv=self.conversation) msg2 = yield self.app_helper.make_dispatch_inbound( 'in 2', message_id='2', conv=self.conversation) rm1 = yield messages.get() rm2 = yield messages.get() receiver.disconnect() # Sometimes messages arrive out of order if we're hitting real redis. 
rm1, rm2 = sorted([rm1, rm2], key=lambda m: m['message_id']) self.assertEqual(msg1['message_id'], rm1['message_id']) self.assertEqual(msg2['message_id'], rm2['message_id']) self.assertEqual(errors.size, None) @inlineCallbacks def test_events_stream(self): url = '%s/%s/events.json' % (self.url, self.conversation.key) events = DeferredQueue() errors = DeferredQueue() receiver = yield self.client.stream(TransportEvent, events.put, events.put, url, Headers(self.auth_headers)) msg1 = yield self.app_helper.make_stored_outbound( self.conversation, 'out 1', message_id='1') ack1 = yield self.app_helper.make_dispatch_ack( msg1, conv=self.conversation) msg2 = yield self.app_helper.make_stored_outbound( self.conversation, 'out 2', message_id='2') ack2 = yield self.app_helper.make_dispatch_ack( msg2, conv=self.conversation) ra1 = yield events.get() ra2 = yield events.get() receiver.disconnect() # Sometimes messages arrive out of order if we're hitting real redis. if ra1['event_id'] != ack1['event_id']: ra1, ra2 = ra2, ra1 self.assertEqual(ack1['event_id'], ra1['event_id']) self.assertEqual(ack2['event_id'], ra2['event_id']) self.assertEqual(errors.size, None) @inlineCallbacks def test_missing_auth(self): url = '%s/%s/messages.json' % (self.url, self.conversation.key) queue = DeferredQueue() receiver = self.client.stream( TransportUserMessage, queue.put, queue.put, url) response = yield receiver.get_response() self.assertEqual(response.code, http.UNAUTHORIZED) self.assertEqual(response.headers.getRawHeaders('www-authenticate'), [ 'basic realm="Conversation Realm"']) @inlineCallbacks def test_invalid_auth(self): url = '%s/%s/messages.json' % (self.url, self.conversation.key) queue = DeferredQueue() headers = Headers({ 'Authorization': ['Basic %s' % (base64.b64encode('foo:bar'),)], }) receiver = self.client.stream( TransportUserMessage, queue.put, queue.put, url, headers) response = yield receiver.get_response() self.assertEqual(response.code, http.UNAUTHORIZED) 
self.assertEqual(response.headers.getRawHeaders('www-authenticate'), [ 'basic realm="Conversation Realm"']) @inlineCallbacks def test_send_to(self): msg = { 'to_addr': '+2345', 'content': 'foo', 'message_id': 'evil_id', } # TaggingMiddleware.add_tag_to_msg(msg, self.tag) url = '%s/%s/messages.json' % (self.url, self.conversation.key) response = yield http_request_full(url, json.dumps(msg), self.auth_headers, method='PUT') self.assertEqual( response.headers.getRawHeaders('content-type'), ['application/json; charset=utf-8']) self.assertEqual(response.code, http.OK) put_msg = json.loads(response.delivered_body) [sent_msg] = self.app_helper.get_dispatched_outbound() self.assertEqual(sent_msg['to_addr'], sent_msg['to_addr']) self.assertEqual(sent_msg['helper_metadata'], { 'go': { 'conversation_key': self.conversation.key, 'conversation_type': 'http_api', 'user_account': self.conversation.user_account.key, }, }) # We do not respect the message_id that's been given. self.assertNotEqual(sent_msg['message_id'], msg['message_id']) self.assertEqual(sent_msg['message_id'], put_msg['message_id']) self.assertEqual(sent_msg['to_addr'], msg['to_addr']) self.assertEqual(sent_msg['from_addr'], None) @inlineCallbacks def test_send_to_within_content_length_limit(self): self.conversation.config['http_api'].update({ 'content_length_limit': 182, }) yield self.conversation.save() msg = { 'content': 'foo', 'to_addr': '+1234', } url = '%s/%s/messages.json' % (self.url, self.conversation.key) response = yield http_request_full(url, json.dumps(msg), self.auth_headers, method='PUT') self.assertEqual( response.headers.getRawHeaders('content-type'), ['application/json; charset=utf-8']) put_msg = json.loads(response.delivered_body) self.assertEqual(response.code, http.OK) [sent_msg] = self.app_helper.get_dispatched_outbound() self.assertEqual(sent_msg['to_addr'], put_msg['to_addr']) self.assertEqual(sent_msg['helper_metadata'], { 'go': { 'conversation_key': self.conversation.key, 
'conversation_type': 'http_api', 'user_account': self.conversation.user_account.key, }, }) self.assertEqual(sent_msg['message_id'], put_msg['message_id']) self.assertEqual(sent_msg['session_event'], None) self.assertEqual(sent_msg['to_addr'], '+1234') self.assertEqual(sent_msg['from_addr'], None) @inlineCallbacks def test_send_to_content_too_long(self): self.conversation.config['http_api'].update({ 'content_length_limit': 10, }) yield self.conversation.save() msg = { 'content': "This message is longer than 10 characters.", 'to_addr': '+1234', } url = '%s/%s/messages.json' % (self.url, self.conversation.key) response = yield http_request_full( url, json.dumps(msg), self.auth_headers, method='PUT') self.assert_bad_request( response, "Payload content too long: 42 > 10") @inlineCallbacks def test_send_to_with_evil_content(self): msg = { 'content': 0xBAD, 'to_addr': '+1234', } url = '%s/%s/messages.json' % (self.url, self.conversation.key) response = yield http_request_full(url, json.dumps(msg), self.auth_headers, method='PUT') self.assert_bad_request( response, "Invalid or missing value for payload key 'content'") @inlineCallbacks def test_send_to_with_evil_to_addr(self): msg = { 'content': 'good', 'to_addr': 1234, } url = '%s/%s/messages.json' % (self.url, self.conversation.key) response = yield http_request_full(url, json.dumps(msg), self.auth_headers, method='PUT') self.assert_bad_request( response, "Invalid or missing value for payload key 'to_addr'") @inlineCallbacks def test_in_reply_to(self): inbound_msg = yield self.app_helper.make_stored_inbound( self.conversation, 'in 1', message_id='1') msg = { 'content': 'foo', 'in_reply_to': inbound_msg['message_id'], } url = '%s/%s/messages.json' % (self.url, self.conversation.key) response = yield http_request_full(url, json.dumps(msg), self.auth_headers, method='PUT') self.assertEqual( response.headers.getRawHeaders('content-type'), ['application/json; charset=utf-8']) put_msg = json.loads(response.delivered_body) 
self.assertEqual(response.code, http.OK) [sent_msg] = self.app_helper.get_dispatched_outbound() self.assertEqual(sent_msg['to_addr'], put_msg['to_addr']) self.assertEqual(sent_msg['helper_metadata'], { 'go': { 'conversation_key': self.conversation.key, 'conversation_type': 'http_api', 'user_account': self.conversation.user_account.key, }, }) self.assertEqual(sent_msg['message_id'], put_msg['message_id']) self.assertEqual(sent_msg['session_event'], None) self.assertEqual(sent_msg['to_addr'], inbound_msg['from_addr']) self.assertEqual(sent_msg['from_addr'], '9292') @inlineCallbacks def test_in_reply_to_within_content_length_limit(self): self.conversation.config['http_api'].update({ 'content_length_limit': 182, }) yield self.conversation.save() inbound_msg = yield self.app_helper.make_stored_inbound( self.conversation, 'in 1', message_id='1') msg = { 'content': 'foo', 'in_reply_to': inbound_msg['message_id'], } url = '%s/%s/messages.json' % (self.url, self.conversation.key) response = yield http_request_full(url, json.dumps(msg), self.auth_headers, method='PUT') self.assertEqual( response.headers.getRawHeaders('content-type'), ['application/json; charset=utf-8']) put_msg = json.loads(response.delivered_body) self.assertEqual(response.code, http.OK) [sent_msg] = self.app_helper.get_dispatched_outbound() self.assertEqual(sent_msg['to_addr'], put_msg['to_addr']) self.assertEqual(sent_msg['helper_metadata'], { 'go': { 'conversation_key': self.conversation.key, 'conversation_type': 'http_api', 'user_account': self.conversation.user_account.key, }, }) self.assertEqual(sent_msg['message_id'], put_msg['message_id']) self.assertEqual(sent_msg['session_event'], None) self.assertEqual(sent_msg['to_addr'], inbound_msg['from_addr']) self.assertEqual(sent_msg['from_addr'], '9292') @inlineCallbacks def test_in_reply_to_content_too_long(self): self.conversation.config['http_api'].update({ 'content_length_limit': 10, }) yield self.conversation.save() inbound_msg = yield 
self.app_helper.make_stored_inbound( self.conversation, 'in 1', message_id='1') msg = { 'content': "This message is longer than 10 characters.", 'in_reply_to': inbound_msg['message_id'], } url = '%s/%s/messages.json' % (self.url, self.conversation.key) response = yield http_request_full( url, json.dumps(msg), self.auth_headers, method='PUT') self.assert_bad_request( response, "Payload content too long: 42 > 10") @inlineCallbacks def test_in_reply_to_with_evil_content(self): inbound_msg = yield self.app_helper.make_stored_inbound( self.conversation, 'in 1', message_id='1') msg = { 'content': 0xBAD, 'in_reply_to': inbound_msg['message_id'], } url = '%s/%s/messages.json' % (self.url, self.conversation.key) response = yield http_request_full(url, json.dumps(msg), self.auth_headers, method='PUT') self.assert_bad_request( response, "Invalid or missing value for payload key 'content'") @inlineCallbacks def test_invalid_in_reply_to(self): msg = { 'content': 'foo', 'in_reply_to': '1', # this doesn't exist } url = '%s/%s/messages.json' % (self.url, self.conversation.key) response = yield http_request_full(url, json.dumps(msg), self.auth_headers, method='PUT') self.assert_bad_request(response, 'Invalid in_reply_to value') @inlineCallbacks def test_invalid_in_reply_to_with_missing_conversation_key(self): # create a message with no conversation inbound_msg = self.app_helper.make_inbound('in 1', message_id='msg-1') vumi_api = self.app_helper.vumi_helper.get_vumi_api() yield vumi_api.mdb.add_inbound_message(inbound_msg) msg = { 'content': 'foo', 'in_reply_to': inbound_msg['message_id'], } url = '%s/%s/messages.json' % (self.url, self.conversation.key) with LogCatcher(message='Invalid reply to message <Message .*>' ' which has no conversation key') as lc: response = yield http_request_full(url, json.dumps(msg), self.auth_headers, method='PUT') [error_log] = lc.messages() self.assert_bad_request(response, "Invalid in_reply_to value") self.assertTrue(inbound_msg['message_id'] in 
error_log) @inlineCallbacks def test_in_reply_to_with_evil_session_event(self): inbound_msg = yield self.app_helper.make_stored_inbound( self.conversation, 'in 1', message_id='1') msg = { 'content': 'foo', 'in_reply_to': inbound_msg['message_id'], 'session_event': 0xBAD5E55104, } url = '%s/%s/messages.json' % (self.url, self.conversation.key) response = yield http_request_full(url, json.dumps(msg), self.auth_headers, method='PUT') self.assert_bad_request( response, "Invalid or missing value for payload key 'session_event'") self.assertEqual(self.app_helper.get_dispatched_outbound(), []) @inlineCallbacks def test_in_reply_to_with_evil_message_id(self): inbound_msg = yield self.app_helper.make_stored_inbound( self.conversation, 'in 1', message_id='1') msg = { 'content': 'foo', 'in_reply_to': inbound_msg['message_id'], 'message_id': 'evil_id' } url = '%s/%s/messages.json' % (self.url, self.conversation.key) response = yield http_request_full(url, json.dumps(msg), self.auth_headers, method='PUT') self.assertEqual(response.code, http.OK) self.assertEqual( response.headers.getRawHeaders('content-type'), ['application/json; charset=utf-8']) put_msg = json.loads(response.delivered_body) [sent_msg] = self.app_helper.get_dispatched_outbound() # We do not respect the message_id that's been given. 
self.assertNotEqual(sent_msg['message_id'], msg['message_id']) self.assertEqual(sent_msg['message_id'], put_msg['message_id']) self.assertEqual(sent_msg['to_addr'], inbound_msg['from_addr']) self.assertEqual(sent_msg['from_addr'], '9292') @inlineCallbacks def test_metric_publishing(self): metric_data = [ ("vumi.test.v1", 1234, 'SUM'), ("vumi.test.v2", 3456, 'AVG'), ] url = '%s/%s/metrics.json' % (self.url, self.conversation.key) response = yield http_request_full( url, json.dumps(metric_data), self.auth_headers, method='PUT') self.assertEqual(response.code, http.OK) self.assertEqual( response.headers.getRawHeaders('content-type'), ['application/json; charset=utf-8']) prefix = "go.campaigns.test-0-user.stores.metric_store" self.assertEqual( self.app_helper.get_published_metrics(self.app), [("%s.vumi.test.v1" % prefix, 1234), ("%s.vumi.test.v2" % prefix, 3456)]) @inlineCallbacks def test_concurrency_limits(self): config = yield self.app.get_config(None) concurrency = config.concurrency_limit queue = DeferredQueue() url = '%s/%s/messages.json' % (self.url, self.conversation.key) max_receivers = [self.client.stream( TransportUserMessage, queue.put, queue.put, url, Headers(self.auth_headers)) for _ in range(concurrency)] for i in range(concurrency): msg = yield self.app_helper.make_dispatch_inbound( 'in %s' % (i,), message_id=str(i), conv=self.conversation) received = yield queue.get() self.assertEqual(msg['message_id'], received['message_id']) maxed_out_resp = yield http_request_full( url, method='GET', headers=self.auth_headers) self.assertEqual(maxed_out_resp.code, 403) self.assertTrue( 'Too many concurrent connections' in maxed_out_resp.delivered_body) [r.disconnect() for r in max_receivers] @inlineCallbacks def test_disabling_concurrency_limit(self): conv_resource = StreamingConversationResource( self.app, self.conversation.key) # negative concurrency limit disables it ctxt = ConfigContext(user_account=self.conversation.user_account.key, concurrency_limit=-1) 
config = yield self.app.get_config(msg=None, ctxt=ctxt) self.assertTrue( (yield conv_resource.is_allowed( config, self.conversation.user_account.key))) @inlineCallbacks def test_backlog_on_connect(self): for i in range(10): yield self.app_helper.make_dispatch_inbound( 'in %s' % (i,), message_id=str(i), conv=self.conversation) queue = DeferredQueue() url = '%s/%s/messages.json' % (self.url, self.conversation.key) receiver = self.client.stream( TransportUserMessage, queue.put, queue.put, url, Headers(self.auth_headers)) for i in range(10): received = yield queue.get() self.assertEqual(received['message_id'], str(i)) receiver.disconnect() @inlineCallbacks def test_health_response(self): health_url = 'http://%s:%s%s' % ( self.addr.host, self.addr.port, self.config['health_path']) response = yield http_request_full(health_url, method='GET') self.assertEqual(response.delivered_body, '0') yield self.app_helper.make_dispatch_inbound( 'in 1', message_id='1', conv=self.conversation) queue = DeferredQueue() stream_url = '%s/%s/messages.json' % (self.url, self.conversation.key) stream_receiver = self.client.stream( TransportUserMessage, queue.put, queue.put, stream_url, Headers(self.auth_headers)) yield queue.get() response = yield http_request_full(health_url, method='GET') self.assertEqual(response.delivered_body, '1') stream_receiver.disconnect() response = yield http_request_full(health_url, method='GET') self.assertEqual(response.delivered_body, '0') self.assertEqual(self.app.client_manager.clients, { 'sphex.stream.message.%s' % (self.conversation.key,): [] }) @inlineCallbacks def test_post_inbound_message(self): # Set the URL so stuff is HTTP Posted instead of streamed. 
self.conversation.config['http_api'].update({ 'push_message_url': self.mock_push_server.url, }) yield self.conversation.save() msg_d = self.app_helper.make_dispatch_inbound( 'in 1', message_id='1', conv=self.conversation) req = yield self.push_calls.get() posted_json_data = req.content.read() req.finish() msg = yield msg_d posted_msg = TransportUserMessage.from_json(posted_json_data) self.assertEqual(posted_msg['message_id'], msg['message_id']) @inlineCallbacks def test_post_inbound_message_201_response(self): # Set the URL so stuff is HTTP Posted instead of streamed. self.conversation.config['http_api'].update({ 'push_message_url': self.mock_push_server.url, }) yield self.conversation.save() with LogCatcher(message='Got unexpected response code') as lc: msg_d = self.app_helper.make_dispatch_inbound( 'in 1', message_id='1', conv=self.conversation) req = yield self.push_calls.get() req.setResponseCode(201) req.finish() yield msg_d self.assertEqual(lc.messages(), []) @inlineCallbacks def test_post_inbound_message_500_response(self): # Set the URL so stuff is HTTP Posted instead of streamed. self.conversation.config['http_api'].update({ 'push_message_url': self.mock_push_server.url, }) yield self.conversation.save() with LogCatcher(message='Got unexpected response code') as lc: msg_d = self.app_helper.make_dispatch_inbound( 'in 1', message_id='1', conv=self.conversation) req = yield self.push_calls.get() req.setResponseCode(500) req.finish() yield msg_d [warning_log] = lc.messages() self.assertTrue(self.mock_push_server.url in warning_log) self.assertTrue('500' in warning_log) @inlineCallbacks def test_post_inbound_event(self): # Set the URL so stuff is HTTP Posted instead of streamed. 
self.conversation.config['http_api'].update({ 'push_event_url': self.mock_push_server.url, }) yield self.conversation.save() msg = yield self.app_helper.make_stored_outbound( self.conversation, 'out 1', message_id='1') event_d = self.app_helper.make_dispatch_ack( msg, conv=self.conversation) req = yield self.push_calls.get() posted_json_data = req.content.read() req.finish() ack = yield event_d self.assertEqual(TransportEvent.from_json(posted_json_data), ack) @inlineCallbacks def test_bad_urls(self): def assert_not_found(url, headers={}): d = http_request_full(self.url, method='GET', headers=headers) d.addCallback(lambda r: self.assertEqual(r.code, http.NOT_FOUND)) return d yield assert_not_found(self.url) yield assert_not_found(self.url + '/') yield assert_not_found('%s/%s' % (self.url, self.conversation.key), headers=self.auth_headers) yield assert_not_found('%s/%s/' % (self.url, self.conversation.key), headers=self.auth_headers) yield assert_not_found('%s/%s/foo' % (self.url, self.conversation.key), headers=self.auth_headers) @inlineCallbacks def test_send_message_command(self): yield self.app_helper.dispatch_command( 'send_message', user_account_key=self.conversation.user_account.key, conversation_key=self.conversation.key, command_data={ u'batch_id': u'batch-id', u'content': u'foo', u'to_addr': u'to_addr', u'msg_options': { u'helper_metadata': { u'tag': { u'tag': [u'longcode', u'default10080'] } }, u'from_addr': u'default10080', } }) [msg] = self.app_helper.get_dispatched_outbound() self.assertEqual(msg.payload['to_addr'], "to_addr") self.assertEqual(msg.payload['from_addr'], "default10080") self.assertEqual(msg.payload['content'], "foo") self.assertEqual(msg.payload['message_type'], "user_message") self.assertEqual( msg.payload['helper_metadata']['go']['user_account'], self.conversation.user_account.key) self.assertEqual( msg.payload['helper_metadata']['tag']['tag'], ['longcode', 'default10080']) @inlineCallbacks def 
test_process_command_send_message_in_reply_to(self): msg = yield self.app_helper.make_stored_inbound( self.conversation, "foo") yield self.app_helper.dispatch_command( 'send_message', user_account_key=self.conversation.user_account.key, conversation_key=self.conversation.key, command_data={ u'batch_id': u'batch-id', u'content': u'foo', u'to_addr': u'to_addr', u'msg_options': { u'helper_metadata': { u'tag': { u'tag': [u'longcode', u'default10080'] } }, u'transport_name': u'smpp_transport', u'in_reply_to': msg['message_id'], u'transport_type': u'sms', u'from_addr': u'default10080', } }) [sent_msg] = self.app_helper.get_dispatched_outbound() self.assertEqual(sent_msg['to_addr'], msg['from_addr']) self.assertEqual(sent_msg['content'], 'foo') self.assertEqual(sent_msg['in_reply_to'], msg['message_id'])
praekelt/vumi-go
go/apps/http_api/tests/test_vumi_app.py
Python
bsd-3-clause
31,622
0
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Generated code. DO NOT EDIT! # # Snippet for GetModel # NOTE: This snippet has been automatically generated for illustrative purposes only. # It may require modifications to work in your environment. # To install the latest published package dependency, execute the following: # python3 -m pip install google-cloud-aiplatform # [START aiplatform_generated_aiplatform_v1_ModelService_GetModel_async] from google.cloud import aiplatform_v1 async def sample_get_model(): # Create a client client = aiplatform_v1.ModelServiceAsyncClient() # Initialize request argument(s) request = aiplatform_v1.GetModelRequest( name="name_value", ) # Make the request response = await client.get_model(request=request) # Handle the response print(response) # [END aiplatform_generated_aiplatform_v1_ModelService_GetModel_async]
googleapis/python-aiplatform
samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_async.py
Python
apache-2.0
1,466
0.000682
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Fixes foreign key relationship.""" from invenio_ext.sqlalchemy import db from invenio_upgrader.api import op depends_on = ['invenio_2015_03_03_tag_value'] def info(): """Return upgrade recipe information.""" return "Fixes foreign key relationship." def do_upgrade(): """Carry out the upgrade.""" op.alter_column( table_name='oaiHARVESTLOG', column_name='bibupload_task_id', type_=db.MediumInteger(15, unsigned=True), existing_nullable=False, existing_server_default='0' ) def estimate(): """Estimate running time of upgrade in seconds (optional).""" return 1 def pre_upgrade(): """Pre-upgrade checks.""" pass def post_upgrade(): """Post-upgrade checks.""" pass
hachreak/invenio-oaiharvester
invenio_oaiharvester/upgrades/oaiharvester_2015_07_14_innodb.py
Python
gpl-2.0
1,535
0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from heat.common import exception
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.keystone import role_assignments
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils

RESOURCE_TYPE = 'OS::Keystone::DummyRoleAssignment'

keystone_role_assignment_template = {
    'heat_template_version': '2013-05-23',
    'resources': {
        'test_role_assignment': {
            'type': RESOURCE_TYPE,
            'properties': {
                'roles': [
                    {
                        'role': 'role_1',
                        'project': 'project_1',
                    },
                    {
                        'role': 'role_1',
                        'domain': 'domain_1'
                    }
                ]
            }
        }
    }
}


class KeystoneRoleAssignmentTest(common.HeatTestCase):
    """Tests for the KeystoneRoleAssignment resource base class.

    The keystone client and client plugin are fully mocked; every test
    asserts only on the grant/revoke calls made against the mocked
    ``roles`` manager.
    """

    def setUp(self):
        super(KeystoneRoleAssignmentTest, self).setUp()

        self.ctx = utils.dummy_context()

        # For unit testing purpose. Register resource provider explicitly.
        resource._register_class(RESOURCE_TYPE,
                                 role_assignments.KeystoneRoleAssignment)

        self.stack = stack.Stack(
            self.ctx, 'test_stack_keystone',
            template.Template(keystone_role_assignment_template)
        )
        self.test_role_assignment = self.stack['test_role_assignment']

        # Mock client
        self.keystoneclient = mock.MagicMock()
        self.test_role_assignment.client = mock.MagicMock()
        self.test_role_assignment.client.return_value = self.keystoneclient
        self.roles = self.keystoneclient.client.roles

        # Mock client plugin: id-lookup helpers simply echo their argument
        # back, so names in the template double as ids in the assertions.
        def _side_effect(value):
            return value

        self.keystone_client_plugin = mock.MagicMock()
        (self.keystone_client_plugin.get_domain_id.
         side_effect) = _side_effect
        (self.keystone_client_plugin.get_role_id.
         side_effect) = _side_effect
        (self.keystone_client_plugin.get_project_id.
         side_effect) = _side_effect
        self.test_role_assignment.client_plugin = mock.MagicMock()
        (self.test_role_assignment.client_plugin.
         return_value) = self.keystone_client_plugin

    def test_resource_mapping_not_defined(self):
        """``resource_mapping`` must stay undefined for this base class.

        KeystoneRoleAssignment is not meant to be exposed as a standalone
        Heat resource, so its module must not provide a resource_mapping.
        """
        # BUG FIX: the original put self.fail() inside the try block with
        # `except Exception: pass`; AssertionError is an Exception subclass,
        # so a successful import could never fail the test. Catch only
        # ImportError and fail in the else branch instead.
        try:
            from ..resources.role_assignments import resource_mapping  # noqa
        except ImportError:
            pass
        else:
            self.fail("KeystoneRoleAssignment is not designed to be exposed "
                      "as a Heat resource")

    def test_properties_title(self):
        """Property keys of KeystoneRoleAssignment must not be renamed."""
        property_title_map = {
            role_assignments.KeystoneRoleAssignment.ROLES: 'roles'
        }

        for actual_title, expected_title in property_title_map.items():
            self.assertEqual(
                expected_title,
                actual_title,
                'KeystoneRoleAssignment PROPERTIES(%s) title modified.' %
                actual_title)

    def test_property_roles_validate_schema(self):
        """The ``roles`` property schema must keep its published contract."""
        schema = (role_assignments.KeystoneRoleAssignment.
                  properties_schema[
                      role_assignments.KeystoneRoleAssignment.ROLES])
        self.assertEqual(
            True,
            schema.update_allowed,
            'update_allowed for property %s is modified' %
            role_assignments.KeystoneRoleAssignment.ROLES)

        self.assertEqual(properties.Schema.LIST,
                         schema.type,
                         'type for property %s is modified' %
                         role_assignments.KeystoneRoleAssignment.ROLES)

        self.assertEqual('List of role assignments.',
                         schema.description,
                         'description for property %s is modified' %
                         role_assignments.KeystoneRoleAssignment.ROLES)

    def test_role_assignment_handle_create_user(self):
        """Creating with a user id grants both project and domain roles."""
        # validate the properties
        self.assertEqual([
            {
                'role': 'role_1',
                'project': 'project_1',
                'domain': None
            },
            {
                'role': 'role_1',
                'project': None,
                'domain': 'domain_1'
            }],
            (self.test_role_assignment.properties.
             get(role_assignments.KeystoneRoleAssignment.ROLES)))

        self.test_role_assignment.handle_create(user_id='user_1',
                                                group_id=None)

        # validate role assignment creation
        # role-user-domain
        self.roles.grant.assert_any_call(
            role='role_1',
            user='user_1',
            domain='domain_1')

        # role-user-project
        self.roles.grant.assert_any_call(
            role='role_1',
            user='user_1',
            project='project_1')

    def test_role_assignment_handle_create_group(self):
        """Creating with a group id grants both project and domain roles."""
        # validate the properties
        self.assertEqual([
            {
                'role': 'role_1',
                'project': 'project_1',
                'domain': None
            },
            {
                'role': 'role_1',
                'project': None,
                'domain': 'domain_1'
            }],
            (self.test_role_assignment.properties.
             get(role_assignments.KeystoneRoleAssignment.ROLES)))

        self.test_role_assignment.handle_create(user_id=None,
                                                group_id='group_1')

        # validate role assignment creation
        # role-group-domain
        self.roles.grant.assert_any_call(
            role='role_1',
            group='group_1',
            domain='domain_1')

        # role-group-project
        self.roles.grant.assert_any_call(
            role='role_1',
            group='group_1',
            project='project_1')

    def test_role_assignment_handle_update_user(self):
        """Updating a user's roles grants the new set and revokes the old."""
        self.test_role_assignment._stored_properties_data = {
            'roles': [
                {
                    'role': 'role_1',
                    'project': 'project_1'
                },
                {
                    'role': 'role_1',
                    'domain': 'domain_1'
                }
            ]
        }
        prop_diff = {
            role_assignments.KeystoneRoleAssignment.ROLES: [
                {
                    'role': 'role_2',
                    'project': 'project_1'
                },
                {
                    'role': 'role_2',
                    'domain': 'domain_1'
                }
            ]
        }

        self.test_role_assignment.handle_update(
            user_id='user_1', group_id=None, prop_diff=prop_diff)

        # Add role2-project1-domain1
        # role-user-domain
        self.roles.grant.assert_any_call(
            role='role_2',
            user='user_1',
            domain='domain_1')

        # role-user-project
        self.roles.grant.assert_any_call(
            role='role_2',
            user='user_1',
            project='project_1')

        # Remove role1-project1-domain1
        # role-user-domain
        self.roles.revoke.assert_any_call(
            role='role_1',
            user='user_1',
            domain='domain_1')

        # role-user-project
        self.roles.revoke.assert_any_call(
            role='role_1',
            user='user_1',
            project='project_1')

    def test_role_assignment_handle_update_group(self):
        """Updating a group's roles grants the new set and revokes the old."""
        self.test_role_assignment._stored_properties_data = {
            'roles': [
                {
                    'role': 'role_1',
                    'project': 'project_1'
                },
                {
                    'role': 'role_1',
                    'domain': 'domain_1'
                }
            ]
        }
        prop_diff = {
            role_assignments.KeystoneRoleAssignment.ROLES: [
                {
                    'role': 'role_2',
                    'project': 'project_1'
                },
                {
                    'role': 'role_2',
                    'domain': 'domain_1'
                }
            ]
        }

        self.test_role_assignment.handle_update(
            user_id=None, group_id='group_1', prop_diff=prop_diff)

        # Add role2-project1-domain1
        # role-group-domain
        self.roles.grant.assert_any_call(
            role='role_2',
            group='group_1',
            domain='domain_1')

        # role-group-project
        self.roles.grant.assert_any_call(
            role='role_2',
            group='group_1',
            project='project_1')

        # Remove role1-project1-domain1
        # role-group-domain
        self.roles.revoke.assert_any_call(
            role='role_1',
            group='group_1',
            domain='domain_1')

        # role-group-project
        self.roles.revoke.assert_any_call(
            role='role_1',
            group='group_1',
            project='project_1')

    def test_role_assignment_handle_delete_user(self):
        """Deleting with a user id revokes every stored role assignment."""
        self.test_role_assignment._stored_properties_data = {
            'roles': [
                {
                    'role': 'role_1',
                    'project': 'project_1'
                },
                {
                    'role': 'role_1',
                    'domain': 'domain_1'
                }
            ]
        }
        self.assertIsNone(self.test_role_assignment.handle_delete(
            user_id='user_1', group_id=None
        ))

        # Remove role1-project1-domain1
        # role-user-domain
        self.roles.revoke.assert_any_call(
            role='role_1',
            user='user_1',
            domain='domain_1')

        # role-user-project
        self.roles.revoke.assert_any_call(
            role='role_1',
            user='user_1',
            project='project_1')

    def test_role_assignment_handle_delete_group(self):
        """Deleting with a group id revokes every stored role assignment."""
        self.test_role_assignment._stored_properties_data = {
            'roles': [
                {
                    'role': 'role_1',
                    'project': 'project_1'
                },
                {
                    'role': 'role_1',
                    'domain': 'domain_1'
                }
            ]
        }
        self.assertIsNone(self.test_role_assignment.handle_delete(
            user_id=None, group_id='group_1'
        ))

        # Remove role1-project1-domain1
        # role-group-domain
        self.roles.revoke.assert_any_call(
            role='role_1',
            group='group_1',
            domain='domain_1')

        # role-group-project
        self.roles.revoke.assert_any_call(
            role='role_1',
            group='group_1',
            project='project_1')

    def test_validate_1(self):
        """A role with neither project nor domain must fail validation."""
        self.test_role_assignment.properties = mock.MagicMock()

        # both project and domain are none
        self.test_role_assignment.properties.get.return_value = [
            dict(role='role1')]
        self.assertRaises(exception.StackValidationFailed,
                          self.test_role_assignment.validate)

    def test_validate_2(self):
        """A role with both project and domain must raise a conflict."""
        self.test_role_assignment.properties = mock.MagicMock()

        # both project and domain are not none
        self.test_role_assignment.properties.get.return_value = [
            dict(role='role1',
                 project='project1',
                 domain='domain1')
        ]
        self.assertRaises(exception.ResourcePropertyConflict,
                          self.test_role_assignment.validate)
miguelgrinberg/heat
heat/tests/keystone/test_role_assignments.py
Python
apache-2.0
12,401
0
# Default behaviour: be lenient when parsing malformed PDFs.
STRICT = False

try:
    # A Django project may override the default through its settings module.
    from django.conf import settings
except Exception:
    # Not a Django project (or Django is unavailable); keep the default.
    pass
else:
    try:
        # getattr may still raise (e.g. unconfigured settings); swallow and
        # keep the default in that case, mirroring the non-Django path.
        STRICT = getattr(settings, 'PDF_MINER_IS_STRICT', STRICT)
    except Exception:
        pass
tiffanyjaya/kai
vendors/pdfminer.six/pdfminer/settings.py
Python
mit
187
0
"""HTTP endpoints for the Sentry event-ingestion ("client") API.

Contains the generic ``APIView`` dispatch machinery plus the concrete
``StoreView`` / ``MinidumpView`` / ``CspReportView`` endpoints and a few
small utility views (robots.txt, crossdomain.xml).
"""
from __future__ import absolute_import, print_function

import base64
import logging
import six
import traceback

from time import time

from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotAllowed
from django.utils.encoding import force_bytes
from django.views.decorators.cache import never_cache, cache_control
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import View as BaseView
from functools import wraps
from raven.contrib.django.models import client as Raven

from sentry import quotas, tsdb
from sentry.coreapi import (
    APIError, APIForbidden, APIRateLimited, ClientApiHelper, CspApiHelper,
    LazyData, MinidumpApiHelper,
)
from sentry.interfaces import schemas
from sentry.models import Project, OrganizationOption, Organization
from sentry.signals import (
    event_accepted, event_dropped, event_filtered, event_received)
from sentry.quotas.base import RateLimit
from sentry.utils import json, metrics
from sentry.utils.data_filters import FILTER_STAT_KEYS_TO_VALUES
from sentry.utils.data_scrubber import SensitiveDataFilter
from sentry.utils.dates import to_datetime
from sentry.utils.http import (
    is_valid_origin,
    get_origins,
    is_same_domain,
)
from sentry.utils.pubsub import QueuedPublisher, RedisPublisher
from sentry.utils.safe import safe_execute
from sentry.web.helpers import render_to_response

logger = logging.getLogger('sentry')

# Transparent 1x1 gif
# See http://probablyprogramming.com/2009/03/15/the-tiniest-gif-ever
PIXEL = base64.b64decode('R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=')

PROTOCOL_VERSIONS = frozenset(('2.0', '3', '4', '5', '6', '7'))

# Optional raw-request publishing to Redis; disabled unless explicitly
# enabled in settings (pubsub stays None otherwise).
pubsub = QueuedPublisher(
    RedisPublisher(getattr(settings, 'REQUESTS_PUBSUB_CONNECTION', None))
) if getattr(settings, 'REQUESTS_PUBSUB_ENABLED', False) else None


def api(func):
    """Decorator: return JSON for AJAX callers, otherwise redirect back
    to the referring page (falling back to the 'sentry' root URL)."""
    @wraps(func)
    def wrapped(request, *args, **kwargs):
        data = func(request, *args, **kwargs)
        if request.is_ajax():
            response = HttpResponse(data)
            response['Content-Type'] = 'application/json'
        else:
            ref = request.META.get('HTTP_REFERER')
            if ref is None or not is_same_domain(ref, request.build_absolute_uri()):
                ref = reverse('sentry')
            return HttpResponseRedirect(ref)
        return response

    return wrapped


class APIView(BaseView):
    """Base class for client-API endpoints: auth parsing, project lookup,
    CORS headers, per-version metrics, and uniform error responses."""

    helper_cls = ClientApiHelper

    def _get_project_from_id(self, project_id):
        # Returns None for a falsy id; raises APIError for a malformed or
        # unknown id.
        if not project_id:
            return
        if not project_id.isdigit():
            raise APIError('Invalid project_id: %r' % project_id)
        try:
            return Project.objects.get_from_cache(id=project_id)
        except Project.DoesNotExist:
            raise APIError('Invalid project_id: %r' % project_id)

    def _parse_header(self, request, helper, project):
        # Validates the protocol version and client identifier from the
        # auth header; returns the parsed auth object.
        auth = helper.auth_from_request(request)

        if auth.version not in PROTOCOL_VERSIONS:
            raise APIError(
                'Client using unsupported server protocol version (%r)' %
                six.text_type(auth.version or '')
            )

        if not auth.client:
            raise APIError("Client did not send 'client' identifier")

        return auth

    @csrf_exempt
    @never_cache
    def dispatch(self, request, project_id=None, *args, **kwargs):
        helper = self.helper_cls(
            agent=request.META.get('HTTP_USER_AGENT'),
            project_id=project_id,
            ip_address=request.META['REMOTE_ADDR'],
        )
        origin = None

        try:
            origin = helper.origin_from_request(request)

            response = self._dispatch(
                request, helper, project_id=project_id, origin=origin, *args, **kwargs
            )
        except APIError as e:
            context = {
                'error': force_bytes(e.msg, errors='replace'),
            }
            if e.name:
                context['error_name'] = e.name

            response = HttpResponse(
                json.dumps(context), content_type='application/json', status=e.http_status
            )
            # Set X-Sentry-Error as in many cases it is easier to inspect the headers
            response['X-Sentry-Error'] = context['error']

            if isinstance(e, APIRateLimited) and e.retry_after is not None:
                response['Retry-After'] = six.text_type(e.retry_after)

        except Exception as e:
            # TODO(dcramer): test failures are not outputting the log message
            # here
            if settings.DEBUG:
                content = traceback.format_exc()
            else:
                content = ''
            logger.exception(e)
            response = HttpResponse(
                content, content_type='text/plain', status=500)

        # TODO(dcramer): it'd be nice if we had an incr_multi method so
        # tsdb could optimize this
        metrics.incr('client-api.all-versions.requests')
        metrics.incr('client-api.all-versions.responses.%s' % (response.status_code, ))
        metrics.incr(
            'client-api.all-versions.responses.%sxx' % (
                six.text_type(response.status_code)[0],
            )
        )

        if helper.context.version:
            metrics.incr('client-api.v%s.requests' % (helper.context.version, ))
            metrics.incr(
                'client-api.v%s.responses.%s' % (
                    helper.context.version, response.status_code)
            )
            metrics.incr(
                'client-api.v%s.responses.%sxx' % (helper.context.version, six.text_type(
                    response.status_code)[0])
            )

        if response.status_code != 200 and origin:
            # We allow all origins on errors
            response['Access-Control-Allow-Origin'] = '*'

        if origin:
            response['Access-Control-Allow-Headers'] = \
                'X-Sentry-Auth, X-Requested-With, Origin, Accept, ' \
                'Content-Type, Authentication'
            response['Access-Control-Allow-Methods'] = \
                ', '.join(self._allowed_methods())
            response['Access-Control-Expose-Headers'] = \
                'X-Sentry-Error, Retry-After'

        return response

    def _dispatch(self, request, helper, project_id=None, origin=None, *args, **kwargs):
        request.user = AnonymousUser()

        project = self._get_project_from_id(project_id)
        if project:
            helper.context.bind_project(project)
            Raven.tags_context(helper.context.get_tags_context())

        if origin is not None:
            # This check is specific for clients who need CORS support
            if not project:
                raise APIError('Client must be upgraded for CORS support')
            if not is_valid_origin(origin, project):
                tsdb.incr(tsdb.models.project_total_received_cors, project.id)
                raise APIForbidden('Invalid origin: %s' % (origin, ))

        # XXX: It seems that the OPTIONS call does not always include custom headers
        if request.method == 'OPTIONS':
            response = self.options(request, project)
        else:
            auth = self._parse_header(request, helper, project)

            key = helper.project_key_from_auth(auth)

            # Legacy API was /api/store/ and the project ID was only available elsewhere
            if not project:
                project = Project.objects.get_from_cache(id=key.project_id)
                helper.context.bind_project(project)
            elif key.project_id != project.id:
                raise APIError('Two different projects were specified')

            helper.context.bind_auth(auth)
            Raven.tags_context(helper.context.get_tags_context())

            # Explicitly bind Organization so we don't implicitly query it later
            # this just allows us to comfortably assure that `project.organization` is safe.
            # This also allows us to pull the object from cache, instead of being
            # implicitly fetched from database.
            project.organization = Organization.objects.get_from_cache(
                id=project.organization_id)

            if auth.version != '2.0':
                if not auth.secret_key:
                    # If we're missing a secret_key, check if we are allowed
                    # to do a CORS request.

                    # If we're missing an Origin/Referrer header entirely,
                    # we only want to support this on GET requests. By allowing
                    # un-authenticated CORS checks for POST, we basially
                    # are obsoleting our need for a secret key entirely.
                    if origin is None and request.method != 'GET':
                        raise APIForbidden(
                            'Missing required attribute in authentication header: sentry_secret'
                        )

                    if not is_valid_origin(origin, project):
                        if project:
                            tsdb.incr(
                                tsdb.models.project_total_received_cors, project.id)
                        raise APIForbidden(
                            'Missing required Origin or Referer header')

            response = super(APIView, self).dispatch(
                request=request, project=project, auth=auth, helper=helper, key=key, **kwargs
            )

        if origin:
            if origin == 'null':
                # If an Origin is `null`, but we got this far, that means
                # we've gotten past our CORS check for some reason. But the
                # problem is that we can't return "null" as a valid response
                # to `Access-Control-Allow-Origin` and we don't have another
                # value to work with, so just allow '*' since they've gotten
                # this far.
                response['Access-Control-Allow-Origin'] = '*'
            else:
                response['Access-Control-Allow-Origin'] = origin

        return response

    # XXX: backported from Django 1.5
    def _allowed_methods(self):
        return [m.upper() for m in self.http_method_names if hasattr(self, m)]

    def options(self, request, *args, **kwargs):
        response = HttpResponse()
        response['Allow'] = ', '.join(self._allowed_methods())
        response['Content-Length'] = '0'
        return response


class StoreView(APIView):
    """
    The primary endpoint for storing new events.

    This will validate the client's authentication and data, and if
    successful pass on the payload to the internal database handler.

    Authentication works in three flavors:

    1. Explicit signed requests

       These are implemented using the documented signed request protocol, and
       require an authentication header which is signed using with the project
       member's secret key.

    2. CORS Secured Requests

       Generally used for communications with client-side platforms (such as
       JavaScript in the browser), they require a standard header, excluding
       the signature and timestamp requirements, and must be listed in the
       origins for the given project (or the global origins).

    3. Implicit trusted requests

       Used by the Sentry core, they are only available from same-domain requests
       and do not require any authentication information. They only require that
       the user be authenticated, and a project_id be sent in the GET variables.

    """

    def post(self, request, **kwargs):
        try:
            data = request.body
        except Exception as e:
            logger.exception(e)
            # We were unable to read the body.
            # This would happen if a request were submitted
            # as a multipart form for example, where reading
            # body yields an Exception. There's also not a more
            # sane exception to catch here. This will ultimately
            # bubble up as an APIError.
            data = None

        if pubsub is not None and data is not None:
            pubsub.publish('requests', data)

        response_or_event_id = self.process(request, data=data, **kwargs)
        if isinstance(response_or_event_id, HttpResponse):
            return response_or_event_id
        return HttpResponse(
            json.dumps({
                'id': response_or_event_id,
            }), content_type='application/json'
        )

    def get(self, request, **kwargs):
        data = request.GET.get('sentry_data', '')
        response_or_event_id = self.process(request, data=data, **kwargs)

        # Return a simple 1x1 gif for browser so they don't throw a warning
        response = HttpResponse(PIXEL, 'image/gif')
        if not isinstance(response_or_event_id, HttpResponse):
            response['X-Sentry-ID'] = response_or_event_id
        return response

    def process(self, request, project, key, auth, helper, data, **kwargs):
        """Filter, rate-limit, scrub, and store one incoming event.

        Returns the accepted event id, or raises APIForbidden /
        APIRateLimited when the event is rejected.
        """
        metrics.incr('events.total')

        if not data:
            raise APIError('No JSON data was found')

        remote_addr = request.META['REMOTE_ADDR']

        # LazyData defers decoding/validation of the payload until accessed.
        data = LazyData(
            data=data,
            content_encoding=request.META.get('HTTP_CONTENT_ENCODING', ''),
            helper=helper,
            project=project,
            key=key,
            auth=auth,
            client_ip=remote_addr,
        )

        event_received.send_robust(
            ip=remote_addr,
            project=project,
            sender=type(self),
        )
        start_time = time()
        tsdb_start_time = to_datetime(start_time)
        should_filter, filter_reason = helper.should_filter(
            project, data, ip_address=remote_addr)
        if should_filter:
            increment_list = [
                (tsdb.models.project_total_received, project.id),
                (tsdb.models.project_total_blacklisted, project.id),
                (tsdb.models.organization_total_received,
                 project.organization_id),
                (tsdb.models.organization_total_blacklisted,
                 project.organization_id),
                (tsdb.models.key_total_received, key.id),
                (tsdb.models.key_total_blacklisted, key.id),
            ]

            try:
                increment_list.append(
                    (FILTER_STAT_KEYS_TO_VALUES[filter_reason], project.id))
            # should error when filter_reason does not match a key in FILTER_STAT_KEYS_TO_VALUES
            except KeyError:
                pass

            tsdb.incr_multi(
                increment_list,
                timestamp=tsdb_start_time,
            )

            metrics.incr('events.blacklisted', tags={
                'reason': filter_reason})
            event_filtered.send_robust(
                ip=remote_addr,
                project=project,
                sender=type(self),
            )
            raise APIForbidden('Event dropped due to filter')

        # TODO: improve this API (e.g. make RateLimit act on __ne__)
        rate_limit = safe_execute(
            quotas.is_rate_limited, project=project, key=key, _with_transaction=False
        )
        if isinstance(rate_limit, bool):
            rate_limit = RateLimit(is_limited=rate_limit, retry_after=None)

        # XXX(dcramer): when the rate limiter fails we drop events to ensure
        # it cannot cascade
        if rate_limit is None or rate_limit.is_limited:
            if rate_limit is None:
                helper.log.debug(
                    'Dropped event due to error with rate limiter')
            tsdb.incr_multi(
                [
                    (tsdb.models.project_total_received, project.id),
                    (tsdb.models.project_total_rejected, project.id),
                    (tsdb.models.organization_total_received,
                     project.organization_id),
                    (tsdb.models.organization_total_rejected,
                     project.organization_id),
                    (tsdb.models.key_total_received, key.id),
                    (tsdb.models.key_total_rejected, key.id),
                ],
                timestamp=tsdb_start_time,
            )
            metrics.incr(
                'events.dropped',
                tags={
                    'reason': rate_limit.reason_code if rate_limit else 'unknown',
                }
            )
            event_dropped.send_robust(
                ip=remote_addr,
                project=project,
                sender=type(self),
                reason_code=rate_limit.reason_code if rate_limit else None,
            )
            if rate_limit is not None:
                raise APIRateLimited(rate_limit.retry_after)
        else:
            tsdb.incr_multi(
                [
                    (tsdb.models.project_total_received, project.id),
                    (tsdb.models.organization_total_received,
                     project.organization_id),
                    (tsdb.models.key_total_received, key.id),
                ],
                timestamp=tsdb_start_time,
            )

        org_options = OrganizationOption.objects.get_all_values(
            project.organization_id)

        # Org-level "require" options override the project-level opt-in.
        if org_options.get('sentry:require_scrub_ip_address', False):
            scrub_ip_address = True
        else:
            scrub_ip_address = project.get_option(
                'sentry:scrub_ip_address', False)

        event_id = data['event_id']

        # TODO(dcramer): ideally we'd only validate this if the event_id was
        # supplied by the user
        cache_key = 'ev:%s:%s' % (project.id, event_id, )

        if cache.get(cache_key) is not None:
            raise APIForbidden(
                'An event with the same ID already exists (%s)' % (event_id, ))

        if org_options.get('sentry:require_scrub_data', False):
            scrub_data = True
        else:
            scrub_data = project.get_option('sentry:scrub_data', True)

        if scrub_data:
            # We filter data immediately before it ever gets into the queue
            sensitive_fields_key = 'sentry:sensitive_fields'
            sensitive_fields = (
                org_options.get(sensitive_fields_key, []) +
                project.get_option(sensitive_fields_key, [])
            )

            exclude_fields_key = 'sentry:safe_fields'
            exclude_fields = (
                org_options.get(exclude_fields_key, []) +
                project.get_option(exclude_fields_key, [])
            )

            if org_options.get('sentry:require_scrub_defaults', False):
                scrub_defaults = True
            else:
                scrub_defaults = project.get_option(
                    'sentry:scrub_defaults', True)

            inst = SensitiveDataFilter(
                fields=sensitive_fields,
                include_defaults=scrub_defaults,
                exclude_fields=exclude_fields,
            )
            inst.apply(data)

        if scrub_ip_address:
            # We filter data immediately before it ever gets into the queue
            helper.ensure_does_not_have_ip(data)

        # mutates data (strips a lot of context if not queued)
        helper.insert_data_to_database(data, start_time=start_time)

        # Dedupe window: remember this event id for five minutes.
        cache.set(cache_key, '', 60 * 5)

        helper.log.debug('New event received (%s)', event_id)

        event_accepted.send_robust(
            ip=remote_addr,
            data=data,
            project=project,
            sender=type(self),
        )

        return event_id


class MinidumpView(StoreView):
    """Accepts Breakpad/Crashpad minidump uploads (multipart POST)."""

    helper_cls = MinidumpApiHelper
    content_types = ('multipart/form-data', )

    def _dispatch(self, request, helper, project_id=None, origin=None, *args, **kwargs):
        # TODO(ja): Refactor shared code with CspReportView. Especially, look at
        # the sentry_key override and test it.

        # A minidump submission as implemented by Breakpad and Crashpad or any
        # other library following the Mozilla Soccorro protocol is a POST request
        # without Origin or Referer headers. Therefore, we cannot validate the
        # origin of the request, but we *can* validate the "prod" key in future.
        if request.method != 'POST':
            return HttpResponseNotAllowed(['POST'])

        content_type = request.META.get('CONTENT_TYPE')
        # In case of multipart/form-data, the Content-Type header also includes
        # a boundary. Therefore, we cannot check for an exact match.
        if content_type is None or not content_type.startswith(self.content_types):
            raise APIError('Invalid Content-Type')

        request.user = AnonymousUser()

        project = self._get_project_from_id(project_id)
        helper.context.bind_project(project)
        Raven.tags_context(helper.context.get_tags_context())

        # This is yanking the auth from the querystring since it's not
        # in the POST body. This means we expect a `sentry_key` and
        # `sentry_version` to be set in querystring
        auth = helper.auth_from_request(request)

        key = helper.project_key_from_auth(auth)
        if key.project_id != project.id:
            raise APIError('Two different projects were specified')

        helper.context.bind_auth(auth)
        Raven.tags_context(helper.context.get_tags_context())

        return super(APIView, self).dispatch(
            request=request, project=project, auth=auth, helper=helper, key=key, **kwargs
        )

    def post(self, request, **kwargs):
        try:
            # NOTE(review): request.POST is an immutable QueryDict by default
            # in Django; assigning into it can raise AttributeError rather
            # than KeyError — confirm the helper/middleware makes it mutable.
            data = request.POST
            data['upload_file_minidump'] = request.FILES['upload_file_minidump']
        except KeyError:
            raise APIError('Missing minidump upload')

        response_or_event_id = self.process(request, data=data, **kwargs)
        if isinstance(response_or_event_id, HttpResponse):
            return response_or_event_id
        return HttpResponse(
            json.dumps({'id': response_or_event_id}), content_type='application/json'
        )


class StoreSchemaView(BaseView):
    """Serves the JSON schema used to validate incoming events."""

    def get(self, request, **kwargs):
        return HttpResponse(json.dumps(schemas.EVENT_SCHEMA), content_type='application/json')


class CspReportView(StoreView):
    """Accepts browser Content-Security-Policy violation reports."""

    helper_cls = CspApiHelper
    content_types = ('application/csp-report', 'application/json')

    def _dispatch(self, request, helper, project_id=None, origin=None, *args, **kwargs):
        # A CSP report is sent as a POST request with no Origin or Referer
        # header. What we're left with is a 'document-uri' key which is
        # inside of the JSON body of the request. This 'document-uri' value
        # should be treated as an origin check since it refers to the page
        # that triggered the report. The Content-Type is supposed to be
        # `application/csp-report`, but FireFox sends it as `application/json`.
        if request.method != 'POST':
            return HttpResponseNotAllowed(['POST'])

        if request.META.get('CONTENT_TYPE') not in self.content_types:
            raise APIError('Invalid Content-Type')

        request.user = AnonymousUser()

        project = self._get_project_from_id(project_id)
        helper.context.bind_project(project)
        Raven.tags_context(helper.context.get_tags_context())

        # This is yanking the auth from the querystring since it's not
        # in the POST body. This means we expect a `sentry_key` and
        # `sentry_version` to be set in querystring
        auth = helper.auth_from_request(request)

        key = helper.project_key_from_auth(auth)
        if key.project_id != project.id:
            raise APIError('Two different projects were specified')

        helper.context.bind_auth(auth)
        Raven.tags_context(helper.context.get_tags_context())

        return super(APIView, self).dispatch(
            request=request, project=project, auth=auth, helper=helper, key=key, **kwargs
        )

    def post(self, request, project, helper, **kwargs):
        data = helper.safely_load_json_string(request.body)

        # Do origin check based on the `document-uri` key as explained
        # in `_dispatch`.
        try:
            report = data['csp-report']
        except KeyError:
            raise APIError('Missing csp-report')

        origin = report.get('document-uri')

        # No idea, but this is garbage
        if origin == 'about:blank':
            raise APIForbidden('Invalid document-uri')

        if not is_valid_origin(origin, project):
            if project:
                tsdb.incr(tsdb.models.project_total_received_cors, project.id)
            raise APIForbidden('Invalid document-uri')

        # Attach on collected meta data. This data obviously isn't a part
        # of the spec, but we need to append to the report sentry specific things.
        report['_meta'] = {
            'release': request.GET.get('sentry_release'),
        }

        response_or_event_id = self.process(
            request, project=project, helper=helper, data=report, **kwargs
        )
        if isinstance(response_or_event_id, HttpResponse):
            return response_or_event_id
        return HttpResponse(status=201)


@cache_control(max_age=3600, public=True)
def robots_txt(request):
    return HttpResponse("User-agent: *\nDisallow: /\n", content_type='text/plain')


@cache_control(max_age=3600, public=True)
def crossdomain_xml_index(request):
    response = render_to_response('sentry/crossdomain_index.xml')
    response['Content-Type'] = 'application/xml'
    return response


@cache_control(max_age=60)
def crossdomain_xml(request, project_id):
    if not project_id.isdigit():
        return HttpResponse(status=404)
    try:
        project = Project.objects.get_from_cache(id=project_id)
    except Project.DoesNotExist:
        return HttpResponse(status=404)

    origin_list = get_origins(project)

    response = render_to_response(
        'sentry/crossdomain.xml', {'origin_list': origin_list})
    response['Content-Type'] = 'application/xml'
    return response
gencer/sentry
src/sentry/web/api.py
Python
bsd-3-clause
26,313
0.001254
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
wfdiff test suite.

Run with pytest.

:copyright:
    Lion Krischer (krischer@geophysik.uni-muenchen.de), 2014-2015
:license:
    GNU General Public License, Version 3
    (http://www.gnu.org/copyleft/gpl.html)
"""
import inspect
import os

# Resolve the test data directory next to this module so the suite works
# regardless of the current working directory.
_this_file = inspect.getfile(inspect.currentframe())
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(_this_file)), "data")
krischer/wfdiff
src/wfdiff/tests/test_wfdiff.py
Python
gpl-3.0
452
0
### Implementation of the numerical Stehfest inversion inspired by J Barker
### https://www.uni-leipzig.de/diffusion/presentations_DFII/pdf/DFII_Barker_Reduced.pdf

import math

import inversion


def _laplace_roots(v, De, R, deg, s):
    """Return ``(a1, a2)``, the Laplace-space exponents shared by all solutions.

    a{1,2} = (v -/+ sqrt(v**2 + 4*De*R*(s + deg))) / (2*De); a1 takes the
    negative branch of the square root, a2 the positive one.
    """
    # NOTE: the original code wrote the exponent as ``** (1 / 2)`` in the
    # finite-domain functions, which evaluates to 0 under Python 2 integer
    # division; ``0.5`` is used consistently here.
    root = (v ** 2 + 4 * De * R * (s + deg)) ** 0.5
    return (v - root) / (2 * De), (v + root) / (2 * De)


def finiteConc(t, v, De, R, deg, x, c0, L, N):
    '''Resident concentration for a finite domain.

    t is time (T), v is velocity (L/T), De is effective hydrodynamic
    dispersion (including diffusion) (L^2/T), R is retardation (-), deg is
    first order decay constant (1/T), x is position along path (L), c0 is
    source concentration (M/L^3), L is pathway length (L), N is input
    variable stehfestCoeff().
    Return concentration (M/L^3) at position x'''
    Vs = inversion.stehfestCoeff(N)
    rt = math.log(2.0) / t
    total = 0.0
    for i in range(1, N + 1):
        s = i * rt  # i-th Stehfest sampling point on the Laplace axis
        a1, a2 = _laplace_roots(v, De, R, deg, s)
        numer = math.exp(a2 * x + a1 * L) - math.exp(a1 * x + a2 * L)
        denom = math.exp(a1 * L) - math.exp(a2 * L)
        total += Vs[i - 1] * (c0 / s) * (numer / denom)
    return rt * total


def finiteFlux(t, v, De, R, deg, x, c0, L, n, N):
    '''Mass flux for a finite domain.

    t is time (T), v is velocity (L/T), De is effective hydrodynamic
    dispersion (including diffusion) (L^2/T), R is retardation (-), deg is
    first order decay constant (1/T), x is position along path (L), c0 is
    source concentration (M/L^3), L is pathway length (L), n is effective
    porosity (-), N is input variable stehfestCoeff().
    Return flux at position x'''
    Vs = inversion.stehfestCoeff(N)
    rt = math.log(2.0) / t
    total = 0.0
    for i in range(1, N + 1):
        s = i * rt
        a1, a2 = _laplace_roots(v, De, R, deg, s)
        z1 = v - De * a2
        z2 = v - De * a1
        numer = z1 * math.exp(a2 * x + a1 * L) - z2 * math.exp(a1 * x + a2 * L)
        denom = math.exp(a1 * L) - math.exp(a2 * L)
        total += Vs[i - 1] * ((c0 * n) / s) * (numer / denom)
    return rt * total


def infiniteConc(t, v, De, R, deg, x, c0, N):
    '''Resident concentration for a semi-infinite domain.

    t is time (T), v is velocity (L/T), De is effective hydrodynamic
    dispersion (including diffusion) (L^2/T), R is retardation (-), deg is
    first order decay constant (1/T), x is position along path (L), c0 is
    source concentration (M/L^3), N is input variable stehfestCoeff().
    Return concentration (M/L^3) at position x'''
    Vs = inversion.stehfestCoeff(N)
    rt = math.log(2.0) / t
    total = 0.0
    for i in range(1, N + 1):
        s = i * rt
        a1, _ = _laplace_roots(v, De, R, deg, s)
        total += Vs[i - 1] * (c0 / s) * math.exp(a1 * x)
    return rt * total


def infiniteFlux(t, v, De, R, deg, x, c0, n, N):
    '''Mass flux for a semi-infinite domain.

    t is time (T), v is velocity (L/T), De is effective hydrodynamic
    dispersion (including diffusion) (L^2/T), R is retardation (-), deg is
    first order decay constant (1/T), x is position along path (L), c0 is
    source concentration (M/L^3), n is effective porosity (-), N is input
    variable stehfestCoeff().
    Return flux at position x'''
    Vs = inversion.stehfestCoeff(N)
    rt = math.log(2.0) / t
    total = 0.0
    for i in range(1, N + 1):
        s = i * rt
        a1, _ = _laplace_roots(v, De, R, deg, s)
        total += Vs[i - 1] * ((c0 * n) / s) * (v - De * a1) * math.exp(a1 * x)
    return rt * total
tachylyte/HydroGeoPy
one_d_numerical.py
Python
bsd-2-clause
3,784
0.013214
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('quizzes', '0004_auto_20150811_1354'), ] operations = [ migrations.AlterModelOptions( name='choice', options={'ordering': ['order']}, ), migrations.AlterModelOptions( name='question', options={'ordering': ['order']}, ), migrations.AlterModelOptions( name='quiz', options={'ordering': ['-timestamp'], 'verbose_name': 'Quiz', 'verbose_name_plural': 'Quizzes'}, ), migrations.AddField( model_name='choice', name='order', field=models.IntegerField(default=0), ), migrations.AddField( model_name='question', name='order', field=models.IntegerField(default=0), ), migrations.AlterField( model_name='quiz', name='is_active', field=models.BooleanField(default=False, verbose_name=b'active'), ), ]
ikedumancas/ikequizgen
quizzes/migrations/0005_auto_20150813_0645.py
Python
mit
1,156
0.000865
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Proxy that dispatches Discovery requests to the prod Discovery service.""" import httplib import json import logging class DiscoveryApiProxy(object): """Proxies discovery service requests to a known cloud endpoint.""" # The endpoint host we're using to proxy discovery and static requests. # Using separate constants to make it easier to change the discovery service. _DISCOVERY_PROXY_HOST = 'webapis-discovery.appspot.com' _STATIC_PROXY_HOST = 'webapis-discovery.appspot.com' _DISCOVERY_API_PATH_PREFIX = '/_ah/api/discovery/v1/' def _dispatch_request(self, path, body): """Proxies GET request to discovery service API. Args: path: A string containing the URL path relative to discovery service. body: A string containing the HTTP POST request body. Returns: HTTP response body or None if it failed. 
""" full_path = self._DISCOVERY_API_PATH_PREFIX + path headers = {'Content-type': 'application/json'} connection = httplib.HTTPSConnection(self._DISCOVERY_PROXY_HOST) try: connection.request('POST', full_path, body, headers) response = connection.getresponse() response_body = response.read() if response.status != 200: logging.error('Discovery API proxy failed on %s with %d.\r\n' 'Request: %s\r\nResponse: %s', full_path, response.status, body, response_body) return None return response_body finally: connection.close() def generate_discovery_doc(self, api_config, api_format): """Generates a discovery document from an API file. Args: api_config: A string containing the .api file contents. api_format: A string, either 'rest' or 'rpc' depending on the which kind of discvoery doc is requested. Returns: The discovery doc as JSON string. Raises: ValueError: When api_format is invalid. """ if api_format not in ['rest', 'rpc']: raise ValueError('Invalid API format') path = 'apis/generate/' + api_format request_dict = {'config': json.dumps(api_config)} request_body = json.dumps(request_dict) return self._dispatch_request(path, request_body) def generate_directory(self, api_configs): """Generates an API directory from a list of API files. Args: api_configs: A list of strings which are the .api file contents. Returns: The API directory as JSON string. """ request_dict = {'configs': api_configs} request_body = json.dumps(request_dict) return self._dispatch_request('apis/generate/directory', request_body) def get_static_file(self, path): """Returns static content via a GET request. Args: path: A string containing the URL path after the domain. Returns: A tuple of (response, response_body): response: A HTTPResponse object with the response from the static proxy host. response_body: A string containing the response body. 
""" connection = httplib.HTTPSConnection(self._STATIC_PROXY_HOST) try: connection.request('GET', path, None, {}) response = connection.getresponse() response_body = response.read() finally: connection.close() return response, response_body
elsigh/browserscope
third_party/appengine_tools/devappserver2/endpoints/discovery_api_proxy.py
Python
apache-2.0
3,882
0.005667
# -*- coding: utf-8 -*- ##--------------------------------------####### # Cryptographie # ##--------------------------------------####### # WxGeometrie # Dynamic geometry, graph plotter, and more for french mathematic teachers. # Copyright (C) 2005-2013 Nicolas Pourcelot # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA from string import ascii_uppercase as majuscules from functools import partial from random import shuffle import re from PyQt5.QtWidgets import QVBoxLayout, QInputDialog, QPushButton,\ QTextEdit, QGridLayout, QLabel, QLineEdit, QSpacerItem from PyQt5.QtCore import Qt, QTimer from ...GUI.menu import MenuBar from ...GUI.panel import Panel_simple from ...pylib import print_error #~ from ... import param dict_accents = { "é": "E", "É": "E", "ê": "E", "Ê": "E", "è": "E", "È": "E", "à": "A", "À": "A", "â": "A", "Â": "A", "ô": "O", "Ô": "O", "î": "I", "Î": "I", "ù": "U", "Ù": "U", "û": "U", "Û": "U", "ç": "C", "Ç": "C", } class CaseLettre(QLineEdit): def __init__(self, parent): self.parent = parent QLineEdit.__init__(self, parent) self.setAlignment(Qt.AlignCenter) def keyPressEvent(self, evt): self.parent.message('') n = evt.key() if 65 <= n <= 90 or 97 <= n <= 122: c = chr(n).upper() for case in self.parent.cases.values(): if case.text() == c: self.parent.message('La lettre %s est déjà utilisée !' 
%c) return self.setText(c) elif n in (Qt.Key_Backspace, Qt.Key_Delete): self.clear() ##QLineEdit.keyPressEvent(self, evt) class CryptographieMenuBar(MenuBar): def __init__(self, panel): MenuBar.__init__(self, panel) self.ajouter("Fichier", ["quitter"]) self.ajouter("Affichage", ["onglet"], ["plein_ecran"]) self.ajouter("Outils", ["Coder un message", "Code le message par substitution mono-alphabétique.", "Ctrl+K", panel.coder], ["Coder avec espaces", "Code le message en conservant les espaces (substitution mono-alphabétique).", "Ctrl+Shift+K", partial(panel.coder, espaces=True)], ["Générer une nouvelle clé", "Générer une nouvelle permutation de l'alphabet.", None, panel.generer_cle], ["Modifier la clé", "Générer une nouvelle permutation de l'alphabet.", None, panel.DlgModifierCle], None, ["Coder avec Vigenère", "Codage par la méthode de Vigenère (substitution poly-alphabétique).", None, partial(panel.coder_vigenere, ask=True)], None, ["options"]) self.ajouter("avance2") self.ajouter("?") class Cryptographie(Panel_simple): titre = "Cryptographie" # Donner un titre à chaque module def __init__(self, *args, **kw): Panel_simple.__init__(self, *args, **kw) self._freeze = False self.widget_modifie = None # La clé est la permutation de l'alphabet actuellement utilisée # pour le codage par substitution mono-alphabétique. self.generer_cle() # La clé de chiffrement pour le codage par substitution poly-alphabétique # (appelé aussi chiffre de Vigenère). self.cle_vigenere = 'EXEMPLE' # Signe indiquant un caractère non déchiffré self.symbole = '-' # '.' 
self.sizer = QVBoxLayout() self.textes = QGridLayout() self.textes.setSpacing(5) size = (400, 300) txt_clair = QLabel("<b>Texte en clair</b>") self.clair = QTextEdit() self.clair.setMinimumSize(*size) formater_clair = partial(self.formater, widget=self.clair) self.clair.textChanged.connect(formater_clair) self.clair.cursorPositionChanged.connect(formater_clair) self.copier_clair = QPushButton('Copier le texte en clair') self.copier_clair.clicked.connect(partial(self.copier, widget=self.clair)) txt_code = QLabel("<b>Texte codé</b>") self.code = QTextEdit() self.code.setMinimumSize(*size) self.code.textChanged.connect(self.code_modifie) self.code.cursorPositionChanged.connect(partial(self.formater, widget=self.code)) self.copier_code = QPushButton('Copier le texte codé') self.copier_code.clicked.connect(partial(self.copier, widget=self.code)) self.textes.addWidget(txt_clair, 0, 0) self.textes.addItem(QSpacerItem(50, 1), 0, 1) self.textes.addWidget(txt_code, 0, 2) self.textes.addWidget(self.clair, 1, 0) self.textes.addWidget(self.code, 1, 2) self.textes.addWidget(self.copier_code, 2, 2) self.textes.addWidget(self.copier_clair, 2, 0) self.table = QGridLayout() self.table.setSpacing(3) self.cases = {} self.table.addWidget(QLabel("Codé : ", self), 0, 0) self.table.addWidget(QLabel("Clair : ", self), 1, 0) ##self.table.setColumnStretch(0, 100) for i, l in enumerate(majuscules): lettre = QLineEdit(l, self) lettre.setAlignment(Qt.AlignCenter) lettre.setReadOnly(True) lettre.setEnabled(False) self.table.addWidget(lettre, 0, i + 1) ##self.table.setColumnStretch(i + 1, 1) for i, l in enumerate(majuscules): c = self.cases[l] = CaseLettre(self) c.setMaxLength(1) self.table.addWidget(c, 1, i + 1) c.textChanged.connect(self.decoder) self.sizer.addLayout(self.textes) self.sizer.addLayout(self.table) self.setLayout(self.sizer) ##self.adjustSize() self.couleur1 = "5A28BE" # sky blue self.couleur2 = "C86400" # Lime Green self.couleur_position = "FFCDB3" self.reg = 
re.compile("([-A-Za-z]|<##>|</##>)+") ##couleur_position = wx.Color(255, 205, 179) # FFCDB3 ##couleur1 = wx.Color(90, 40, 190) # 5A28BE ##couleur2 = wx.Color(200, 100, 0) # C86400 ##black = wx.Color(0, 0, 0) # 000000 ##white = wx.Color(255, 255, 255) # FFFFFF ##self.special = wx.TextAttr(wx.NullColour, couleur_position) ##self.fond = wx.TextAttr(couleur1, wx.NullColour) #"sky blue" ##self.fond2 = wx.TextAttr(couleur2, wx.NullColour) # "Lime Green" ##self.defaut = wx.TextAttr(black, white) ## ##self.Bind(wx.EVT_IDLE, self.OnIdle) timer = QTimer(self) timer.timeout.connect(self.OnIdle) timer.start(100) # DEBUG: ##self.code.setPlainText('WR IRAMXPZRHRDZ IK HRYYOVR AL IRYYBKY RYZ NOALWLZR POM WR NOLZ FKR W BD O VOMIR WRY YLVDRY IR PBDAZKOZLBD RZ WRY RYPOARY RDZMR WRY HBZY OWBMY FKR I QOELZKIR BD VMBKPR WRY WRZZMRY ALDF POM ALDF') def copier(self, evt=None, widget=None): self.vers_presse_papier(widget.toPlainText()) def DlgModifierCle(self, evt=None): while True: text, ok = QInputDialog.getText(self, "Modifier la clé", "La clé doit être une permutation de l'alphabet,\n" "ou un chiffre qui indique de combien l'alphabet est décalé.", text=str(self.cle)) if ok: try: self.modifier_cle(text) except: print_error() continue break def generer_cle(self): l = list(majuscules) shuffle(l) self.cle = ''.join(l) def modifier_cle(self, cle): cle = cle.strip().upper() if cle.isdigit(): n = int(cle) cle = majuscules[n:] + majuscules[:n] # On teste qu'il s'agit bien d'une permutation de l'alphabet: assert ''.join(sorted(cle)) == majuscules self.cle = cle def coder(self, evt=None, cle=None, espaces=False): cle = (self.cle if cle is None else cle) clair = self.clair.toPlainText().upper() for key, val in dict_accents.items(): clair = clair.replace(key, val) d = dict(zip(majuscules, cle)) code = ''.join(d.get(s, ' ') for s in clair) code = re.sub(' +', ' ', code) if not espaces: code = code.replace(' ', '') self.code.setPlainText(code) return code @staticmethod def _vigenere(l1, l2): return 
chr((ord(l1) + ord(l2) - 130)%26 + 65) def coder_vigenere(self, evt=None, msg=None, cle=None, ask=False): def gen(): length = len(cle) n = 0 for car in clair: if car.isalpha(): yield self._vigenere(cle[n%length], car) n += 1 else: yield car if ask: self.DlgModifierCleVigenere() if cle is None: cle = self.cle_vigenere if msg is None: msg = self.clair.toPlainText() msg = msg.upper() if cle is None: pass # Pour l'instant, les espaces ne sont pas supportés clair = msg.replace(' ', '') clair = self.clair.toPlainText().upper() for key, val in dict_accents.items(): clair = clair.replace(key, val) code = ''.join(gen()) self.code.setPlainText(code) return code def DlgModifierCleVigenere(self, evt=None): while True: text, ok = QInputDialog.getText(self, "Modifier la clé pour Vigenère", "La clé doit contenir uniquement des lettres.", text=self.cle_vigenere) if ok: text = text.strip() if not text.isalpha(): continue self.cle_vigenere = text.upper() break def decoder(self, txt=None): code = self.code.toPlainText().upper() def f(s): if s in majuscules: return self.cases[s].text() or self.symbole return s clair = ''.join(f(s) for s in code) self.clair.setPlainText(clair) def code_modifie(self, txt=None): self.decoder(txt) self.formater(txt, widget=self.code) def formater(self, evt=None, widget=None): ##evt.Skip() if self._freeze: return self.widget_modifie = widget def _formater(self, widget_modifie): # Impossible de formater les 2 textes de la même manière s'ils # ne sont pas de la même longueur. 
# Cela ne devrait se produire que temporairement (par ex., # l'utilisateur copie un nouveau texte) if len(self.code.toPlainText()) != len(self.clair.toPlainText()): if self.code.toPlainText() and self.clair.toPlainText(): print('Warning: le message codé et le message en clair ne sont ' 'pas de même longueur.') return def colorier(m, col1=[self.couleur1], col2=[self.couleur2]): s = m.group(0) s = "<font color='#%s'>%s</font>" % (col1[0], s) col1[0], col2[0] = col2[0], col1[0] return s self._freeze = True pos = widget_modifie.textCursor().position() for w in (self.code, self.clair): txt = w.toPlainText() if pos != len(txt): txt = txt[:pos] + '<##>' + txt[pos] + '</##>' + txt[pos + 1:] new_txt = re.sub(self.reg, colorier, txt) new_txt = new_txt.replace("<##>", "<font style='background-color: #%s;'>" % self.couleur_position) new_txt = new_txt.replace("</##>", "</font>") w.setHtml(new_txt) cursor = widget_modifie.textCursor() cursor.setPosition(pos) widget_modifie.setTextCursor(cursor) self._freeze = False self.widget_modifie = None def OnIdle(self, evt=None): if self.widget_modifie is not None and not self.parent.parent.closing: self._formater(self.widget_modifie)
wxgeo/geophar
wxgeometrie/modules/cryptographie/__init__.py
Python
gpl-2.0
12,975
0.006816
from string import printable import re from urlparse import urlunparse from itertools import chain, ifilter from fnmatch import fnmatch from werkzeug import cached_property from swarm import transport, swarm from swarm.ext.http.helpers import parser, URL from ..text import PageText from .tree import TrieTree as Tree class DescribedMixin(object): @classmethod def info(cls): if cls.__doc__: yield cls.__doc__ for base in cls.__bases__: if issubclass(base, DescribedMixin): for info in base.info(): yield info @classmethod def describe(cls): return 'Extracts ' + ' and '.join(cls.info()) class Datasource(object): __OMIT = ('dataset',) tags = None def __getstate__(self): return dict((k, v) for (k, v) in self.__dict__.items() \ if not k in self.__class__.__OMIT) RANGE = printable def __init__(self, dataset_path, **kwargs): self.dataset_path = dataset_path self.__dict__.update(kwargs) @cached_property def dataset(self): if self.dataset_path is not None: return Tree.load(self.dataset_path) def items(self): if False: yield def links(self): if False: yield class LinksMixin(DescribedMixin): """links""" tests = ('deny_scheme', 'allow_scheme', 'deny_domain', 'allow_domain', 'deny_url', 'allow_url',) modifiers = ('drop_fragment',) allow_schemas = None deny_schemas = ['javascript', 'mailto',] allow_domains = None deny_domains = None allow_urls = None deny_urls = [ '*.gif', '*.jpeg', '*.jpg', '*.css', '*.js', '*.png', '*.ico', '*.xml' ] unique = True cmdopts = { 'allow_schemas':{'default':allow_schemas, 'nargs':'+', 'help':'Allow only listed schemas'}, 'deny_schemas':{'default':deny_schemas, 'nargs':'+', 'help':'Deny listed schemas'}, 'allow_domains':{'default':allow_domains, 'nargs':'+', 'help':'Allow only listed domains (dot started treated as suffixes)'}, 'deny_domains':{'default':deny_domains, 'nargs':'+', 'help':'Deny listed domains (dot started treated as suffixes)'}, 'allow_urls':{'default':allow_urls, 'nargs':'+', 'help':'Regexps for allowed urls'}, 
'deny_urls':{'default':deny_urls, 'nargs':'+', 'help':'Regexps for denied urls'}, 'no_unique':{'dest':'unique', 'default':unique, 'action':'store_false', 'help':'Disable following unique urls only'}, } def fnmatch(self, value, matchers, ret): if any((fnmatch(value, matcher) for matcher in matchers)): return ret return not ret def deny_scheme(self, url): if not self.deny_schemas: return True return self.fnmatch(url.parsed.scheme, self.deny_schemas, False) def allow_scheme(self, url): if not self.allow_schemas: return True return self.fnmatch(url.parsed.scheme, self.allow_schemas, True) def deny_domain(self, url): if not self.deny_domains: return True return self.fnmatch(url.parsed.hostname, self.deny_domains, False) def allow_domain(self, url): if not self.allow_domains: return True return self.fnmatch(url.parsed.hostname, self.allow_domains, True) def allow_url(self, url): if not self.allow_urls: return True return self.fnmatch(urlunparse((None, None) + url.parsed[2:]), self.allow_urls, True) def deny_url(self, url): if not self.deny_urls: return True return self.fnmatch(urlunparse((None, None) + url.parsed[2:]), self.deny_urls, False) def drop_fragment(self, url): if not url.parsed.fragment: return url else: return URL(urlunparse(url.parsed[:5] + ('',))) def allowed(self, url): return all(getattr(self, test)(url) for test in self.tests) def modified(self, url): for modifier in (getattr(self, modifier) for modifier in self.modifiers): url = modifier(url) return url @parser def links(self, html): from swarm import swarm if html is None: return html.make_links_absolute(transport.url) for element, attribute, link, pos in html.iterlinks(): url = URL(link) if not self.allowed(url): continue url = self.modified(url) if self.unique and self.is_unique(url): yield url def is_unique(self, url): if url in getattr(self, '_urls', []): return False elif not hasattr(self, '_urls'): self._urls = [] self._urls.append(url) return True class XpathParserMixin(DescribedMixin): """xpath 
selected content""" @parser def items(html): if False: yield class ReadableMixin(DescribedMixin): """textual content""" greed = 1 def items(self): yield PageText(transport.content, url=transport.url)\ .winner(greed=self.greed) class CmdlineArgsMixin(object): @classmethod def get_opts(cls): containers = cls.__bases__ + (cls,) return dict(ifilter(bool, chain(*(getattr(c, 'cmdopts', {}).items() for c in containers)))) @classmethod def populate_parser(cls, parser): for optname, kwargs in cls.get_opts().items(): parser.add_argument('--%s'%optname.replace('_', '-'), **kwargs) def __unicode__(self): descr = 'Extract ' + ' and '.join(self.info()) opts = [] for optname in self.get_opts().keys(): optvalue = getattr(self, optname, None) if optvalue and not optvalue == getattr(self.__class__, optname, None): opts += '%s:%s'%(optname, optvalue), return descr + (' (%s)'%(', '.join(opts)) if opts else '') class NoContentDatasource(CmdlineArgsMixin, LinksMixin, Datasource): pass class XpathContentOnlyDatasource(CmdlineArgsMixin, XpathParserMixin, Datasource): pass class XpathDatasource(CmdlineArgsMixin, LinksMixin, XpathParserMixin, Datasource): pass class ReadableContentOnlyDatasource(CmdlineArgsMixin, ReadableMixin, Datasource): pass class ReadableDatasource(CmdlineArgsMixin, LinksMixin, ReadableMixin, Datasource): pass
denz/swarm-crawler
swarm_crawler/dataset/datasource.py
Python
bsd-3-clause
7,406
0.009317
class River(object): def __init__(self, index_name=None, index_type=None, bulk_size=100, bulk_timeout=None): self.name = index_name self.index_name = index_name self.index_type = index_type self.bulk_size = bulk_size self.bulk_timeout = bulk_timeout def serialize(self): res = self._serialize() index = {} if self.name: index['name'] = self.name if self.index_name: index['index'] = self.index_name if self.index_type: index['type'] = self.index_type if self.bulk_size: index['bulk_size'] = self.bulk_size if self.bulk_timeout: index['bulk_timeout'] = self.bulk_timeout if index: res['index'] = index return res def __repr__(self): return str(self.serialize()) def _serialize(self): raise NotImplementedError class RabbitMQRiver(River): type = "rabbitmq" def __init__(self, host="localhost", port=5672, user="guest", password="guest", vhost="/", queue="es", exchange="es", routing_key="es", **kwargs): super(RabbitMQRiver, self).__init__(**kwargs) self.host = host self.port = port self.user = user self.password = password self.vhost = vhost self.queue = queue self.exchange = exchange self.routing_key = routing_key def _serialize(self): return { "type": self.type, self.type: { "host": self.host, "port": self.port, "user": self.user, "pass": self.password, "vhost": self.vhost, "queue": self.queue, "exchange": self.exchange, "routing_key": self.routing_key } } class TwitterRiver(River): type = "twitter" def __init__(self, user=None, password=None, **kwargs): self.user = user self.password = password self.consumer_key = kwargs.pop('consumer_key', None) self.consumer_secret = kwargs.pop('consumer_secret', None) self.access_token = kwargs.pop('access_token', None) self.access_token_secret = kwargs.pop('access_token_secret', None) # These filters may be lists or comma-separated strings of values self.tracks = kwargs.pop('tracks', None) self.follow = kwargs.pop('follow', None) self.locations = kwargs.pop('locations', None) super(TwitterRiver, self).__init__(**kwargs) def 
_serialize(self): result = {"type": self.type} if self.user and self.password: result[self.type] = {"user": self.user, "password": self.password} elif (self.consumer_key and self.consumer_secret and self.access_token and self.access_token_secret): result[self.type] = {"oauth": { "consumer_key": self.consumer_key, "consumer_secret": self.consumer_secret, "access_token": self.access_token, "access_token_secret": self.access_token_secret, } } else: raise ValueError("Twitter river requires authentication by username/password or OAuth") filter = {} if self.tracks: filter['tracks'] = self.tracks if self.follow: filter['follow'] = self.follow if self.locations: filter['locations'] = self.locations if filter: result[self.type]['filter'] = filter return result class CouchDBRiver(River): type = "couchdb" def __init__(self, host="localhost", port=5984, db="mydb", filter=None, filter_params=None, script=None, user=None, password=None, **kwargs): super(CouchDBRiver, self).__init__(**kwargs) self.host = host self.port = port self.db = db self.filter = filter self.filter_params = filter_params self.script = script self.user = user self.password = password def serialize(self): result = { "type": self.type, self.type: { "host": self.host, "port": self.port, "db": self.db, "filter": self.filter, } } if self.filter_params is not None: result[self.type]["filter_params"] = self.filter_params if self.script is not None: result[self.type]["script"] = self.script if self.user is not None: result[self.type]["user"] = self.user if self.password is not None: result[self.type]["password"] = self.password return result
openlabs/pyes
pyes/rivers.py
Python
bsd-3-clause
4,849
0.000619
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org> # Gael Varoquaux <gael.varoquaux@normalesup.org> # License: BSD 3 clause import numpy as np import scipy as sp from scipy import ndimage from nose.tools import assert_equal, assert_true from numpy.testing import assert_raises from sklearn.feature_extraction.image import ( img_to_graph, grid_to_graph, extract_patches_2d, reconstruct_from_patches_2d, PatchExtractor, extract_patches) from sklearn.utils.graph import connected_components from sklearn.utils.testing import SkipTest from sklearn.utils.fixes import sp_version if sp_version < (0, 12): raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and " "thus does not include the scipy.misc.face() image.") def test_img_to_graph(): x, y = np.mgrid[:4, :4] - 10 grad_x = img_to_graph(x) grad_y = img_to_graph(y) assert_equal(grad_x.nnz, grad_y.nnz) # Negative elements are the diagonal: the elements of the original # image. Positive elements are the values of the gradient, they # should all be equal on grad_x and grad_y np.testing.assert_array_equal(grad_x.data[grad_x.data > 0], grad_y.data[grad_y.data > 0]) def test_grid_to_graph(): # Checking that the function works with graphs containing no edges size = 2 roi_size = 1 # Generating two convex parts with one vertex # Thus, edges will be empty in _to_graph mask = np.zeros((size, size), dtype=np.bool) mask[0:roi_size, 0:roi_size] = True mask[-roi_size:, -roi_size:] = True mask = mask.reshape(size ** 2) A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray) assert_true(connected_components(A)[0] == 2) # Checking that the function works whatever the type of mask is mask = np.ones((size, size), dtype=np.int16) A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask) assert_true(connected_components(A)[0] == 1) # Checking dtype of the graph mask = np.ones((size, size)) A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool) assert_true(A.dtype == np.bool) A = 
grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int) assert_true(A.dtype == np.int) A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float64) assert_true(A.dtype == np.float64) def test_connect_regions(): try: face = sp.face(gray=True) except AttributeError: # Newer versions of scipy have face in misc from scipy import misc face = misc.face(gray=True) for thr in (50, 150): mask = face > thr graph = img_to_graph(face, mask) assert_equal(ndimage.label(mask)[1], connected_components(graph)[0]) def test_connect_regions_with_grid(): try: face = sp.face(gray=True) except AttributeError: # Newer versions of scipy have face in misc from scipy import misc face = misc.face(gray=True) mask = face > 50 graph = grid_to_graph(*face.shape, mask=mask) assert_equal(ndimage.label(mask)[1], connected_components(graph)[0]) mask = face > 150 graph = grid_to_graph(*face.shape, mask=mask, dtype=None) assert_equal(ndimage.label(mask)[1], connected_components(graph)[0]) def _downsampled_face(): try: face = sp.face(gray=True) except AttributeError: # Newer versions of scipy have face in misc from scipy import misc face = misc.face(gray=True) face = face.astype(np.float32) face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]) face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]) face = face.astype(np.float32) face /= 16.0 return face def _orange_face(face=None): face = _downsampled_face() if face is None else face face_color = np.zeros(face.shape + (3,)) face_color[:, :, 0] = 256 - face face_color[:, :, 1] = 256 - face / 2 face_color[:, :, 2] = 256 - face / 4 return face_color def _make_images(face=None): face = _downsampled_face() if face is None else face # make a collection of faces images = np.zeros((3,) + face.shape) images[0] = face images[1] = face + 1 images[2] = face + 2 return images downsampled_face = _downsampled_face() orange_face = _orange_face(downsampled_face) face_collection = 
_make_images(downsampled_face) def test_extract_patches_all(): face = downsampled_face i_h, i_w = face.shape p_h, p_w = 16, 16 expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) patches = extract_patches_2d(face, (p_h, p_w)) assert_equal(patches.shape, (expected_n_patches, p_h, p_w)) def test_extract_patches_all_color(): face = orange_face i_h, i_w = face.shape[:2] p_h, p_w = 16, 16 expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) patches = extract_patches_2d(face, (p_h, p_w)) assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3)) def test_extract_patches_all_rect(): face = downsampled_face face = face[:, 32:97] i_h, i_w = face.shape p_h, p_w = 16, 12 expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) patches = extract_patches_2d(face, (p_h, p_w)) assert_equal(patches.shape, (expected_n_patches, p_h, p_w)) def test_extract_patches_max_patches(): face = downsampled_face i_h, i_w = face.shape p_h, p_w = 16, 16 patches = extract_patches_2d(face, (p_h, p_w), max_patches=100) assert_equal(patches.shape, (100, p_h, p_w)) expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1)) patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5) assert_equal(patches.shape, (expected_n_patches, p_h, p_w)) assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w), max_patches=2.0) assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w), max_patches=-1.0) def test_reconstruct_patches_perfect(): face = downsampled_face p_h, p_w = 16, 16 patches = extract_patches_2d(face, (p_h, p_w)) face_reconstructed = reconstruct_from_patches_2d(patches, face.shape) np.testing.assert_array_almost_equal(face, face_reconstructed) def test_reconstruct_patches_perfect_color(): face = orange_face p_h, p_w = 16, 16 patches = extract_patches_2d(face, (p_h, p_w)) face_reconstructed = reconstruct_from_patches_2d(patches, face.shape) np.testing.assert_array_almost_equal(face, face_reconstructed) def test_patch_extractor_fit(): faces = face_collection extr = 
PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0) assert_true(extr == extr.fit(faces)) def test_patch_extractor_max_patches(): faces = face_collection i_h, i_w = faces.shape[1:3] p_h, p_w = 8, 8 max_patches = 100 expected_n_patches = len(faces) * max_patches extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches, random_state=0) patches = extr.transform(faces) assert_true(patches.shape == (expected_n_patches, p_h, p_w)) max_patches = 0.5 expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1) * max_patches) extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches, random_state=0) patches = extr.transform(faces) assert_true(patches.shape == (expected_n_patches, p_h, p_w)) def test_patch_extractor_max_patches_default(): faces = face_collection extr = PatchExtractor(max_patches=100, random_state=0) patches = extr.transform(faces) assert_equal(patches.shape, (len(faces) * 100, 19, 25)) def test_patch_extractor_all_patches(): faces = face_collection i_h, i_w = faces.shape[1:3] p_h, p_w = 8, 8 expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1) extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0) patches = extr.transform(faces) assert_true(patches.shape == (expected_n_patches, p_h, p_w)) def test_patch_extractor_color(): faces = _make_images(orange_face) i_h, i_w = faces.shape[1:3] p_h, p_w = 8, 8 expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1) extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0) patches = extr.transform(faces) assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3)) def test_extract_patches_strided(): image_shapes_1D = [(10,), (10,), (11,), (10,)] patch_sizes_1D = [(1,), (2,), (3,), (8,)] patch_steps_1D = [(1,), (1,), (4,), (2,)] expected_views_1D = [(10,), (9,), (3,), (2,)] last_patch_1D = [(10,), (8,), (8,), (2,)] image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)] patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)] 
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)] expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)] last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)] image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)] patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)] patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)] expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)] last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)] image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D expected_views = expected_views_1D + expected_views_2D + expected_views_3D last_patches = last_patch_1D + last_patch_2D + last_patch_3D for (image_shape, patch_size, patch_step, expected_view, last_patch) in zip(image_shapes, patch_sizes, patch_steps, expected_views, last_patches): image = np.arange(np.prod(image_shape)).reshape(image_shape) patches = extract_patches(image, patch_shape=patch_size, extraction_step=patch_step) ndim = len(image_shape) assert_true(patches.shape[:ndim] == expected_view) last_patch_slices = [slice(i, i + j, None) for i, j in zip(last_patch, patch_size)] assert_true((patches[[slice(-1, None, None)] * ndim] == image[last_patch_slices].squeeze()).all()) def test_extract_patches_square(): # test same patch size for all dimensions face = downsampled_face i_h, i_w = face.shape p = 8 expected_n_patches = ((i_h - p + 1), (i_w - p + 1)) patches = extract_patches(face, patch_shape=p) assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1], p, p)) def test_width_patch(): # width and height of the patch should be less than the image x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) assert_raises(ValueError, extract_patches_2d, x, (4, 1)) assert_raises(ValueError, extract_patches_2d, x, (1, 4))
toastedcornflakes/scikit-learn
sklearn/feature_extraction/tests/test_image.py
Python
bsd-3-clause
11,187
0.000089
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from six import add_metaclass, text_type as unicode

import inspect
import os.path

from robot import utils
from robot.errors import DataError
from robot.model import Tags

from .loggerhelper import AbstractLoggerProxy
from .logger import LOGGER

if utils.is_jython:
    from java.lang import Object
    from java.util import HashMap


class _RecursionAvoidingMetaclass(type):
    """Metaclass to wrap listener methods so that they cannot cause recursion.

    Recursion would otherwise happen if one listener logs something and that
    message is received and logged again by log_message or message method.
    """

    def __new__(cls, name, bases, dct):
        # Wrap every public routine of the class; a shared `_calling_method`
        # flag makes re-entrant calls no-ops.
        for attr, value in dct.items():
            if not attr.startswith('_') and inspect.isroutine(value):
                dct[attr] = cls._wrap_listener_method(value)
        dct['_calling_method'] = False
        return type.__new__(cls, name, bases, dct)

    @staticmethod
    def _wrap_listener_method(method):
        def wrapped(self, *args):
            # Silently skip the call if we are already inside another
            # listener method (i.e. the call would be recursive).
            if not self._calling_method:
                self._calling_method = True
                method(self, *args)
                self._calling_method = False
        return wrapped


@add_metaclass(_RecursionAvoidingMetaclass)
class Listeners(object):
    """Dispatches execution events to all registered listeners.

    Supports both listener API version 1 (positional arguments) and
    version 2 (name + attribute dictionary).
    """
    _start_attrs = ('id', 'doc', 'starttime', 'longname')
    _end_attrs = _start_attrs + ('endtime', 'elapsedtime', 'status', 'message')
    # Keyword events add 'args' and drop the attributes prefixed with '-'.
    _kw_extra_attrs = ('args', '-id', '-longname', '-message')

    def __init__(self, listeners):
        self._listeners = self._import_listeners(listeners)
        self._running_test = False
        self._setup_or_teardown_type = None

    def __bool__(self):
        return bool(self._listeners)

    #PY2
    def __nonzero__(self):
        return self.__bool__()

    def _import_listeners(self, listener_data):
        """Import listeners given as (name, args) pairs; log failures."""
        listeners = []
        for name, args in listener_data:
            try:
                listeners.append(ListenerProxy(name, args))
            except DataError as err:
                if args:
                    name += ':' + ':'.join(args)
                LOGGER.error("Taking listener '%s' into use failed: %s"
                             % (name, unicode(err)))
        return listeners

    def start_suite(self, suite):
        for listener in self._listeners:
            if listener.version == 1:
                listener.call_method(listener.start_suite, suite.name,
                                     suite.doc)
            else:
                attrs = self._get_start_attrs(suite, 'metadata')
                attrs.update(self._get_suite_attrs(suite))
                listener.call_method(listener.start_suite, suite.name, attrs)

    def _get_suite_attrs(self, suite):
        return {
            'tests' : [t.name for t in suite.tests],
            'suites': [s.name for s in suite.suites],
            'totaltests': suite.test_count,
            'source': suite.source or ''
        }

    def end_suite(self, suite):
        for listener in self._listeners:
            self._notify_end_suite(listener, suite)

    def _notify_end_suite(self, listener, suite):
        if listener.version == 1:
            listener.call_method(listener.end_suite, suite.status,
                                 suite.full_message)
        else:
            attrs = self._get_end_attrs(suite, 'metadata')
            attrs['statistics'] = suite.stat_message
            attrs.update(self._get_suite_attrs(suite))
            listener.call_method(listener.end_suite, suite.name, attrs)

    def start_test(self, test):
        self._running_test = True
        for listener in self._listeners:
            if listener.version == 1:
                listener.call_method(listener.start_test, test.name, test.doc,
                                     list(test.tags))
            else:
                attrs = self._get_start_attrs(test, 'tags')
                attrs['critical'] = 'yes' if test.critical else 'no'
                attrs['template'] = test.template or ''
                listener.call_method(listener.start_test, test.name, attrs)

    def end_test(self, test):
        self._running_test = False
        for listener in self._listeners:
            self._notify_end_test(listener, test)

    def _notify_end_test(self, listener, test):
        if listener.version == 1:
            listener.call_method(listener.end_test, test.status, test.message)
        else:
            attrs = self._get_end_attrs(test, 'tags')
            attrs['critical'] = 'yes' if test.critical else 'no'
            attrs['template'] = test.template or ''
            listener.call_method(listener.end_test, test.name, attrs)

    def start_keyword(self, kw):
        for listener in self._listeners:
            if listener.version == 1:
                listener.call_method(listener.start_keyword, kw.name, kw.args)
            else:
                attrs = self._get_start_attrs(kw, *self._kw_extra_attrs)
                attrs['type'] = self._get_keyword_type(kw, start=True)
                listener.call_method(listener.start_keyword, kw.name, attrs)

    def end_keyword(self, kw):
        for listener in self._listeners:
            if listener.version == 1:
                listener.call_method(listener.end_keyword, kw.status)
            else:
                attrs = self._get_end_attrs(kw, *self._kw_extra_attrs)
                attrs['type'] = self._get_keyword_type(kw, start=False)
                listener.call_method(listener.end_keyword, kw.name, attrs)

    def _get_keyword_type(self, kw, start=True):
        # When running setup or teardown, only the top level keyword has type
        # set to setup/teardown but we want to pass that type also to all
        # start/end_keyword listener methods called below that keyword.
        if kw.type == 'kw':
            return self._setup_or_teardown_type or 'Keyword'
        kw_type = self._get_setup_or_teardown_type(kw)
        self._setup_or_teardown_type = kw_type if start else None
        return kw_type

    def _get_setup_or_teardown_type(self, kw):
        return '%s %s' % (('Test' if self._running_test else 'Suite'),
                          kw.type.title())

    def log_message(self, msg):
        # log_message/message exist only in listener API version 2.
        for listener in self._listeners:
            if listener.version == 2:
                listener.call_method(listener.log_message,
                                     self._create_msg_dict(msg))

    def message(self, msg):
        for listener in self._listeners:
            if listener.version == 2:
                listener.call_method(listener.message,
                                     self._create_msg_dict(msg))

    def _create_msg_dict(self, msg):
        return {'timestamp': msg.timestamp, 'message': msg.message,
                'level': msg.level, 'html': 'yes' if msg.html else 'no'}

    def output_file(self, name, path):
        # Dispatches to output_file/report_file/log_file/... based on `name`.
        for listener in self._listeners:
            listener.call_method(getattr(listener, '%s_file' % name.lower()),
                                 path)

    def close(self):
        for listener in self._listeners:
            listener.call_method(listener.close)

    def _get_start_attrs(self, item, *extra):
        return self._get_attrs(item, self._start_attrs, extra)

    def _get_end_attrs(self, item, *extra):
        return self._get_attrs(item, self._end_attrs, extra)

    def _get_attrs(self, item, default, extra):
        names = self._get_attr_names(default, extra)
        return dict((n, self._get_attr_value(item, n)) for n in names)

    def _get_attr_names(self, default, extra):
        # Extra names prefixed with '-' remove a default attribute instead of
        # adding a new one.
        names = list(default)
        for name in extra:
            if not name.startswith('-'):
                names.append(name)
            elif name[1:] in names:
                names.remove(name[1:])
        return names

    def _get_attr_value(self, item, name):
        value = getattr(item, name)
        return self._take_copy_of_mutable_value(value)

    def _take_copy_of_mutable_value(self, value):
        # Copy so listeners cannot mutate the framework's internal state.
        if isinstance(value, (dict, utils.NormalizedDict)):
            return dict(value)
        if isinstance(value, (list, tuple, Tags)):
            return list(value)
        return value


class ListenerProxy(AbstractLoggerProxy):
    """Wraps a single imported listener and guards every call into it."""
    _methods = ['start_suite', 'end_suite', 'start_test', 'end_test',
                'start_keyword', 'end_keyword', 'log_message',
                'message', 'output_file', 'report_file', 'log_file',
                'debug_file', 'xunit_file', 'close']

    def __init__(self, name, args):
        listener = self._import_listener(name, args)
        AbstractLoggerProxy.__init__(self, listener)
        self.name = name
        self.version = self._get_version(listener)
        self.is_java = self._is_java(listener)

    def _is_java(self, listener):
        return utils.is_jython and isinstance(listener, Object)

    def _import_listener(self, name, args):
        importer = utils.Importer('listener')
        return importer.import_class_or_module(os.path.normpath(name),
                                               instantiate_with_args=args)

    def _get_version(self, listener):
        # Missing or non-integer ROBOT_LISTENER_API_VERSION defaults to 1.
        try:
            return int(getattr(listener, 'ROBOT_LISTENER_API_VERSION', 1))
        except ValueError:
            return 1

    def call_method(self, method, *args):
        if self.is_java:
            # Java listeners receive java.util.HashMap instead of dict.
            args = [self._to_map(a) if isinstance(a, dict) else a
                    for a in args]
        try:
            method(*args)
        except:
            # Deliberately broad: a misbehaving listener must never crash
            # test execution; the error is logged instead.
            message, details = utils.get_error_details()
            LOGGER.error("Calling listener method '%s' of listener '%s' failed: %s"
                         % (method.__name__, self.name, message))
            LOGGER.info("Details:\n%s" % details)

    def _to_map(self, dictionary):
        """Convert a Python dict to a java.util.HashMap (Jython only)."""
        # BUGFIX: was `dictionary.iteritems()`, which does not exist on
        # Python 3 although this module otherwise supports both Python 2
        # and 3 (six import, __bool__/__nonzero__ pair).  `items()` works
        # on both.  Also renamed the local to avoid shadowing builtin map.
        hashmap = HashMap()
        for key, value in dictionary.items():
            hashmap.put(key, value)
        return hashmap


# TODO: Remove in 2.9, left here in 2.8.5 for backwards compatibility.
# Consider also decoupling importing from __init__ to ease extending.
_ListenerProxy = ListenerProxy
userzimmermann/robotframework-python3
src/robot/output/listeners.py
Python
apache-2.0
10,498
0.001048
#!/usr/bin/env python2 ''' Description: Author: Ronald van Haren, NLeSC (r.vanharen@esciencecenter.nl) Created: - Last Modified: - License: Apache 2.0 Notes: - ''' from lxml.html import parse import csv import urllib2 from lxml import html import numbers import json import os import utils from numpy import vstack import argparse class get_knmi_reference_data: ''' description ''' def __init__(self, opts): #self.outputdir = opts.outputdir self.csvfile = opts.csvfile self.outputdir = opts.outputdir self.keep = opts.keep self.check_output_dir() if len(opts.stationid)==0: self.get_station_ids() else: self.stationdids = [opts.stationid] self.download_station_data() self.get_station_locations() def get_station_ids(self): ''' get all stationids from the KNMI website ''' self.url = 'http://www.knmi.nl/klimatologie/uurgegevens/' page = parse(self.url) # get list of ids rows = page.xpath(".//tbody/@id") #self.stationids = [int(stationid[3:]) for stationid in rows] self.stationids = [str(stationid) for stationid in rows] def download_station_data(self): page = parse(self.url) for stationid in self.stationids: print stationid relpaths = page.xpath(".//tbody[@id='" + stationid + "']/tr/td/span/a/@href") for path in relpaths: fullpath = os.path.join(self.url, path) request = urllib2.urlopen(fullpath) filename = os.path.basename(path) outputfile = os.path.join(self.outputdir, filename) if self.keep: if os.path.exists(outputfile): # check if filesize is not null if os.path.getsize(outputfile) > 0: # file exists and is not null, continue next iteration continue else: # file exists but is null, so remove and redownload os.remove(outputfile) elif os.path.exists(outputfile): os.remove(outputfile) #save output = open(outputfile, "w") output.write(request.read()) output.close() def get_station_locations(self): # get station names for stationids url = 'http://www.knmi.nl/klimatologie/metadata/stationslijst.html' page = parse(url) url_metadata = page.xpath(".//table/tr/td/a/@href") 
station_name_id = [c.text for c in page.xpath(".//table/tr/td/a")] station_id = [s.split()[0] for s in station_name_id] station_names = [" ".join(s.split()[1:]) for s in station_name_id] for idx, stationid in enumerate(station_id): station_url = os.path.join(os.path.split(url)[0], url_metadata[idx]) page = parse(station_url) rows = [c.text for c in page.xpath(".//table/tr/td")] idx_position = rows.index('Positie:') + 1 idx_startdate = rows.index('Startdatum:') + 1 lat, lon = rows[idx_position].encode('UTF-8').replace( '\xc2\xb0','').replace(' N.B. ', ',').replace( 'O.L.','').strip().split(',') lat,lon = self.latlon_conversion(lat,lon) try: dataout = vstack((dataout, [station_id[idx], station_names[idx], lat, lon, station_url])) except NameError: dataout = [station_id[idx], station_names[idx], lat, lon, station_url] header = ['station_id', 'station_name','latitude', 'longitude', 'url'] dataout = vstack((header, dataout)) # write to csv file utils.write_csvfile(self.csvfile, dataout) # get station locations pass def latlon_conversion(self, lat, lon): ''' conversion of GPS position to lat/lon decimals example string for lat and lon input: "52 11'" ''' # latitude conversion latd = lat.replace("'","").split() lat = float(latd[0]) + float(latd[1])/60 # longitude conversion lond = lon.replace("'","").split() lon = float(lond[0]) + float(lond[1])/60 return lat,lon def check_output_dir(self): ''' check if outputdir exists and create if not ''' if not os.path.exists(self.outputdir): os.makedirs(self.outputdir) if __name__ == "__main__": # define argument menu description = 'Get data KNMI reference stations' parser = argparse.ArgumentParser(description=description) # fill argument groups parser.add_argument('-o', '--outputdir', help='Data output directory', default=os.path.join(os.getcwd(),'KNMI'), required=False) parser.add_argument('-s', '--stationid', help='Station id', default='', required=False, action='store') parser.add_argument('-c', '--csvfile', help='CSV data file', 
required=True, action='store') parser.add_argument('-k', '--keep', help='Keep downloaded files', required=False, action='store_true') parser.add_argument('-l', '--log', help='Log level', choices=utils.LOG_LEVELS_LIST, default=utils.DEFAULT_LOG_LEVEL) # extract user entered arguments opts = parser.parse_args() # define logger logname = os.path.basename(__file__) + '.log' logger = utils.start_logging(filename=logname, level=opts.log) # process data get_knmi_reference_data(opts)
rvanharen/SitC
knmi_getdata.py
Python
apache-2.0
5,962
0.003187