prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
from indiefilmrentals.products.models import *
from shop_simplecategories.models import *


def categories(request):
    """Template context processor exposing every Category as 'categories'."""
    all_categories = Category.objects.all()
    return {'categories': all_categories}
flow.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [ '_api/airflow/_vendor', '_api/airflow/api', '_api/airflow/bin', '_api/airflow/config_templates', '_api/airflow/configuration', '_api/airflow/contrib/auth', '_api/airflow/contrib/example_dags', '_api/airflow/contrib/index.rst', '_api/airflow/contrib/kubernetes', '_api/airflow/contrib/task_runner', '_api/airflow/contrib/utils', '_api/airflow/dag', '_api/airflow/default_login', '_api/airflow/example_dags', '_api/airflow/exceptions', '_api/airflow/index.rst', '_api/airflow/jobs', '_api/airflow/lineage', '_api/airflow/logging_config', '_api/airflow/macros', '_api/airflow/migrations', '_api/airflow/plugins_manager', '_api/airflow/security', '_api/airflow/serialization', '_api/airflow/settings', '_api/airflow/sentry', '_api/airflow/stats', '_api/airflow/task', '_api/airflow/ti_deps', '_api/airflow/utils', '_api/airflow/version', '_api/airflow/www', '_api/airflow/www_rbac', '_api/main', 'autoapi_templates', 'howto/operator/gcp/_partials', ] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. keep_warnings = True intersphinx_mapping = { 'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None), 'mongodb': ('https://api.mongodb.com/python/current/', None), 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None), 'python': ('https://docs.python.org/3/', None), 'requests': ('https://requests.readthedocs.
io/en/master/', None), 'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None), 'hdfs': ('https://hdfscli.readthedocs.io/en/latest/', None), # google-cloud-python 'google-cloud-automl': ('https://googleapis.dev/python/automl/latest', None), 'google-cloud-bigquery': ('https://googleapis.dev/python/bigquery/latest', None), 'google-cloud-bigquery-datatransfer': ('https://googleapis.dev/python/bigquerydatatransfer/latest', None), 'goo
gle-cloud-bigquery-storage': ('https://googleapis.dev/python/bigquerystorage/latest', None), 'google-cloud-bigtable': ('https://googleapis.dev/python/bigtable/latest', None), 'google-cloud-container': ('https://googleapis.dev/python/container/latest', None), 'google-cloud-core': ('https://googleapis.dev/python/google-cloud-core/latest', None), 'google-cloud-datastore': ('https://googleapis.dev/python/datastore/latest', None), 'google-cloud-dlp': ('https://googleapis.dev/python/dlp/latest', None), 'google-cloud-kms': ('https://googleapis.dev/python/cloudkms/latest', None), 'google-cloud-language': ('https://googleapis.dev/python/language/latest', None), 'google-cloud-pubsub': ('https://googleapis.dev/python/pubsub/latest', None), 'google-cloud-redis': ('https://googleapis.dev/python/redis/latest', None), 'google-cloud-spanner': ('https://googleapis.dev/python/spanner/latest', None), 'google-cloud-speech': ('https://googleapis.dev/python/speech/latest', None), 'google-cloud-storage': ('https://googleapis.dev/python/storage/latest', None), 'google-cloud-tasks': ('https://googleapis.dev/python/cloudtasks/latest', None), 'google-cloud-texttospeech': ('https://googleapis.dev/python/texttospeech/latest', None), 'google-cloud-translate': ('https://googleapis.dev/python/translation/latest', None), 'google-cloud-videointelligence': ('https://googleapis.dev/python/videointelligence/latest', None), 'google-cloud-vision': ('https://googleapis.dev/python/vision/latest', None), } # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' if airflow_theme_is_available: html_theme = 'sphinx_airflow_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] import sphinx_rtd_theme # pylint: disable=wrong-import-position,wrong-import-order html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "Airflow Documentation" # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = "" # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None html_favicon = "../airflow/www/static/pin_32.png" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # A list of JavaScript filename. The entry must be a filename string or a # tuple containing the filename string and the attributes dictionary. The # filename must be relative to the html_static_path, or a full URI with # scheme like http://example.org/script.js. html_js_files = ['jira-links.js'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
if airflow_theme_is_available: html_sidebars = { '**': [ 'version-selector.html', 'searchbox.html', 'globaltoc.html', ] } # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base na
. first_image_file = image_path % 1 if os.path.exists(stdout_path): stdout = open(stdout_path).read() else: stdout = '' if os.path.exists(time_path): time_elapsed = float(open(time_path).read()) if not os.path.exists(first_image_file) or \ os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime: # We need to execute the code print('plotting %s' % fname) t0 = time() import matplotlib.pyplot as plt plt.close('all') cwd = os.getcwd() try: # First CD in the original example dir, so that any file # created by the example get created in this directory orig_stdout = sys.stdout os.chdir(os.path.dirname(src_file)) my_buffer = StringIO() my_stdout = Tee(sys.stdout, my_buffer) sys.stdout = my_stdout my_globals = {'pl': plt} execfile(os.path.basename(src_file), my_globals) time_elapsed = time() - t0 sys.stdout = orig_stdout my_stdout = my_buffer.getvalue() if '__doc__' in my_globals: # The __doc__ is often printed in the example, we # don't with to echo it my_stdout = my_stdout.replace( my_globals['__doc__'], '') my_stdout = my_stdout.strip().expandtabs() if my_stdout: stdout = '**Script output**::\n\n %s\n\n' % ( '\n '.join(my_stdout.split('\n'))) open(stdout_path, 'w').write(stdout) open(time_path, 'w').write('%f' % time_elapsed) os.chdir(cwd) # In order to save every figure we have two solutions : # * iterate from 1 to infinity and call plt.fignum_exists(n) # (this requires the figures to be numbered # incrementally: 1, 2, 3 and not 1, 2, 5) # * iterate over [fig_mngr.num for fig_mngr in # matplotlib._pylab_helpers.Gcf.get_all_fig_managers()] fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers() for fig_mngr in fig_managers: # Set the fig_num figure as the current figure as we can't # save a figure that's not the current figure. fig = plt.figure(fig_mngr.num) kwargs = {} to_rgba = matplotlib.colors.colorConverter.to_rgba for attr in ['facecolor', 'edgecolor']: fig_attr = getattr(fig, 'get_' + attr)() default_attr = matplotlib.rcParams['figure.' 
+ attr] if to_rgba(fig_attr) != to_rgba(default_attr): kwargs[attr] = fig_attr fig.savefig(image_path % fig_mngr.num, **kwargs) figure_list.append(image_fname % fig_mngr.num) except: print(80 * '_') print('%s is not compiling:' % fname) traceback.print_exc() print(80 * '_') finally: os.chdir(cwd) sys.stdout = orig_stdout print(" - time elapsed : %.2g sec" % time_elapsed) else: figure_list = [f[len(image_dir):] for f in glob.glob(image_path.replace("%03d", '[0-9][0-9][0-9]'))] figure_list.sort() # generate thumb file this_template = plot_rst_template car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/') # Note: normaly, make_thumbnail is used to write to the path contained in `thumb_file` # which is within `auto_examples/../images/thumbs` depending on the example. # Because the carousel has different dimensions than those of the examples gallery, # I did not simply reuse them all as some contained whitespace due to their default gallery # thumbnail size. Below, for a few cases, seperate thumbnails are created (the originals can't # just be overwritten with the carousel dimensions as it messes up the examples gallery layout). # The special carousel thumbnails are written directly to _build/html/stable/_images/, # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
# have it happen with the rest. Ideally they should be written to 'thumb_file'
as well, and then # copied to the _images folder during the `Copying Downloadable Files` step like the rest. if not os.path.exists(car_thumb_path): os.makedirs(car_thumb_path) if os.path.exists(first_image_file): # We generate extra special thumbnails for the carousel carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png') first_img = image_fname % 1 if first_img in carousel_thumbs: make_thumbnail((image_path % carousel_thumbs[first_img][0]), carousel_tfile, carousel_thumbs[first_img][1], 190) make_thumbnail(first_image_file, thumb_file, 400, 280) if not os.path.exists(thumb_file): # create something to replace the thumbnail make_thumbnail('images/no_image.png', thumb_file, 200, 140) docstring, short_desc, end_row = extract_docstring(example_file) # Depending on whether we have one or more figures, we're using a # horizontal list or a single rst call to 'image'. if len(figure_list) == 1: figure_name = figure_list[0] image_list = SINGLE_IMAGE % figure_name.lstrip('/') else: image_list = HLIST_HEADER for figure_name in figure_list: image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/') time_m, time_s = divmod(time_elapsed, 60) f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w') f.write(this_template % locals()) f.flush() # save variables so we can later add links to the documentation if six.PY2: example_code_obj = identify_names(open(example_file).read()) else: example_code_obj = \ identify_names(open(example_file, encoding='utf-8').read()) if example_code_obj: codeobj_fname = example_file[:-3] + '_codeobj.pickle' with open(codeobj_fname, 'wb') as fid: pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL) backrefs = set('{module_short}.{name}'.format(**entry) for entry in example_code_obj.values() if entry['module'].startswith('sklearn')) return backrefs def embed_code_links(app, exception): """Embed hyperlinks to documentation into example code""" if exception is not None: return print('Embedding documentation 
hyperlinks in examples..') if app.builder.name == 'latex': # Don't embed hyperlinks when a latex builder is used. return # Add resolvers for the packages for which we want to show links doc_resolvers = {} doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir, relative=True) resolver_urls = { 'matplotlib': 'http://matplotlib.org', 'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0', 'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference', } for this_module, url in resolver_urls.items(): try: doc_resolvers[this_module] = SphinxDocLinkResolver(url) except HTTPError as e: print("The following HTTP Error has occurred:\n") print(e.code) except URLError as e: print("\n...\n" "Warning: Embeddi
# -*- coding: utf-8 -*- ''' Interface with a Junos device via proxy-minion. ''' # Import python libs from __future__ import print_function from __future__ import absolute_import import logging # I
mport 3rd-party libs import jnpr.junos import jnpr.junos.utils import jnpr.junos.utils.config import json HAS_JUNOS = True __proxyenabled__ = ['junos'] thisproxy = {} log = l
ogging.getLogger(__name__) def init(opts): ''' Open the connection to the Junos device, login, and bind to the Resource class ''' log.debug('Opening connection to junos') thisproxy['conn'] = jnpr.junos.Device(user=opts['proxy']['username'], host=opts['proxy']['host'], password=opts['proxy']['passwd']) thisproxy['conn'].open() thisproxy['conn'].bind(cu=jnpr.junos.utils.config.Config) def conn(): return thisproxy['conn'] def facts(): return thisproxy['conn'].facts def refresh(): return thisproxy['conn'].facts_refresh() def proxytype(): ''' Returns the name of this proxy ''' return 'junos' def id(opts): ''' Returns a unique ID for this proxy minion ''' return thisproxy['conn'].facts['hostname'] def ping(): ''' Ping? Pong! ''' return thisproxy['conn'].connected def shutdown(opts): ''' This is called when the proxy-minion is exiting to make sure the connection to the device is closed cleanly. ''' log.debug('Proxy module {0} shutting down!!'.format(opts['id'])) try: thisproxy['conn'].close() except Exception: pass def rpc(): return json.dumps(thisproxy['conn'].rpc.get_software_information())
from PyQt5.QtCore import pyqtSlot, QThread, pyqtSignal
import os
from PyQt5.QtWidgets import QFileDialog, QProgressDialog, QMessageBox
from PyQt5.QtCore import pyqtSlot, QObject
from books.soldiers import processData
import route_gui
from lxml import etree
import multiprocessing
import math


class XmlImport(QObject):
    """Loads an XML data file and runs the extraction process on a worker
    thread, reporting progress, errors and results back to the GUI thread
    via Qt signals.
    """

    threadUpdateSignal = pyqtSignal(int, int, name="progressUpdate")
    threadExceptionSignal = pyqtSignal(object, name="exceptionInProcess")
    threadResultsSignal = pyqtSignal(dict, name="results")
    finishedSignal = pyqtSignal(dict, str, name="processFinished")

    def __init__(self, parent):
        super(XmlImport, self).__init__(parent)
        self.parent = parent
        self.processCount = 0
        self.result = {}
        # BUG FIX: self.processor was never initialized, so calling
        # importOne() before a file had been processed raised
        # AttributeError instead of taking the guarded None branch.
        self.processor = None
        self.thread = QThread(parent=self.parent)
        self.threadUpdateSignal.connect(self._updateProgressBarInMainThread)
        self.threadExceptionSignal.connect(self._loadingFailed)
        self.threadResultsSignal.connect(self._processFinished)
        self.filepath = ""

    def importOne(self, xmlEntry):
        """Extract a single XML entry with the current processor.

        Returns the extraction result, or None when no file has been
        processed yet (processor not created).
        """
        if self.processor is not None:
            result = self.processor.extractOne(xmlEntry)
            return result
        else:
            return None

    @pyqtSlot()
    def openXMLFile(self):
        """Show a file dialog and start analyzing the chosen XML file."""
        filename = QFileDialog.getOpenFileName(self.parent,
                                               "Open xml-file containing the data to be analyzed.",
                                               ".",
                                               "Person data files (*.xml);;All files (*)")
        if filename[0] != "":
            self.filepath = filename[0]
            self.parent.setWindowTitle("Kaira " + filename[0])
            self._analyzeOpenedXml(filename)

    def _analyzeOpenedXml(self, file):
        """Open a progress dialog and run the extraction on the worker thread."""
        self.progressDialog = QProgressDialog(self.parent)
        self.progressDialog.setCancelButton(None)
        self.progressDialog.setLabelText("Extracting provided datafile...")
        self.progressDialog.open()
        self.progressDialog.setValue(0)
        self.file = file
        # Run _runProcess as the thread body (no subclassing of QThread).
        self.thread.run = self._runProcess
        self.thread.start()

    def _runProcess(self):
        """Worker-thread body: parse the XML, pick a processor by the
        'bookseries' root attribute, and emit results or the exception."""
        try:
            xmlDataDocument = self._getXMLroot(self.file[0])
            # TODO: Read the xml metadata.
            try:
                # TODO: Multiprocessing support?
                self.processor = route_gui.Router.get_processdata_class(
                    xmlDataDocument.attrib["bookseries"])(self._processUpdateCallback)
                result = self.processor.startExtractionProcess(xmlDataDocument, self.file[0])
                self.threadResultsSignal.emit(result)
            except KeyError:
                # Root element lacks the 'bookseries' attribute.
                raise MetadataException()
        except Exception as e:
            if "DEV" in os.environ and os.environ["DEV"]:
                # In development mode, fail loudly for easier debugging.
                raise e
            else:
                print(e)
                self.threadExceptionSignal.emit(e)

    @pyqtSlot(int, int)
    def _updateProgressBarInMainThread(self, i, max):
        """Update the progress dialog (runs in the GUI thread)."""
        self.progressDialog.setRange(0, max)
        self.progressDialog.setValue(i)

    @pyqtSlot(object)
    def _loadingFailed(self, e):
        """Close the progress dialog and show an error box for exception e."""
        self.progressDialog.cancel()
        import pymongo
        errMessage = "Error in data-file. Extraction failed. Is the xml valid and in utf-8 format? More info: "
        if isinstance(e, pymongo.errors.ServerSelectionTimeoutError):
            errMessage = "Couldn't connect to database. Try going to '/mongodb/data/db' in application directory and deleting 'mongod.lock' file and restart application. More info: "
        msgbox = QMessageBox()
        msgbox.information(self.parent, "Extraction failed", errMessage + str(e))
        msgbox.show()

    @pyqtSlot(dict)
    def _processFinished(self, result):
        """Store the result and notify listeners that processing finished."""
        self.result = result
        self.finishedSignal.emit(self.result, self.filepath)

    def _processUpdateCallback(self, i, max):
        """Forward processor progress to the GUI thread via a signal."""
        self.threadUpdateSignal.emit(i, max)

    def _getXMLroot(self, filepath):
        """Parse the file as UTF-8 XML and return the document root."""
        parser = etree.XMLParser(encoding="utf-8")
        tree = etree.parse(filepath, parser=parser)
        return tree.getroot()


class MetadataException(Exception):
    """Raised when the XML root lacks the required 'bookseries' attribute."""

    def __init__(self):
        self.msg = "ERROR: The document doesn't contain bookseries attribute in the beginning of the file. Couldn't import. Try " \
                   "to generate new xml-file from the source ocr-text or add the missing attribute to the file manually."

    def __str__(self):
        return repr(self.msg)
import datetime
import json


class Experience:
    """A dated event, with helpers to rebuild instances from DB rows."""

    def __init__(self, date, event):
        # date: datetime.datetime of the event; event: arbitrary payload.
        self._date = date
        self._event = event

    @classmethod
    def reconstruct_from_db_data_event(cls, date, event):
        """Rebuild an instance from DB string columns.

        date  -- timestamp string in "%Y-%m-%d %H:%M:%S.%f" format.
        event -- JSON-encoded event payload.

        Uses ``cls`` (not a hard-coded class name) so subclasses
        reconstruct as instances of themselves.
        """
        event = json.loads(event)
        date = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f")
        return cls(date, event)

    def time_from_this_event(self, event):
        """Return the timedelta between this event and another Experience."""
        return self._date - event._date
lper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='oc-yang:counter64', is_config=False) self.__sa_valid = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="sa-valid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='oc-yang:counter64', is_config=False) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path()+[self._yang_name] else: return ['macsec', 'interfaces', 'interface', 'scsa-rx', 'scsa-rx', 'state', 'counters'] def _get_sc_invalid(self): """ Getter method for sc_invalid, mapped from YANG variable /macsec/interfaces/interface/scsa_rx/scsa_rx/state/counters/sc_invalid (oc-yang:counter64) YANG Description: Invalid Secure Channel RX Packets counter. This counter reflects the number of invalid received packets in a secure channel. 
""" return self.__sc_invalid def _set_sc_invalid(self, v, load=False): """ Setter method for sc_invalid, mapped from YANG variable /macsec/interfaces/interface/scsa_rx/scsa_rx/state/counters/sc_invalid (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_sc_invalid is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sc_invalid() directly. YANG Description: Invalid Secure Channel RX Packets counter. This counter reflects the number of invalid received packets in a secure channel. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="sc-invalid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='oc-yang:counter64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """sc_invalid must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="sc-invalid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='oc-yang:counter64', is_config=False)""", }) self.__sc_invalid = t if hasattr(self, '_set'): self._set() def _unset_sc_invalid(self): self.__sc_invalid = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="sc-invalid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, 
namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='oc-yang:counter64', is_config=False) def _get_sc_valid(self): """ Getter method for sc_valid, mapped from YANG variable /macsec/interfaces/interface/scsa_rx/scsa_rx/state/counters/sc_valid (oc-yang:counter64) YANG Description: Valid Secure Channel RX Packets counter. This counter reflects the number of valid received packets in a secure channel. """ return self.__sc_valid def _set_sc_valid(self, v, load=False): """ Setter method for sc_valid, mapped from YANG variable /macsec/interfaces/interface/scsa_rx/scsa_rx/state/counters/sc_valid (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_sc_valid is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sc_valid() directly. YANG Description: Valid Secure Channel RX Packets counter. This counter reflects the number of valid received packets in a secure channel. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="sc-valid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='oc-yang:counter64', is_config=False) exce
pt (TypeError, ValueError): raise Value
Error({ 'error-string': """sc_valid must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="sc-valid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='oc-yang:counter64', is_config=False)""", }) self.__sc_valid = t if hasattr(self, '_set'): self._set() def _unset_sc_valid(self): self.__sc_valid = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="sc-valid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='oc-yang:counter64', is_config=False) def _get_sa_invalid(self): """ Getter method for sa_invalid, mapped from YANG variable /macsec/interfaces/interface/scsa_rx/scsa_rx/state/counters/sa_invalid (oc-yang:counter64) YANG Description: Invalid Secure Association RX Packets counter. This counter reflects the number of integrity check fails for received packets in a secure association. """ return self.__sa_invalid def _set_sa_invalid(self, v, load=False): """ Setter method for sa_invalid, mapped from YANG variable /macsec/interfaces/interface/scsa_rx/scsa_rx/state/counters/sa_invalid (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_sa_invalid is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sa_invalid() directly. YANG Description: Invalid Secure Association RX Packets counter. 
This counter reflects the number of integrity check fails for received packets in a secure association. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="sa-invalid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='oc-yang:counter64',
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Port of NNVM version of MobileNet to Relay.
"""
# pylint: disable=invalid-name
from tvm import relay
from . import layers
from .init import create_workload


def conv_block(
    data,
    name,
    channels,
    kernel_size=(3, 3),
    strides=(1, 1),
    padding=(1, 1),
    epsilon=1e-5,
    layout="NCHW",
):
    """Helper function to construct conv_bn-relu"""
    # convolution + bn + relu
    conv = layers.conv2d(
        data=data,
        channels=channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_layout=layout,
        kernel_layout=layers.conv_kernel_layout(layout),
        name=name + "_conv",
    )
    bn = layers.batch_norm_infer(data=conv, epsilon=epsilon, name=name + "_bn")
    act = relay.nn.relu(data=bn)
    return act


def separable_conv_block(
    data,
    name,
    depthwise_channels,
    pointwise_channels,
    kernel_size=(3, 3),
    downsample=False,
    padding=(1, 1),
    epsilon=1e-5,
    layout="NCHW",
    dtype="float32",
):
    """Helper function to get a separable conv block"""
    # Downsampling is done by strided depthwise conv, not pooling.
    if downsample:
        strides = (2, 2)
    else:
        strides = (1, 1)
    # depthwise convolution + bn + relu
    # Weight shape for a depthwise conv (one filter per input channel,
    # groups == channels) depends on the data layout.
    if layout == "NCHW":
        wshape = (depthwise_channels, 1) + kernel_size
    elif layout == "NHWC":
        wshape = kernel_size + (depthwise_channels, 1)
    else:
        raise ValueError("Invalid layout: " + layout)
    # Batch-norm normalizes over the channel axis of the chosen layout.
    bn_axis = layout.index("C")
    weight = relay.var(name + "_weight", shape=wshape, dtype=dtype)
    conv1 = layers.conv2d(
        data=data,
        weight=weight,
        channels=depthwise_channels,
        groups=depthwise_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_layout=layout,
        kernel_layout=layers.conv_kernel_layout(layout, True),
        name=name + "_depthwise_conv1",
    )
    bn1 = layers.batch_norm_infer(data=conv1, epsilon=epsilon, axis=bn_axis, name=name + "_bn1")
    act1 = relay.nn.relu(data=bn1)
    # pointwise convolution + bn + relu
    conv2 = layers.conv2d(
        data=act1,
        channels=pointwise_channels,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding=(0, 0),
        data_layout=layout,
        kernel_layout=layers.conv_kernel_layout(layout),
        name=name + "_conv2",
    )
    bn2 = layers.batch_norm_infer(data=conv2, epsilon=epsilon, axis=bn_axis, name=name + "_bn2")
    act2 = relay.nn.relu(data=bn2)
    return act2


def mobile_net(
    num_classes=1000,
    data_shape=(1, 3, 224, 224),
    dtype="float32",
    alpha=1.0,
    is_shallow=False,
    layout="NCHW",
):
    """Function to construct a MobileNet

    alpha is the width multiplier: every channel count below is scaled
    by it. NOTE(review): default data_shape assumes NCHW; pass a matching
    shape when layout="NHWC".
    """
    data = relay.var("data", shape=data_shape, dtype=dtype)
    body = conv_block(data, "conv_block_1", int(32 * alpha), strides=(2, 2), layout=layout)
    body = separable_conv_block(
        body, "separable_conv_block_1", int(32 * alpha), int(64 * alpha), layout=layout, dtype=dtype
    )
    body = separable_conv_block(
        body,
        "separable_conv_block_2",
        int(64 * alpha),
        int(128 * alpha),
        downsample=True,
        layout=layout,
        dtype=dtype,
    )
    body = separable_conv_block(
        body,
        "separable_conv_block_3",
        int(128 * alpha),
        int(128 * alpha),
        layout=layout,
        dtype=dtype,
    )
    body = separable_conv_block(
        body,
        "separable_conv_block_4",
        int(128 * alpha),
        int(256 * alpha),
        downsample=True,
        layout=layout,
        dtype=dtype,
    )
    body = separable_conv_block(
        body,
        "separable_conv_block_5",
        int(256 * alpha),
        int(256 * alpha),
        layout=layout,
        dtype=dtype,
    )
    body = separable_conv_block(
        body,
        "separable_conv_block_6",
        int(256 * alpha),
        int(512 * alpha),
        downsample=True,
        layout=layout,
        dtype=dtype,
    )
    # Shallow variant replaces blocks 7-13 with two downsampling blocks.
    if is_shallow:
        body = separable_conv_block(
            body,
            "separable_conv_block_7",
            int(512 * alpha),
            int(1024 * alpha),
            downsample=True,
            layout=layout,
            dtype=dtype,
        )
        body = separable_conv_block(
            body,
            "separable_conv_block_8",
            int(1024 * alpha),
            int(1024 * alpha),
            downsample=True,
            layout=layout,
            dtype=dtype,
        )
    else:
        # Five identical 512-channel blocks (7 through 11).
        for i in range(7, 12):
            body = separable_conv_block(
                body,
                "separable_conv_block_%d" % i,
                int(512 * alpha),
                int(512 * alpha),
                layout=layout,
                dtype=dtype,
            )
        body = separable_conv_block(
            body,
            "separable_conv_block_12",
            int(512 * alpha),
            int(1024 * alpha),
            downsample=True,
            layout=layout,
            dtype=dtype,
        )
        body = separable_conv_block(
            body,
            "separable_conv_block_13",
            int(1024 * alpha),
            int(1024 * alpha),
            layout=layout,
            dtype=dtype,
        )
    # Classifier head: global average pool -> flatten -> dense -> softmax.
    pool = relay.nn.global_avg_pool2d(data=body, layout=layout)
    flatten = relay.nn.batch_flatten(data=pool)
    weight = relay.var("fc_weight")
    bias = relay.var("fc_bias")
    fc = relay.nn.dense(data=flatten, weight=weight, units=num_classes)
    fc = relay.nn.bias_add(fc, bias)
    softmax = relay.nn.softmax(data=fc)
    return relay.Function(relay.analysis.free_vars(softmax), softmax)


def get_workload(
    batch_size=1, num_classes=1000, image_shape=(3, 224, 224), dtype="float32", layout="NCHW"
):
    """Get benchmark workload for mobilenet

    Parameters
    ----------
    batch_size : int, optional
        The batch size used in the model

    num_classes : int, optional
        Number of classes

    image_shape : tuple, optional
        The input image shape, cooperate with layout

    dtype : str, optional
        The data type

    layout : str, optional
        The data layout of image_shape and the operators
        cooperate with image_shape

    Returns
    -------
    mod : tvm.IRModule
        The relay module that contains a MobileNet network.

    params : dict of str to NDArray
        The parameters.
    """
    data_shape = tuple([batch_size] + list(image_shape))
    net = mobile_net(
        num_classes=num_classes,
        data_shape=data_shape,
        dtype=dtype,
        alpha=1.0,
        is_shallow=False,
        layout=layout,
    )
    return create_workload(net)
t can can obscure some important differences. Abstract origin references are tracked and annotated (unless disabled). """ import getopt
import os import re import sys import script_utils as u # Input and output file (if not specified, defaults to stdin/stdout) flag_infile = None flag_outfile = None # Perform normalization flag_normalize =
True # Compile units to be included in dump. flag_compunits = {} # Strip offsets if true flag_strip_offsets = False # Strip hi/lo PC and location lists flag_strip_pcinfo = False # Annotate abstract origin refs flag_annotate_abstract = True # Strip these pcinfo_attrs = {"DW_AT_low_pc": 1, "DW_AT_high_pc": 1} # Untracked DW refs untracked_dwrefs = {} # Line buffer linebuf = None linebuffered = False #...................................................................... # Regular expressions to match: # Begin-DIE preamble bdiere = re.compile(r"^(\s*)\<(\d+)\>\<(\S+)\>\:(.*)$") bdiezre = re.compile(r"^\s*Abbrev Number\:\s+0\s*$") bdiebodre = re.compile(r"^\s*Abbrev Number\:\s+\d+\s+\(DW_TAG_(\S+)\)\s*$") # Within-DIE regex indiere = re.compile(r"^(\s*)\<(\S+)\>(\s+)(DW_AT_\S+)(\s*)\:(.*)$") indie2re = re.compile(r"^(\s*)\<(\S+)\>(\s+)(Unknown\s+AT\s+value)(\s*)\:(.*)$") # For grabbing dwarf ref from attr value absore = re.compile(r"^\s*\<\S+\>\s+DW_AT_\S+\s*\:\s*\<0x(\S+)\>.*$") # Attr value dwarf offset attrdwoffre = re.compile(r"^(.*)\<0x(\S+)\>(.*)$") def compute_reloff(absoff, origin): """Compute relative offset from absolute offset.""" oabs = int(absoff, 16) if not flag_normalize: return oabs odec = int(origin, 16) delta = oabs - odec return delta def abstorel(val, diestart): """Convert absolute to relative DIE offset.""" # FIXME: this will not handle backwards refs; that would # require multiple passes. m1 = attrdwoffre.match(val) if m1: absref = m1.group(2) if absref in diestart: val = re.sub(r"<0x%s>" % absref, r"<0x%x>" % diestart[absref], val) u.verbose(3, "abs %s converted to rel %s" % (absref, val)) return (0, val) return (1, absref) return (2, None) def munge_attrval(attr, oval, diestart): """Munge attr value.""" # Convert abs reference to rel reference. # FIXME: this will not handle backwards refs; that would # require multiple passes. 
code, val = abstorel(oval, diestart) if code == 1: absref = val if absref in untracked_dwrefs: val = untracked_dwrefs[absref] else: n = len(untracked_dwrefs) if flag_normalize: unk = (" <untracked %d>" % (n+1)) else: unk = (" <untracked 0x%s>" % absref) untracked_dwrefs[absref] = unk val = unk if code == 2: val = oval if flag_strip_pcinfo: if attr in pcinfo_attrs: val = "<stripped>" return val def read_line(inf): """Read an input line.""" global linebuffered global linebuf if linebuffered: linebuffered = False u.verbose(3, "buffered line is %s" % linebuf) return linebuf line = inf.readline() u.verbose(3, "line is %s" % line.rstrip()) return line def unread_line(line): """Unread an input line.""" global linebuffered global linebuf u.verbose(3, "unread_line on %s" % line.rstrip()) if linebuffered: u.error("internal error: multiple line unread") linebuffered = True linebuf = line def read_die(inf, outf): """Reads in and returns the next DIE.""" lines = [] indie = False while True: line = read_line(inf) if not line: break m1 = bdiere.match(line) if not indie: if m1: lines.append(line) indie = True continue outf.write(line) else: if m1: unread_line(line) break m2 = indiere.match(line) if not m2: m2 = indie2re.match(line) if not m2: unread_line(line) break lines.append(line) u.verbose(2, "=-= DIE read:") for line in lines: u.verbose(2, "=-= %s" % line.rstrip()) return lines def emit_die(lines, outf, origin, diename, diestart): """Emit body of DIE.""" # First line m1 = bdiere.match(lines[0]) if not m1: u.error("internal error: first line of DIE " "should match bdiere: %s" % lines[0]) sp = m1.group(1) depth = m1.group(2) absoff = m1.group(3) rem = m1.group(4) off = compute_reloff(absoff, origin) if flag_strip_offsets: outf.write("%s<%s>:%s\n" % (sp, depth, rem)) else: outf.write("%s<%s><%0x>:%s\n" % (sp, depth, off, rem)) # Remaining lines for line in lines[1:]: m2 = indiere.match(line) if not m2: m2 = indie2re.match(line) if not m2: u.error("internal error: m2 match 
failed on attr line") sp1 = m2.group(1) absoff = m2.group(2) sp2 = m2.group(3) attr = m2.group(4) sp3 = m2.group(5) rem = m2.group(6) addend = "" off = compute_reloff(absoff, origin) u.verbose(3, "attr is %s" % attr) # Special sauce if abs origin. if attr == "DW_AT_abstract_origin": m3 = absore.match(line) if m3: absoff = m3.group(1) reloff = compute_reloff(absoff, origin) if reloff in diename: addend = "// " + diename[reloff] else: u.verbose(2, "absore() failed on %s\n", line) # Post-process attr value rem = munge_attrval(attr, rem, diestart) # Emit if flag_strip_offsets: outf.write("%s%s%s:%s%s%s\n" % (sp1, sp2, attr, sp3, rem, addend)) else: outf.write("%s<%0x>%s%s:%s%s%s\n" % (sp1, off, sp2, attr, sp3, rem, addend)) def attrval(lines, tattr): """Return the specified attr for this DIE (or empty string if no name).""" for line in lines[1:]: m2 = indiere.match(line) if not m2: m2 = indie2re.match(line) if not m2: u.error("attr match failed for %s" % line) attr = m2.group(4) if attr == tattr: rem = m2.group(6) return rem.strip() return "" def perform_filt(inf, outf): """Read inf and filter contents to outf.""" # Records DIE starts: hex string => new offset diestart = {} # Maps rel DIE offset to name. Note that not all DIEs have names. diename = {} # Origin (starting absolute offset) origin = None # Set to true if output is filtered off filtered = False if flag_compunits: u.verbose(1, "Selected compunits:") for cu in sorted(flag_compunits): u.verbose(1, "%s" % cu) # Read input while True: dielines = read_die(inf, outf) if not dielines: break # Process starting line of DIE line1 = dielines[0] m1 = bdiere.match(line1) if not m1: u.error("internal error: first line of DIE should match bdiere") absoff = m1.group(3) rem = m1.group(4) if not origin: u.verbose(2, "origin set to %s" % absoff) origin = absoff off = compute_reloff(absoff, origin) diestart[absoff] = off # Handle zero terminators. 
if bdiezre.match(rem): if not filtered: emit_die(dielines, outf, origin, diename, diestart) continue # See what flavor of DIE this is to adjust filtering. m2 = bdiebodre.match(rem) if not m2: u.error("bdiebodre/bdiezre match failed on: '%s'" % rem) tag = m2.group(1) u.verbose(2, "=-= tag = %s" % tag) if flag_compunits and tag == "compile_unit": name = attrval(dielines, "DW_AT_name") if name: if name in flag_compunits: u.verbose(1, "=-= output enabled since %s is in compunits" % name) filtered = False else: u.verbose(1, "=-= output disabled since %s not in compunits" % name) filtered = True # Emit die if not filtered if not filtered: u.verbose(2, "=-= emit DIE") emit_die(dielines, outf, origin, diename, diestart) else: u.verbose(2, "=-= flush DIE (filtered): %s" % dielines[0]) def perform(): """Main driver routine.""" inf = sys.stdin outf = sys.stdout if flag_infile: try: inf = open(flag_infile, "rb") except IOError as e: u.error("unable to open input file %s: " "%s" % (flag_infile, e.strerr
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock

from zdesk import RateLimitError

from airflow.providers.zendesk.hooks.zendesk import ZendeskHook


class TestZendeskHook(unittest.TestCase):
    """Tests for ZendeskHook: rate-limit handling, pagination and zdesk init."""

    def _hook_returning(self, payload):
        """Build a ZendeskHook whose connection's call() always answers *payload*.

        Returns the hook together with the call mock so tests can assert on
        how the underlying connection was invoked.
        """
        hook = ZendeskHook("conn_id")
        connection = mock.Mock()
        connection.host = "some_host"
        hook.get_connection = mock.Mock(return_value=connection)
        hook.get_conn()
        conn = mock.Mock()
        call_mock = mock.Mock(return_value=payload)
        conn.call = call_mock
        hook.get_conn = mock.Mock(return_value=conn)
        return hook, call_mock

    @mock.patch("airflow.providers.zendesk.hooks.zendesk.time")
    def test_sleeps_for_correct_interval(self, mocked_time):
        sleep_time = 10
        # Raising from sleep() breaks out of the otherwise infinite retry loop.
        mocked_time.sleep = mock.Mock(side_effect=ValueError, return_value=3)

        mock_response = mock.Mock()
        mock_response.headers.get.return_value = sleep_time
        conn_mock = mock.Mock()
        conn_mock.call = mock.Mock(
            side_effect=RateLimitError(msg="some message", code="some code",
                                       response=mock_response))

        zendesk_hook = ZendeskHook("conn_id")
        zendesk_hook.get_conn = mock.Mock(return_value=conn_mock)

        with self.assertRaises(ValueError):
            zendesk_hook.call("some_path", get_all_pages=False)
        # The hook must back off for exactly the interval the server reported.
        mocked_time.sleep.assert_called_once_with(sleep_time)

    @mock.patch("airflow.providers.zendesk.hooks.zendesk.Zendesk")
    def test_returns_single_page_if_get_all_pages_false(self, _):
        zendesk_hook, call_mock = self._hook_returning(
            {'next_page': 'https://some_host/something', 'path': []})
        zendesk_hook.call("path", get_all_pages=False)
        # Despite a next_page being advertised, only one request goes out.
        call_mock.assert_called_once_with("path", None)

    @mock.patch("airflow.providers.zendesk.hooks.zendesk.Zendesk")
    def test_returns_multiple_pages_if_get_all_pages_true(self, _):
        zendesk_hook, call_mock = self._hook_returning(
            {'next_page': 'https://some_host/something', 'path': []})
        zendesk_hook.call("path", get_all_pages=True)
        # next_page is followed once before pagination terminates.
        assert call_mock.call_count == 2

    @mock.patch("airflow.providers.zendesk.hooks.zendesk.Zendesk")
    def test_zdesk_is_inited_correctly(self, mock_zendesk):
        conn_mock = mock.Mock()
        conn_mock.host = "conn_host"
        conn_mock.login = "conn_login"
        conn_mock.password = "conn_pass"

        zendesk_hook = ZendeskHook("conn_id")
        zendesk_hook.get_connection = mock.Mock(return_value=conn_mock)
        zendesk_hook.get_conn()
        # Connection fields map onto the zdesk client constructor arguments.
        mock_zendesk.assert_called_once_with(zdesk_url='https://conn_host',
                                             zdesk_email='conn_login',
                                             zdesk_password='conn_pass',
                                             zdesk_token=True)

    @mock.patch("airflow.providers.zendesk.hooks.zendesk.Zendesk")
    def test_zdesk_sideloading_works_correctly(self, mock_zendesk):
        zendesk_hook, _ = self._hook_returning(
            {'next_page': 'https://some_host/something',
             'tickets': [],
             'users': [],
             'groups': []})
        results = zendesk_hook.call(".../tickets.json",
                                    query={"include": "users,groups"},
                                    get_all_pages=False,
                                    side_loading=True)
        # With side_loading the pagination key is stripped from the payload.
        assert results == {'groups': [], 'users': [], 'tickets': []}
# -*- coding:utf-8 -*-
from sqlalchemy import desc, func

from atlas.modeles.entities.vmSearchTaxon import VmSearchTaxon


def listeTaxons(session):
    """Return every taxon as an autocomplete entry.

    Each entry is a dict ``{"label": ..., "value": ...}`` where ``label`` is
    the concatenated latin + french name (``search_name``) and ``value`` is
    the ``cd_ref`` identifier.

    TODO: reportedly unused -- candidate for removal.
    """
    rows = session.query(VmSearchTaxon.search_name, VmSearchTaxon.cd_ref).all()
    # Comprehension instead of a manual append loop (same output, idiomatic).
    return [{"label": search_name, "value": cd_ref} for search_name, cd_ref in rows]


def listeTaxonsSearch(session, search, limit=50):
    """ILIKE search against VmSearchTaxon, used for taxon autocompletion.

    :param session: SQLAlchemy session
    :param str search: search string typed by the user
    :param int limit: maximum number of results

    **Returns:**
        list of ``{"label": search_name, "value": cd_ref}`` dicts, best
        trigram matches first.
    """
    req = session.query(
        VmSearchTaxon.search_name,
        VmSearchTaxon.cd_ref,
        # Trigram similarity used to rank the matches (best first).
        func.similarity(VmSearchTaxon.search_name, search).label("idx_trgm"),
    ).distinct()
    # Let '%' match across word boundaries ("abc def" -> "abc%def").
    search = search.replace(" ", "%")
    req = (
        req.filter(VmSearchTaxon.search_name.ilike("%" + search + "%"))
        .order_by(desc("idx_trgm"))
        # Prefer the reference name (cd_ref == cd_nom) among synonyms.
        .order_by(VmSearchTaxon.cd_ref == VmSearchTaxon.cd_nom)
        .limit(limit)
    )
    return [{"label": d[0], "value": d[1]} for d in req.all()]
Fields not provided will be None. Partly backwards-compatible with :mod:`urlparse`. Example: :: >>> parse_url('http://google.com/mail/') Url(scheme='http', host='google.com', port=None, path='/', ...) >>> parse_url('google.com:80') Url(scheme=None, host='google.com', port=80, path=None, ...) >>> parse_url('/foo?bar') Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...) """ # While this code has overlap with stdlib's urlparse, it is much # simplified for our needs and less annoying. # Additionally, this imeplementations does silly things to be optimal # on CPython. scheme = None auth = None host = None port = None path = None fragment = None query = None # Scheme if '://' in url: scheme, url = url.split('://', 1) # Find the earliest Authority Terminator # (http://tools.ietf.org/html/rfc3986#section-3.2) url, path_, delim = split_first(url, ['/', '?', '#']) if delim: # Reassemble the path path = delim + path_ # Auth if '@' in url: auth, url = url.split('@', 1) # IPv6 if url and url[0] == '[': host, url = url[1:].split(']', 1) # Port if ':' in url: _host, port = url.split(':', 1) if not host: host = _host if not port.isdigit(): raise LocationParseError("Failed to parse: %s" % url) port = int(port) elif not host and url: host = url if not path: return Url(scheme, auth, host, port, path, query, fragment) # Fragment if '#' in path: path, fragment = path.split('#', 1) # Query if '?' in path: path, query = path.split('?', 1) return Url(scheme, auth, host, port, path, query, fragment) def get_host(url): """ Deprecated. Use :func:`.parse_url` instead. """ p = parse_url(url) return p.scheme or 'http', p.hostname, p.port def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, basic_auth=None): """ Shortcuts for generating request headers. :param keep_alive: If ``True``, adds 'connection: keep-alive' header. :p
aram accept_encoding: Can be a boolean, list, or string. ``True`` translates to 'gzip,deflate'. List will get joined by comma. String
will be used as provided. :param user_agent: String representing the user-agent you want, such as "python-urllib3/0.6" :param basic_auth: Colon-separated username:password string for 'authorization: basic ...' auth header. Example: :: >>> make_headers(keep_alive=True, user_agent="Batman/1.0") {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} >>> make_headers(accept_encoding=True) {'accept-encoding': 'gzip,deflate'} """ headers = {} if accept_encoding: if isinstance(accept_encoding, str): pass elif isinstance(accept_encoding, list): accept_encoding = ','.join(accept_encoding) else: accept_encoding = 'gzip,deflate' headers['accept-encoding'] = accept_encoding if user_agent: headers['user-agent'] = user_agent if keep_alive: headers['connection'] = 'keep-alive' if basic_auth: headers['authorization'] = 'Basic ' + \ b64encode(six.b(basic_auth)).decode('utf-8') return headers def is_connection_dropped(conn): # Platform-specific """ Returns True if the connection is dropped and should be closed. :param conn: :class:`httplib.HTTPConnection` object. Note: For platforms like AppEngine, this will always return ``False`` to let the platform handle connection recycling transparently for us. """ sock = getattr(conn, 'sock', False) if not sock: # Platform-specific: AppEngine return False if not poll: if not select: # Platform-specific: AppEngine return False try: return select([sock], [], [], 0.0)[0] except SocketError: return True # This version is better on platforms that support it. p = poll() p.register(sock, POLLIN) for (fno, ev) in p.poll(0.0): if fno == sock.fileno(): # Either data is buffered (bad), or the connection is dropped. return True def resolve_cert_reqs(candidate): """ Resolves the argument to a numeric constant, which can be passed to the wrap_socket function/method from the ssl module. Defaults to :data:`ssl.CERT_NONE`. If given a string it is assumed to be the name of the constant in the :mod:`ssl` module or its abbrevation. 
(So you can specify `REQUIRED` instead of `CERT_REQUIRED`. If it's neither `None` nor a string we assume it is already the numeric constant which can directly be passed to wrap_socket. """ if candidate is None: return CERT_NONE if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: res = getattr(ssl, 'CERT_' + candidate) return res return candidate def resolve_ssl_version(candidate): """ like resolve_cert_reqs """ if candidate is None: return PROTOCOL_SSLv23 if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: res = getattr(ssl, 'PROTOCOL_' + candidate) return res return candidate def assert_fingerprint(cert, fingerprint): """ Checks if given fingerprint matches the supplied certificate. :param cert: Certificate as bytes object. :param fingerprint: Fingerprint as string of hexdigits, can be interspersed by colons. """ # Maps the length of a digest to a possible hash function producing # this digest. hashfunc_map = { 16: md5, 20: sha1 } fingerprint = fingerprint.replace(':', '').lower() digest_length, rest = divmod(len(fingerprint), 2) if rest or digest_length not in hashfunc_map: raise SSLError('Fingerprint is of invalid length.') # We need encode() here for py32; works on py2 and p33. fingerprint_bytes = unhexlify(fingerprint.encode()) hashfunc = hashfunc_map[digest_length] cert_digest = hashfunc(cert).digest() if not cert_digest == fingerprint_bytes: raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".' 
.format(hexlify(fingerprint_bytes), hexlify(cert_digest))) if SSLContext is not None: # Python 3.2+ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, ca_certs=None, server_hostname=None, ssl_version=None): """ All arguments except `server_hostname` have the same meaning as for :func:`ssl.wrap_socket` :param server_hostname: Hostname of the expected certificate """ context = SSLContext(ssl_version) context.verify_mode = cert_reqs if ca_certs: try: context.load_verify_locations(ca_certs) # Py32 raises IOError # Py33 raises FileNotFoundError except Exception: # Reraise as SSLError e = sys.exc_info()[1] raise SSLError(e) if certfile: # FIXME: This block needs a test. context.load_cert_chain(certfile, keyfile) if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI return context.wrap_socket(sock, server_hostname=server_hostname) return context.wrap_socket(sock) else: # Python 3.1 and earlier def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, ca_certs=None, server_hostname=None, ssl_version=None): return wrap_socket(sock, keyfile=keyfile, certfile=certfile, ca_certs=ca_certs, cert_reqs=cert_reqs,
from __future__ import absolute_import

from fulltext.backends import __html
from fulltext.util import run, assert_cmd_exists
from fulltext.util import BaseBackend


def cmd(path, **kwargs):
    """Build the ``hwp5proc`` command line that dumps *path* as XML.

    ``**kwargs`` is accepted (and ignored) to keep the uniform backend
    command-builder signature.
    """
    # Build the argv in one expression instead of extend([...]) on a local
    # that shadowed this function's own name.
    return ['hwp5proc', 'xml', path]


def to_text_with_backend(html):
    """Delegate markup-to-text extraction to the shared HTML backend."""
    return __html.handle_fobj(html)


class Backend(BaseBackend):
    """fulltext backend for HWP (Hangul word processor) documents."""

    def check(self, title):
        # Fail fast if the converter binary is not installed.
        assert_cmd_exists('hwp5proc')

    def handle_path(self, path):
        # Convert the document to XML, then strip the markup to plain text.
        out = self.decode(run(*cmd(path)))
        return to_text_with_backend(out)
e/views/entrance_exam.py for usage is_entrance_exam = Boolean( display_name=_("Is Entrance Exam"), help=_( "Tag this course module as an Entrance Exam. " "Note, you must enable Entrance Exams for this course setting to take effect." ), default=False, scope=Scope.settings, ) individual_start_days = Integer( help=_("Number of days from the base date to the chapter starts"), scope=Scope.settings ) individual_start_hours = Integer( help=_("Number of hours from the base date to the chapter starts"), scope=Scope.settings ) individual_start_minutes = Integer( help=_("Number of minutes from the base date to the chapter starts"), scope=Scope.settings ) individual_due_days = Integer( help=_("Number of days from the base date to the due"), scope=Scope.settings ) individual_due_hours = Integer( help=_("Number of hours from the base date to the due"), scope=Scope.settings ) individual_due_minutes = Integer( help=_("Number of minutes from the base date to the due"), scope=Scope.settings ) progress_restriction = Dict( help=_("Settings for progress restriction"), default={ "type": "No Restriction", }, scope=Scope.settings ) class ProctoringFields(object): """ Fields that are specific to Proctored or Timed Exams """ is_time_limited = Boolean( display_name=_("Is Time Limited"), help=_( "This setting indicates whether students have a limited time" " to view or interact with this courseware component." ), default=False, scope=Scope.settings, ) default_time_limit_minutes = Integer( display_name=_("Time Limit in Minutes"), help=_( "The number of minutes available to students for viewing or interacting with this courseware component." ), default=None, scope=Scope.settings, ) is_proctored_enabled = Boolean( display_name=_("Is Proctoring Enabled"), help=_( "This setting indicates whether this exam is a proctored exam." 
), default=False, scope=Scope.settings, ) is_practice_exam = Boolean( display_name=_("Is Practice Exam"), help=_( "This setting indicates whether this exam is for testing purposes only. Practice exams are not verified." ), default=False, scope=Scope.settings, ) @property def is_proctored_exam(self): """ Alias the is_proctored_enabled field to the more legible is_proctored_exam """ return self.is_proctored_enabled @is_proctored_exam.setter def is_proctored_exam(self, value): """ Alias the is_proctored_enabled field to the more legible is_proctored_exam """ self.is_proctored_enabled = value @XBlock.wants('proctoring') @XBlock.wants('credit') class SequenceModule(SequenceFields, ProctoringFields, XModule): ''' Layout module which lays out content in a temporal sequence ''' js = { 'coffee': [resource_string(__name__, 'js/src/sequence/display.coffee')], 'js': [resource_string(__name__, 'js/src/sequence/display/jquery.sequence.js')], } css = { 'scss':
[resource_string(__name__, 'css/sequence/display.scss')], } js_module_name = "Sequence" def __init__(self, *args, **kwargs): super(SequenceModule, self).__init__(*args, **kw
args) # If position is specified in system, then use that instead. position = getattr(self.system, 'position', None) if position is not None: try: self.position = int(self.system.position) except (ValueError, TypeError): # Check for https://openedx.atlassian.net/browse/LMS-6496 warnings.warn( "Sequential position cannot be converted to an integer: {pos!r}".format( pos=self.system.position, ), RuntimeWarning, ) def get_progress(self): ''' Return the total progress, adding total done and total available. (assumes that each submodule uses the same "units" for progress.) ''' # TODO: Cache progress or children array? children = self.get_children() progresses = [child.get_progress() for child in children] progress = reduce(Progress.add_counts, progresses, None) return progress def handle_ajax(self, dispatch, data): # TODO: bounds checking ''' get = request.POST instance ''' if dispatch == 'goto_position': # set position to default value if either 'position' argument not # found in request or it is a non-positive integer position = data.get('position', u'1') if position.isdigit() and int(position) > 0: self.position = int(position) else: self.position = 1 return json.dumps({'success': True}) raise NotFoundError('Unexpected dispatch type') def student_view(self, context): # If we're rendering this sequence, but no position is set yet, # default the position to the first element if self.position is None: self.position = 1 ## Returns a set of all types of all sub-children contents = [] fragment = Fragment() # Is this sequential part of a timed or proctored exam? if self.is_time_limited: view_html = self._time_limited_student_view(context) # Do we have an alternate rendering # from the edx_proctoring subsystem? 
if view_html: fragment.add_content(view_html) return fragment for child in self.get_display_items(): progress = child.get_progress() rendered_child = child.render(STUDENT_VIEW, context) fragment.add_frag_resources(rendered_child) # `titles` is a list of titles to inject into the sequential tooltip display. # We omit any blank titles to avoid blank lines in the tooltip display. titles = [title.strip() for title in child.get_content_titles() if title.strip()] childinfo = { 'content': rendered_child.content, 'title': "\n".join(titles), 'page_title': titles[0] if titles else '', 'progress_status': Progress.to_js_status_str(progress), 'progress_detail': Progress.to_js_detail_str(progress), 'type': child.get_icon_class(), 'id': child.scope_ids.usage_id.to_deprecated_string(), } if childinfo['title'] == '': childinfo['title'] = child.display_name_with_default contents.append(childinfo) params = { 'items': contents, 'element_id': self.location.html_id(), 'item_id': self.location.to_deprecated_string(), 'position': self.position, 'tag': self.location.category, 'ajax_url': self.system.ajax_url, } fragment.add_content(self.system.render_template("seq_module.html", params)) return fragment def _time_limited_student_view(self, context): """ Delegated rendering of a student view when in a time limited view. This ultimately calls down into edx_proctoring pip installed djangoapp """ # None = no overridden view rendering view_html = None proctoring_service = self.runtime.service(self, 'proctoring') credit_service = self.runtime.service(self, 'credit') # Is this sequence designated as a Timed Examination, which includes # Proctored Exams feature_enabled = ( proctoring_service and credit_service and self.is_time_limited ) if feature_ena
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import shutil
import sys

from setuptools import setup


def get_version(package):
    """
    Return package version as listed in `__version__` in `init.py`.
    """
    with open(os.path.join(package, '__init__.py'), 'rb') as init_py:
        src = init_py.read().decode('utf-8')
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", src).group(1)


name = 'djangorestframework-jwt'
version = get_version('rest_framework_jwt')
package = 'rest_framework_jwt'
description = 'JSON Web Token based authentication for Django REST framework'
url = 'https://github.com/GetBlimp/django-rest-framework-jwt'
author = 'Jose Padilla'
author_email = 'jpadilla@getblimp.com'
license = 'MIT'
install_requires = [
    'PyJWT>=1.4.0,<2.0.0',
]


def read(*paths):
    """
    Build a file path from paths and return the contents.

    The file is opened in binary mode and decoded explicitly so the result
    does not depend on the locale's default encoding (same approach as
    get_version above); works on both Python 2 and 3.
    """
    with open(os.path.join(*paths), 'rb') as f:
        return f.read().decode('utf-8')


def get_packages(package):
    """
    Return root package and all sub-packages.
    """
    return [dirpath
            for dirpath, dirnames, filenames in os.walk(package)
            if os.path.exists(os.path.join(dirpath, '__init__.py'))]


def get_package_data(package):
    """
    Return all files under the root package, that are not in a
    package themselves.
    """
    walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
            for dirpath, dirnames, filenames in os.walk(package)
            if not os.path.exists(os.path.join(dirpath, '__init__.py'))]

    filepaths = []
    for base, filenames in walk:
        filepaths.extend([os.path.join(base, filename)
                          for filename in filenames])
    return {package: filepaths}


# `python setup.py publish` convenience path: build, upload and clean up.
if sys.argv[-1] == 'publish':
    if os.system('pip freeze | grep wheel'):
        print('wheel not installed.\nUse `pip install wheel`.\nExiting.')
        sys.exit()
    if os.system('pip freeze | grep twine'):
        print('twine not installed.\nUse `pip install twine`.\nExiting.')
        sys.exit()
    os.system('python setup.py sdist bdist_wheel')
    os.system('twine upload dist/*')
    shutil.rmtree('dist')
    shutil.rmtree('build')
    shutil.rmtree('djangorestframework_jwt.egg-info')
    print('You probably want to also tag the version now:')
    print(" git tag -a {0} -m 'version {0}'".format(version))
    print(' git push --tags')
    sys.exit()


setup(
    name=name,
    version=version,
    url=url,
    license=license,
    description=description,
    long_description=read('README.rst'),
    author=author,
    author_email=author_email,
    packages=get_packages(package),
    package_data=get_package_data(package),
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
"""Support for switches which integrates with other components."""
import logging

import voluptuous as vol

from homeassistant.components.switch import (
    ENTITY_ID_FORMAT,
    PLATFORM_SCHEMA,
    SwitchEntity,
)
from homeassistant.const import (
    ATTR_ENTITY_ID,
    ATTR_FRIENDLY_NAME,
    CONF_ENTITY_PICTURE_TEMPLATE,
    CONF_ICON_TEMPLATE,
    CONF_SWITCHES,
    CONF_UNIQUE_ID,
    CONF_VALUE_TEMPLATE,
    STATE_OFF,
    STATE_ON,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.script import Script

from .const import CONF_AVAILABILITY_TEMPLATE, DOMAIN, PLATFORMS
from .template_entity import TemplateEntity

_LOGGER = logging.getLogger(__name__)
_VALID_STATES = [STATE_ON, STATE_OFF, "true", "false"]

ON_ACTION = "turn_on"
OFF_ACTION = "turn_off"

SWITCH_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_ICON_TEMPLATE): cv.template,
        vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
        vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
        vol.Required(ON_ACTION): cv.SCRIPT_SCHEMA,
        vol.Required(OFF_ACTION): cv.SCRIPT_SCHEMA,
        vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
        vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
        vol.Optional(CONF_UNIQUE_ID): cv.string,
    }
)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_SWITCHES): cv.schema_with_slug_keys(SWITCH_SCHEMA)}
)


async def _async_create_entities(hass, config):
    """Build one SwitchTemplate per entry under the ``switches`` config key."""
    entities = []
    for object_id, entity_conf in config[CONF_SWITCHES].items():
        entities.append(
            SwitchTemplate(
                hass,
                object_id,
                # The slug doubles as the display name when none is given.
                entity_conf.get(ATTR_FRIENDLY_NAME, object_id),
                entity_conf.get(CONF_VALUE_TEMPLATE),
                entity_conf.get(CONF_ICON_TEMPLATE),
                entity_conf.get(CONF_ENTITY_PICTURE_TEMPLATE),
                entity_conf.get(CONF_AVAILABILITY_TEMPLATE),
                entity_conf[ON_ACTION],
                entity_conf[OFF_ACTION],
                entity_conf.get(CONF_UNIQUE_ID),
            )
        )
    return entities


async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the template switches."""
    await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
    async_add_entities(await _async_create_entities(hass, config))


class SwitchTemplate(TemplateEntity, SwitchEntity, RestoreEntity):
    """Representation of a Template switch."""

    def __init__(
        self,
        hass,
        device_id,
        friendly_name,
        state_template,
        icon_template,
        entity_picture_template,
        availability_template,
        on_action,
        off_action,
        unique_id,
    ):
        """Initialize the Template switch."""
        super().__init__(
            availability_template=availability_template,
            icon_template=icon_template,
            entity_picture_template=entity_picture_template,
        )
        self.entity_id = async_generate_entity_id(
            ENTITY_ID_FORMAT, device_id, hass=hass
        )
        self._name = friendly_name
        self._template = state_template
        # Scripts are registered under the integration's domain name.
        domain = __name__.split(".")[-2]
        self._on_script = Script(hass, on_action, friendly_name, domain)
        self._off_script = Script(hass, off_action, friendly_name, domain)
        self._state = False
        self._unique_id = unique_id

    @callback
    def _update_state(self, result):
        """Store the latest template render as a boolean state."""
        super()._update_state(result)
        if isinstance(result, TemplateError):
            self._state = None
        else:
            self._state = result.lower() in ("true", STATE_ON)

    async def async_added_to_hass(self):
        """Register template listeners, or restore state when optimistic."""
        if self._template is not None:
            self.add_template_attribute(
                "_state", self._template, None, self._update_state
            )
        else:
            # No state template: restore the last known state after startup;
            # there are no template events to listen for.
            await super().async_added_to_hass()
            state = await self.async_get_last_state()
            if state:
                self._state = state.state == STATE_ON
        await super().async_added_to_hass()

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def unique_id(self):
        """Return the unique id of this switch."""
        return self._unique_id

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    @property
    def should_poll(self):
        """Return the polling state."""
        return False

    async def async_turn_on(self, **kwargs):
        """Fire the on action."""
        await self._on_script.async_run(context=self._context)
        if self._template is None:
            # Optimistic mode: assume the action worked.
            self._state = True
            self.async_write_ha_state()

    async def async_turn_off(self, **kwargs):
        """Fire the off action."""
        await self._off_script.async_run(context=self._context)
        if self._template is None:
            # Optimistic mode: assume the action worked.
            self._state = False
            self.async_write_ha_state()

    @property
    def assumed_state(self):
        """State is assumed, if no template given."""
        return self._template is None
#!/usr/bin/env python3
"""Packaging script for lolbuddy (installs the ``lolbuddy`` console command)."""
from setuptools import setup, find_packages

version = '0.2.4'

# All distribution metadata collected in one mapping, then splatted into
# setup() below.  Values are exactly the published package metadata.
_metadata = dict(
    name='lolbuddy',
    version=version,
    description='a cli tool to update league of legends itemsets and ability order from champion.gg',
    author='Cyrus Roshan',
    author_email='hello@cyrusroshan.com',
    license='MIT',
    keywords=['lol', 'league', 'league of legends', 'item', 'ability'],
    url='https://github.com/CyrusRoshan/lolbuddy',
    packages=find_packages(),
    package_data={},
    # Runtime dependency used for concurrent HTTP fetches.
    install_requires=[
        'requests-futures >= 0.9.5',
    ],
    # Exposes lolbuddy.main() as the `lolbuddy` shell command.
    entry_points={
        'console_scripts': [
            'lolbuddy=lolbuddy:main',
        ],
    },
)

setup(**_metadata)
return val else: if not info_only: self.vended.setdefault(id(val), [val, 0])[1] += 1 t = hash(val) if isDictKey else None return ProxyInfo(self.endpoint, id(val), proxyhash=t).packed() def unpack(self, val, info_only = False): if ProxyInfo.isPacked(val): info = ProxyInfo.fromPacked(val) try: if self.endpoint == info.endpoint: try: obj = self.vended[info.remoteid][0] except KeyError: if not info_only: raise Exception("Whoops, "+self.endpoint+" can't find reference to object "+repr(info.remoteid)) else: info.dbgnote = 'missing local reference' return info if info.attrpath: for i in info.attrpath.split('.'): obj = getattr(obj, i) return obj else: return Proxy(self, info) if not info_only else info except: if not info_only: raise info.dbgnote = 'While unpacking, ' + ''.join(traceback.format_exc()) return info elif type(val) == tuple: return tuple(self.unpack(i, info_only) for i in val) elif type(val) == list: return [self.unpack(i, info_only) for i in val] elif type(val) == set: return {self.unpack(i, info_only) for i in val} elif type(val) == frozenset: return frozenset(self.unpack(i, info_only) for i in val) elif type(val) == dict: return {self.unpack(k, info_only):self.unpack(v, info_only) for k,v in val.iteritems()} elif type(val) == CodeType: return val else: return val def connectProxy(self): self.vended = {} self.sock.sendall('yo') chal = urandom(20) self.sock.sendall(chal) if self.sock.recv(20) != sha1(self.secret+chal).digest(): print >> sys.stderr, "Server failed challenge!" return None self.sock.sendall(sha1(self.secret+self.sock.recv(20)).digest()) return self.unpack(self.recvmsg()) def runServer(self, obj): if self.sock.recv(2) != 'yo': print >> sys.stderr, "Spurious connection!" return self.sock.sendall(sha1(self.secret+self.sock.recv(20)).digest()) cha
l = urandom(20) self.sock.sendall(chal) if self.sock.recv(20) != sha1(self.secret+chal).digest(): print >> sys.stderr, "Client failed challenge!" return None try: self.vended = {} self.sendmsg(
self.pack(obj)) while self.vended: self.handle(self.recvmsg()) except socket.error as e: if e.errno in (errno.EPIPE, errno.ECONNRESET): pass # Client disconnect is a non-error. else: raise finally: del self.vended def request(self, msg): self.sendmsg(msg) while True: x = self.recvmsg() if DEBUG: print >> sys.stderr, self.endpoint, self.unpack(x, True) if x[0] == 'ok': return self.unpack(x[1]) elif x[0] == 'exn': exntyp = exceptions.__dict__.get(x[1]) args = self.unpack(x[2]) trace = x[3] if exntyp and issubclass(exntyp, BaseException): if DEBUG: print >> sys.stderr, 'Remote '+''.join(trace) raise exntyp(*args) else: raise Exception(str(x[1])+repr(args)+'\nRemote '+''.join(trace)) else: self.handle(x) def handle(self, msg): if DEBUG: print >> sys.stderr, self.endpoint, self.unpack(msg, True) try: ret = { 'get' : self.handle_get, 'set' : self.handle_set, 'call' : self.handle_call, 'callattr' : self.handle_callattr, 'hash' : self.handle_hash, 'repr' : self.handle_repr, 'gc' : self.handle_gc, 'eval' : self.handle_eval, 'exec' : self.handle_exec, 'deffun' : self.handle_deffun, }[msg[0]](*msg[1:]) self.sendmsg(('ok', ret)) except: typ, val, tb = sys.exc_info() self.sendmsg(('exn', typ.__name__, self.pack(val.args), traceback.format_exception(typ, val, tb))) def get(self, proxy, attr): info = object.__getattribute__(proxy, '_proxyinfo') x, addlazy = self.request(('get', info.packed(), attr)) if addlazy: info.lazyattrs.add(attr) return x def handle_get(self, obj, attr): obj1 = self.unpack(obj) attr1 = getattr(obj1, attr) # Start of the "addlazy" perf hack, which may lead to incorrect behavior in some cases. 
addlazy = True addlazy = addlazy and type(attr1) not in (bool, int, long, float, complex, str, unicode, tuple, list, set, frozenset, dict) addlazy = addlazy and attr1 is not None try: addlazy = addlazy and not isinstance(getattr(obj1.__class__, attr), property) except: pass return self.pack(attr1), addlazy def set(self, proxy, attr, val): self.request(('set', object.__getattribute__(proxy, '_proxyinfo').packed(), attr, self.pack(val))) def handle_set(self, obj, attr, val): setattr(self.unpack(obj), attr, self.unpack(val)) def call(self, proxy, args, kwargs): return self.request(('call', object.__getattribute__(proxy, '_proxyinfo').packed(), self.pack(args or None), self.pack(kwargs or None))) def handle_call(self, obj, args, kwargs): return self.pack(self.unpack(obj)(*(self.unpack(args) or ()), **(self.unpack(kwargs) or {}))) def callattr(self, proxy, attr, args, kwargs): return self.request(('callattr', object.__getattribute__(proxy, '_proxyinfo').packed(), attr, self.pack(args or None), self.pack(kwargs or None))) def handle_callattr(self, obj, attr, args, kwargs): return self.pack(getattr(self.unpack(obj), attr)(*(self.unpack(args) or ()), **(self.unpack(kwargs) or {}))) def hash(self, proxy): return self.request(('hash', object.__getattribute__(proxy, '_proxyinfo').packed())) def handle_hash(self, obj): return self.pack(hash(self.unpack(obj))) def repr(self, proxy): return self.request(('repr', object.__getattribute__(proxy, '_proxyinfo').packed())) def handle_repr(self, obj): return self.pack(repr(self.unpack(obj))) def delete(self, proxy): info = object.__getattribute__(proxy, '_proxyinfo') if info.attrpath != '': return self.garbage.append(info.packed()) if len(self.garbage) > 50: try: self.request(('gc', tuple(self.garbage))) except socket.error: pass # No need for complaints about a dead connection self.garbage[:] = [] def handle_gc(self, objs): for obj in objs: try: info = ProxyInfo.fromPacked(obj) if info.endpoint != self.endpoint: continue assert 
info.attrpath == '' self.vended[info.remoteid][1] -= 1 if self.vended[info.remoteid][1] == 0: del self.vended[info.remoteid] elif self.vended[info.remoteid][1] < 0: print >> sys.stderr, "Too many releases on", self.unpack(obj, True), self.vended[info.remoteid][1] except: print >> sys.stderr, "Exception while releasing", self.unpack(obj, True) traceback.print_exc(sys.stderr) def disconnect(self): self.garbage = [] self.sock.close() def _eval(self, expr, local = None): ret, d = self.request(('eval', self.pack(expr), self.pack(local))) if local is not None: local.clear() local.update(d) return ret def handle_eval(self, expr, local): d = self.unpack(local) ret = eval(self.unpack(expr), globals(), d) return self.pack(ret), (self.pack(d) if d is not None else None) def _exec(self, stmt, local = None): d = self.request(('exec', self.pack(stmt), self.pack(local))) if local is not None: local.clear() local.update(d) def handle_exec(self, stmt, local): d = self.unpack(local) exec(self.unpack(stmt), globals(), d) return self.pack(d) if d is not None else None # Define a function on the remote side. Its __globals__ will be # the local client-side func.__globals__ filtered to the keys in # func_globals, underlaid with the remote server-side globals() # filtered to the keys in remote_globals. None is a special value # for the filters, and disables any filtering. def deffun(self, func, func_globals = (), remote_globals = None): glbls = {k:v for k,v in func.__globals__
# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import experimental_framework.benchmarking_unit as b_unit
from experimental_framework import heat_template_generation, common


class FrameworkApi(object):
    # Facade over the experimental framework: static methods for
    # initialization, test-case introspection and end-to-end execution.

    @staticmethod
    def init():
        """Initialize the framework for API (non-CLI) usage.

        :return: None
        """
        common.init(api=True)

    # @staticmethod
    # def get_available_test_cases():
    #     """
    #     Returns a list of available test cases.
    #     This list include eventual modules developed by the user, if any.
    #     Each test case is returned as a string that represents the full name
    #     of the test case and that can be used to get more information
    #     calling get_test_case_features(test_case_name)
    #
    #     :return: list of strings
    #     """
    #     return b_unit.BenchmarkingUnit.get_available_test_cases()

    @staticmethod
    def get_test_case_features(test_case):
        """Return the features of a specified test case.

        Features include description, requested parameters, allowed
        values, etc.

        :param test_case: name of the test case (string). The string
            represents the test case and can be obtained calling
            "get_available_test_cases()" method.
        :return: dict() containing the features of the test case
        :raises ValueError: if test_case is not a string
        """
        if not isinstance(test_case, str):
            raise ValueError('The provided test_case parameter has to be '
                             'a string')
        # Resolve the benchmark object for this single test-case name and
        # ask it for its feature description.
        benchmark = b_unit.BenchmarkingUnit.get_required_benchmarks(
            [test_case])[0]
        return benchmark.get_features()

    @staticmethod
    def execute_framework(
            test_cases,
            iterations,
            heat_template,
            heat_template_parameters,
            deployment_configuration,
            openstack_credentials
    ):
        """Execute the framework according to the inputs.

        :param test_cases: Test cases to be run on the workload
            (list of dict()). Example:
                test_case = dict()
                test_case['name'] = 'module.Class'
                test_case['params'] = dict()
                test_case['params']['throughput'] = '1'
                test_case['params']['vlan_sender'] = '1007'
                test_case['params']['vlan_receiver'] = '1006'
                test_cases = [test_case]
        :param iterations: Number of cycles to be executed (int)
        :param heat_template: (string) File name of the heat template of the
            workload to be deployed. It contains the parameters to be
            evaluated in the form of #parameter_name. (See
            heat_templates/vTC.yaml as example).
        :param heat_template_parameters: (dict) Parameters to be provided
            as input to the heat template. See
            http://docs.openstack.org/developer/heat/
            template_guide/hot_guide.html - section
            "Template input parameters" for further info.
        :param deployment_configuration: ( dict[string] = list(strings) )
            Dictionary of parameters representing the deployment
            configuration of the workload. The key is a string corresponding
            to the name of the parameter, the value is a list of strings
            representing the value to be assumed by a specific param. The
            parameters are user defined: they have to correspond to the
            place holders (#parameter_name) specified in the heat template.
        :param openstack_credentials: credentials checked by
            common.InputValidation.validate_os_credentials below.
        :return: dict() Containing results
        """
        common.init(api=True)

        # Input Validation
        common.InputValidation.validate_os_credentials(openstack_credentials)
        credentials = openstack_credentials

        msg = 'The provided heat_template does not exist'
        # NOTE(review): the caller-supplied heat_template argument is
        # unconditionally replaced here by a release-specific template name —
        # confirm whether the parameter is actually meant to be honored.
        if common.RELEASE == 'liberty':
            heat_template = 'vTC_liberty.yaml'
        else:
            heat_template = 'vTC.yaml'
        template = "{}{}".format(common.get_template_dir(), heat_template)
        common.InputValidation.validate_file_exist(template, msg)

        msg = 'The provided iterations variable must be an integer value'
        common.InputValidation.validate_integer(iterations, msg)

        msg = 'The provided heat_template_parameters variable must be a ' \
              'dictionary'
        common.InputValidation.validate_dictionary(heat_template_parameters,
                                                   msg)

        # Expand the base template into one concrete template per
        # deployment-configuration combination.
        log_msg = "Generation of all the heat templates " \
                  "required by the experiment"
        common.LOG.info(log_msg)
        heat_template_generation.generates_templates(heat_template,
                                                     deployment_configuration)

        benchmarking_unit = \
            b_unit.BenchmarkingUnit(
                heat_template, credentials, heat_template_parameters,
                iterations, test_cases)
        try:
            common.LOG.info("Benchmarking Unit initialization")
            benchmarking_unit.initialize()
            common.LOG.info("Benchmarking Unit Running")
            results = benchmarking_unit.run_benchmarks()
        finally:
            # finalize() always runs so deployed resources are torn down
            # even if a benchmark raises.
            common.LOG.info("Benchmarking Unit Finalization")
            benchmarking_unit.finalize()

        return results
django.contrib.auth.models import AnonymousUser from django.contrib.sites.models import Site from django.core.urlresolvers import reverse, NoReverseMatch, resolve, Resolver404 from django.db.models import Q from django.utils.translation import override as force_language, ugettext_lazy as _ from cms.api import get_page_draft, can_change_page from cms.constants import TEMPLATE_INHERITANCE_MAGIC, PUBLISHER_STATE_PENDING from cms.models import Placeholder, Title, Page, PageType, StaticPlaceholder from cms.toolbar.items import ButtonList, TemplateItem, REFRESH_PAGE from cms.toolbar_base import CMSToolbar from cms.toolbar_pool import toolbar_pool from cms.utils import get_language_from_request, page_permissions from cms.utils.conf import get_cms_setting from cms.utils.i18n import get_language_tuple, get_language_dict from cms.utils.page_permissions import ( user_can_change_page, user_can_delete_page, user_can_publish_page, ) from cms.utils.urlutils import add_url_parameters, admin_reverse from menus.utils import DefaultLanguageChanger # Identifiers for search ADMIN_MENU_IDENTIFIER = 'admin-menu' LANGUAGE_MENU_IDENTIFIER = 'language-menu' TEMPLATE_MENU_BREAK = 'Template Menu Break' PAGE_MENU_IDENTIFIER = 'page' PAGE_MENU_ADD_IDENTIFIER = 'add_page' PAGE_MENU_FIRST_BREAK = 'Page Menu First Break' PAGE_MENU_SECOND_BREAK = 'Page Menu Second Break' PAGE_MENU_THIRD_BREAK = 'Page Menu Third Break' PAGE_MENU_FOURTH_BREAK = 'Page Menu Fourth Break' PAGE_MENU_LAST_BREAK = 'Page Menu Last Break' HISTORY_MENU_BREAK = 'History Menu Break' MANAGE_PAGES_BREAK = 'Manage Pages Break' ADMIN_SITES_BREAK = 'Admin Sites Break' ADMINISTRATION_BREAK = 'Administration Break' CLIPBOARD_BREAK = 'Clipboard Break' USER_SETTINGS_BREAK = 'User Settings Break' ADD_PAGE_LANGUAGE_BREAK = "Add page language Break" REMOVE_PAGE_LANGUAGE_BREAK = "Remove page language Break" COPY_PAGE_LANGUAGE_BREAK = "Copy page language Break" TOOLBAR_DISABLE_BREAK = 'Toolbar disable Break' SHORTCUTS_BREAK = 'Shortcuts 
Break' @toolbar_pool.register class PlaceholderToolbar(CMSToolbar): """ Adds placeholder edit buttons if placeholders or static placeholders are detected in the template """ def populate(self): self.page = get_page_draft(self.request.current_page) def post_template_populate(self): super(PlaceholderToolbar, self).post_template_populate() self.add_wizard_button() def add_wizard_button(self): from cms.wizards.wizard_pool import entry_choices title = _("Create") if self.page: user = self.request.user page_pk = self.page.pk disabled = len(list(entry_choices(user, self.page))) == 0 else: page_pk = '' disabled = True url = '{url}?page={page}&language={lang}&edit'.format( url=reverse("cms_wizard_create"), page=page_pk, lang=self.toolbar.site_language, ) self.toolbar.add_modal_button(title, url, side=self.toolbar.RIGHT, disabled=disabled, on_close=REFRESH_PAGE) @toolbar_pool.register class BasicToolbar(CMSToolbar): """ Basic Toolbar for site and languages menu """ page = None _language_menu = None _admin_menu = None def init_from_request(self): self.page = get_page_draft(self.request.current_page) def populate(self): if not self.page: self.init_from_request() self.clipboard = self.request.toolbar.user_settings.clipboard self.add_admin_menu() self.add_language_menu() def add_admin_menu(self): if not self._admin_menu: self._admin_menu = self.toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER, self.current_site.name) # Users button self.add_users_button(self._admin_menu) # sites menu sites_queryset = Site.objects.order_by('name') if len(sites_queryset) > 1: sites_menu = self._admin_menu.get_or_create_menu('sites', _('Sites')) sites_menu.add_sideframe_item(_('Admin Sites'), url=admin_reverse('sites_site_changelist')) sites_menu.add_break(ADMIN_SITES_BREAK) for site in sites_queryset: sites_menu.add_link_item(site.name, url='http://%s' % site.domain, active=site.pk == self.current_site.pk) # admin self._admin_menu.add_sideframe_item(_('Administration'), url=admin_reverse('index')) 
self._admin_menu.add_break(ADMINISTRATION_BREAK) # cms users settings self._admin_menu.add_sideframe_item(_('User settings'), url=admin_reverse('cms_usersettings_change')) self._admin_menu.add_break(USER_SETTINGS_BREAK) # clipboard if self.toolbar.edit_mode_active: # True if the clipboard exists and there's plugins in it. clipboard_is_bound = self.toolbar.clipboard_plugin self._admin_menu.add_link_item(_('Clipboard...'), url='#', extra_classes=['cms-clipboard-trigger'], disabled=not clipboard_is_bound) self._admin_menu.add_link_item(_('Clear clipboard'), url='#', extra_class
es=['cms-clipboard-empty'], disabled=not clipboard_is_bound) self._admin_menu.add_break(CLIPBOARD_BREAK) # Disable toolbar self._admin_menu.add_link_item(_('Disable toolbar'), url='?%s' % get_cms_setting('CMS_TOOLBAR_URL__DISABLE')) self._admin_menu.add_break(TOOLBAR_DISAB
LE_BREAK) self._admin_menu.add_link_item(_('Shortcuts...'), url='#', extra_classes=('cms-show-shortcuts',)) self._admin_menu.add_break(SHORTCUTS_BREAK) # logout self.add_logout_button(self._admin_menu) def add_users_button(self, parent): User = get_user_model() if User in admin.site._registry: opts = User._meta if self.request.user.has_perm('%s.%s' % (opts.app_label, get_permission_codename('change', opts))): user_changelist_url = admin_reverse('%s_%s_changelist' % (opts.app_label, opts.model_name)) parent.add_sideframe_item(_('Users'), url=user_changelist_url) def add_logout_button(self, parent): # If current page is not published or has view restrictions user is redirected to the home page: # * published page: no redirect # * unpublished page: redirect to the home page # * published page with login_required: redirect to the home page # * published page with view permissions: redirect to the home page page_is_published = self.page and self.page.is_published(self.current_lang) if page_is_published and not self.page.login_required: anon_can_access = page_permissions.user_can_view_page( user=AnonymousUser(), page=self.page, site=self.current_site, ) else: anon_can_access = False on_success = self.toolbar.REFRESH_PAGE if anon_can_access else '/' # We'll show "Logout Joe Bloggs" if the name fields in auth.User are completed, else "Logout jbloggs". If # anything goes wrong, it'll just be "Logout". user_name = self.get_username() logout_menu_text = _('Logout %s') % user_name if user_name else _('Logout') parent.add_ajax_item( logout_menu_text, action=admin_reverse('logout'), active=True, on_success=on_success, method='GET', ) def add_language_menu(self): if settings.USE_I18N and not self._language_menu: self._language_menu = self.toolbar.get_or_create_menu(LANGUAGE_MENU_IDENTIFIER, _('Language'), position=-1) language_changer = getattr(self.request, '_language_changer', Default
`obj.foo` can be used in addition to `obj['foo']`. Raises Attribute/Key errors for missing references. >>> o = Storage(a=1, b=2) >>> assert(o.a == o['a']) >>> assert(o.b == o['b']) >>> o.a = 2 >>> print o['a'] 2 >>> x = o.copy() >>> assert(x == o) >>> del o.a >>> print o.a Traceback (most recent call last): ... AttributeError: a >>> print o['a'] Traceback (most recent call last): ... KeyError: 'a' >>> o._get_fields() Traceback (most recent call last): ... TypeError: ... """ def __getattr__(self, key): if key in self: return self[key] else: raise AttributeError(key) def __setattr__(self, key, value): self[key] = value def __delattr__(self, key): if key in self: del self[key] else: raise AttributeError(key) def __repr__(self): return '%s(%s)' % (self.__class__.__name__, dict.__repr__(self)) @classmethod def _get_fields(cls): """Return class' __init__() args excluding `self`. Assumes that calling class has actually implemented __init__(), otherwise, this will fail. """ # For classes, first element of args == self which we don't want. return getargspec(cls.__init__).args[1:] ################################################## # Get recipe related models ################################################## class Recipe(Storage): """Recipe model.""" def __init__(self, **kargs): self.id = kargs['id'] self.name = kargs['name'] self.rating = kargs.get('rating') self.totalTime = kargs.get('totalTime') or 0 self.totalTimeInSeconds = kargs.get('totalTimeInSeconds') or 0 self.ingredientLines = kargs.get('ingredientLines') or [] self.numberOfServings = kargs.get('numberOfServings') self.yields = kargs.get('yields') self.attributes = kargs.get('attributes') or {} self.source = RecipeSource(**(kargs.get('source') or {})) self.attribution = Attribution(**(kargs.get('attribution') or {})) # NOTE: For `flavors`, the keys are returned capitalized so normalize # to lowercase since search results' flavor keys are lowercase. 
flavors = kargs.get('flavors') or {} self.flavors = Flavors(**{key.lower(): value for key, value in flavors.iteritems()}) self.nutritionEstimates = [NutritionEstimate(**nute) for nute in (kargs.get('nutritionEstimates') or [])] self.images = [RecipeImages(**imgs) for imgs in (kargs.get('images') or [])] class Flavors(Storage): """Flavors model.""" def __init__(self, **kargs): self.salty = kargs.get('salty') self.meaty = kargs.get('meaty') self.piquant = kargs.get('piquant') self.bitter = kargs.get('bitter') self.sour = kargs.get('sour') self.sweet = kargs.get('sweet') class Attribution(Storage): """Attribution model.""" def __init__(self, **kargs): self.html = kargs.get('html') self.url = kargs.get('url') self.text = kargs.get('text') self.logo = kargs.get('logo') class NutritionEstimate(Storage): """Nutrition estimate model.""" def __init__(self, **kargs): self.attribute = kargs.get('attribute') self.description = kargs.get('description') self.value = kargs.get('value') self.unit = NutritionUnit(**(kargs.get('unit') or {})) class NutritionUnit(Storage): """Nutrition unit model.""" def __init__(self, **
kargs): self.id = kargs['id'] self.abbreviation = kargs.get('abbreviation') self.plural = kargs.get('plural') self.pluralAbbreviation = kargs.get('pluralAbbreviation') class RecipeImages(Storage): """Recipe images model.""" def __init__(self, **kargs): self.hostedLargeUrl = kargs.get('hostedLargeUrl')
self.hostedSmallUrl = kargs.get('hostedSmallUrl') class RecipeSource(Storage): """Recipe source model.""" def __init__(self, **kargs): self.sourceRecipeUrl = kargs.get('sourceRecipeUrl') self.sourceSiteUrl = kargs.get('sourceSiteUrl') self.sourceDisplayName = kargs.get('sourceDisplayName') ################################################## # Search related models ################################################## class SearchResult(Storage): """Search result model.""" def __init__(self, **kargs): self.totalMatchCount = kargs['totalMatchCount'] self.criteria = SearchCriteria(**kargs['criteria']) self.facetCounts = kargs['facetCounts'] self.matches = [SearchMatch(**match) for match in kargs['matches']] self.attribution = Attribution(**kargs['attribution']) class SearchMatch(Storage): """Search match model.""" def __init__(self, **kargs): self.id = kargs['id'] self.recipeName = kargs['recipeName'] self.rating = kargs.get('rating') self.totalTimeInSeconds = kargs.get('totalTimeInSeconds', 0) self.ingredients = kargs.get('ingredients') self.flavors = Flavors(**(kargs.get('flavors') or {})) self.smallImageUrls = kargs.get('smallImageUrls') self.sourceDisplayName = kargs.get('sourceDisplayName', '') self.attributes = kargs.get('attributes') class SearchCriteria(Storage): """Search criteria model.""" def __init__(self, **kargs): self.maxResults = kargs.get('maxResults') self.resultsToSkip = kargs.get('resultsToSkip') self.terms = kargs.get('terms') self.requirePictures = kargs.get('requirePictures') self.facetFields = kargs.get('facetFields') self.allowedIngredients = kargs.get('allowedIngredients') self.excludedIngredients = kargs.get('excludedIngredients') self.attributeRanges = kargs.get('attributeRanges', {}) self.allowedAttributes = kargs.get('allowedAttributes', []) self.excludedAttributes = kargs.get('excludedAttributes', []) self.allowedDiets = kargs.get('allowedDiets', []) self.nutritionRestrictions = kargs.get('nutritionRestrictions', {}) 
################################################## # Metadata related models ################################################## class MetaAttribute(Storage): """Base class for metadata attributes.""" def __init__(self, **kargs): self.id = kargs['id'] self.description = kargs['description'] self.localesAvailableIn = kargs['localesAvailableIn'] self.name = kargs['name'] self.searchValue = kargs['searchValue'] self.type = kargs['type'] class MetaHoliday(MetaAttribute): """Holiday metadata model.""" pass class MetaCuisine(MetaAttribute): """Cuisine metadata model.""" pass class MetaCourse(MetaAttribute): """Course metadata model.""" pass class MetaTechnique(MetaAttribute): """Technique metadata model.""" pass class MetaSource(Storage): """Source metadata model.""" def __init__(self, **kargs): self.faviconUrl = kargs['faviconUrl'] self.description = kargs['description'] self.searchValue = kargs['searchValue'] class MetaBrand(Storage): """Brand metadata model.""" def __init__(self, **kargs): self.faviconUrl = kargs['faviconUrl'] self.description = kargs['description'] self.searchValue = kargs['searchValue'] class MetaDiet(Storage): """Diet metadata model.""" def __init__(self, **kargs): self.id = kargs['id'] self.localesAvailableIn = kargs['localesAvailableIn'] self.longDescription = kargs['longDescription'] self.searchValue = kargs['searchValue'] self.shortDescription = kargs['shortDescription'] self.type = kargs['type'] class MetaAllergy(Storage): """Allergy metadata model.""" def __init__(self, **kargs): self.id = kargs['id']
return last_visit < cutoff def should_process_digest(realm_str): # type: (str) -> bool if realm_str in settings.SYSTEM_ONLY_REALMS: # Don't try to send emails to system-only realms return False return True # Changes to this should also be reflected in # zerver/worker/queue_processors.py:DigestWorker.consume() def queue_digest_recipient(user_profile, cutoff): # type: (UserProfile, datetime.datetime) -> None # Convert cutoff to epoch seconds for transit. event = {"user_profile_id": user_profile.id, "cutoff": cutoff.strftime('%s')} queue_json_publish("digest_emails", event, lambda event: None) def enqueue_emails(cutoff): # type: (datetime.datetime) -> None # To be really conservative while we don't have user timezones or # special-casing for companies with non-standard workweeks, only # try to send mail on Tuesdays. if timezone_now().weekday() != VALID_DIGEST_DAY: return for realm in Realm.objects.filter(deactivated=False, show_digest_email=True): if not should_process_digest(realm.string_id): continue user_profiles = UserProfile.objects.filter( realm=realm, is_active=True, is_bot=False, enable_digest_emails=True) for user_profile in user_profiles: if inactive_since(user_profile, cutoff): queue_digest_recipient(user_profile, cutoff) logger.info("%s is inactive, queuing for potential digest" % ( user_profile.email,)) def gather_hot_conversations(user_profile, stream_messages): # type: (UserProfile, QuerySet) -> List[Dict[str, Any]] # Gather stream conversations of 2 types: # 1. long conversations # 2. conversations where many different people participated # # Returns a list of dictionaries containing the templating # information for each hot conversation. conversation_length = defaultdict(int) # type: Dict[Tuple[int, Text], int] conversation_diversity = defaultdict(set) # type: Dict[Tuple[int, Text], Set[Text]] for user_message in stream_messages: if not user_message.message.sent_by_human(): # Don't include automated messages in the count.
continue key = (user_message.message.recipient.type_id, user_message.message.subject) conversation_diversity[key].add( user_message.message.sender.full_name) conversation_length[key] += 1 diversity_list = list(conversation_diversity.items()) diversity_list.sort(key=lambda entry: len(entry[1]), reverse=True) length_list = list(conversation_length.items()) length_list.sort(key=lambda entry: entry[1], reverse=True) # Get up to th
e 4 best conversations from the diversity list # and length list, filtering out overlapping conversations. hot_conversations = [elt[0] for elt in diversity_list[:2]] for candidate, _ in length_list: if candidate not in hot_conversations: hot_conversations.append(candidate) if len(hot_conversations) >= 4: break # There was so much overlap between the diversity and length lists that we # still have < 4 conversations. Try to use remaining diversity items to pad # out the hot conversations. num_convos = len(hot_conversations) if num_convos < 4: hot_conversations.extend([elt[0] for elt in diversity_list[num_convos:4]]) hot_conversation_render_payloads = [] for h in hot_conversations: stream_id, subject = h users = list(conversation_diversity[h]) count = conversation_length[h] # We'll display up to 2 messages from the conversation. first_few_messages = [user_message.message for user_message in stream_messages.filter( message__recipient__type_id=stream_id, message__subject=subject)[:2]] teaser_data = {"participants": users, "count": count - len(first_few_messages), "first_few_messages": build_message_list( user_profile, first_few_messages)} hot_conversation_render_payloads.append(teaser_data) return hot_conversation_render_payloads def gather_new_users(user_profile, threshold): # type: (UserProfile, datetime.datetime) -> Tuple[int, List[Text]] # Gather information on users in the realm who have recently # joined. 
if user_profile.realm.is_zephyr_mirror_realm: new_users = [] # type: List[UserProfile] else: new_users = list(UserProfile.objects.filter( realm=user_profile.realm, date_joined__gt=threshold, is_bot=False)) user_names = [user.full_name for user in new_users] return len(user_names), user_names def gather_new_streams(user_profile, threshold): # type: (UserProfile, datetime.datetime) -> Tuple[int, Dict[str, List[Text]]] if user_profile.realm.is_zephyr_mirror_realm: new_streams = [] # type: List[Stream] else: new_streams = list(get_active_streams(user_profile.realm).filter( invite_only=False, date_created__gt=threshold)) base_url = u"%s/ # narrow/stream/" % (user_profile.realm.uri,) streams_html = [] streams_plain = [] for stream in new_streams: narrow_url = base_url + hash_util_encode(stream.name) stream_link = u"<a href='%s'>%s</a>" % (narrow_url, stream.name) streams_html.append(stream_link) streams_plain.append(stream.name) return len(new_streams), {"html": streams_html, "plain": streams_plain} def enough_traffic(unread_pms, hot_conversations, new_streams, new_users): # type: (Text, Text, int, int) -> bool if unread_pms or hot_conversations: # If you have any unread traffic, good enough. return True if new_streams and new_users: # If you somehow don't have any traffic but your realm did get # new streams and users, good enough. return True return False def handle_digest_email(user_profile_id, cutoff): # type: (int, float) -> None user_profile = get_user_profile_by_id(user_profile_id) # We are disabling digest emails for soft deactivated users for the time. # TODO: Find an elegant way to generate digest emails for these users. if user_profile.long_term_idle: return None # Convert from epoch seconds to a datetime object. 
cutoff_date = datetime.datetime.fromtimestamp(int(cutoff), tz=pytz.utc) all_messages = UserMessage.objects.filter( user_profile=user_profile, message__pub_date__gt=cutoff_date).order_by("message__pub_date") context = common_context(user_profile) # Start building email template data. context.update({ 'realm_name': user_profile.realm.name, 'name': user_profile.full_name, 'unsubscribe_link': one_click_unsubscribe_link(user_profile, "digest") }) # Gather recent missed PMs, re-using the missed PM email logic. # You can't have an unread message that you sent, but when testing # this causes confusion so filter your messages out. pms = all_messages.filter( ~Q(message__recipient__type=Recipient.STREAM) & ~Q(message__sender=user_profile)) # Show up to 4 missed PMs. pms_limit = 4 context['unread_pms'] = build_message_list( user_profile, [pm.message for pm in pms[:pms_limit]]) context['remaining_unread_pms_count'] = min(0, len(pms) - pms_limit) home_view_recipients = [sub.recipient for sub in Subscription.objects.filter( user_profile=user_profile, active=True, in_home_view=True)] stream_messages = all_messages.filter( message__recipient__type=Recipient.STREAM, message__recipient__in=home_view_recipients) # Gather hot conversations. context["hot_conversations"] = gather_hot_conversations( user_profile, stream_messages) # Gather new streams. new_st
# -*- coding: utf-8 -*-
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Stéphane Albert
#
import os

from gabbi import driver

from cloudkitty.tests.gabbi import fixtures
from cloudkitty.tests.gabbi.rating.pyscripts import fixtures as py_fixtures

# Directory (relative to this module) holding the gabbi YAML test documents.
TESTS_DIR = 'gabbits'


def load_tests(loader, tests, pattern):
    """unittest ``load_tests`` hook: build the gabbi-driven suite.

    Each YAML document under TESTS_DIR becomes a test case.  No real HTTP
    host is contacted (host=None): requests are intercepted and served by
    the WSGI app created by ``fixtures.setup_app``.
    """
    here = os.path.dirname(__file__)
    return driver.build_tests(
        os.path.join(here, TESTS_DIR),
        loader,
        host=None,
        intercept=fixtures.setup_app,
        fixture_module=py_fixtures,
    )
from time import sleep

from worker import delayable
import requests


@delayable
def add(x, y, delay=None):
    """Return x + y after sleeping, simulating a slow unit of work.

    If ``delay`` is falsy, sleep for the sum itself when it lies strictly
    between 0 and 5, otherwise for 3 seconds.
    """
    if delay:
        pause = delay
    else:
        total = x + y
        pause = total if 0 < total < 5 else 3
    sleep(pause)
    return x + y


@delayable
def get(*args, **kwargs):
    """Fetch a URL via ``requests.get`` and return the raw response body."""
    response = requests.get(*args, **kwargs)
    return response.content
# Use this script to collapse a multi-level folder of images into one folder.
import fnmatch
import os
import shutil

# Root folder whose nested *.jpg files get flattened into the folder itself.
folderName = 'unknown'

# Collect every .jpg path first, so moving files cannot disturb the walk.
matches = []
for root, dirnames, filenames in os.walk(folderName):
    for filename in fnmatch.filter(filenames, '*.jpg'):
        matches.append(os.path.join(root, filename))

# Move each file into the top-level folder under a sequential numeric name.
for idx, match in enumerate(matches):
    # Parenthesized print so the script runs under both Python 2 and 3
    # (the original bare `print match` statement was Python-2 only).
    print(match)
    shutil.move(match, os.path.join(folderName, str(idx) + '.jpg'))
# -*- coding: UTF-8 -*-
import re

# Compiled once at import time (was recompiled on every loop iteration):
# matches any CJK unified ideograph; used to decide between a regex search
# and a MongoDB $text full-text search.
_ZH_PATTERN = re.compile(u'[\u4e00-\u9fa5]+')

# Fields scanned when the user supplies a bare term or an 'all:<value>' term.
_ALL_FIELDS = ('ip', 'banner', 'port', 'time', 'webinfo.tag',
               'webinfo.title', 'server', 'hostname')


def mgo_text_split(query_text):
    """Split text on punctuation and quote each word so a MongoDB $text
    search treats the result as an exact phrase match."""
    sep = r'[`\-=~!@#$%^&*()_+\[\]{};\'\\:"|<,./<>?]'
    word_lst = re.split(sep, query_text)
    return ' '.join('"{}"'.format(w) for w in word_lst)


# Search logic
def querylogic(list):
    """Build a MongoDB query dict from a list of search terms.

    Each term is either 'key:value' (field-specific search) or a single
    bare string (matched case-insensitively across _ALL_FIELDS).
    NOTE: the parameter is named ``list`` (shadowing the builtin) to keep
    the public signature unchanged for existing callers.
    """
    query = {}
    if len(list) > 1 or len(list[0].split(':')) > 1:
        for term in list:
            if ':' not in term:
                continue  # terms without a key prefix are silently ignored
            q_key, q_value = term.split(':', 1)
            if q_key == 'port':
                query['port'] = int(q_value)
            elif q_key == 'banner':
                # Chinese text cannot use the full-text index, so fall back
                # to a case-insensitive regex when CJK characters appear.
                if _ZH_PATTERN.search(q_value):
                    query['banner'] = {"$regex": q_value, '$options': 'i'}
                else:
                    text_query = mgo_text_split(q_value)
                    query['$text'] = {'$search': text_query,
                                      '$caseSensitive': True}
            elif q_key == 'ip':
                query['ip'] = {"$regex": q_value}
            elif q_key == 'server':
                query['server'] = q_value.lower()
            elif q_key == 'title':
                query['webinfo.title'] = {"$regex": q_value, '$options': 'i'}
            elif q_key == 'tag':
                query['webinfo.tag'] = q_value.lower()
            elif q_key == 'hostname':
                query['hostname'] = {"$regex": q_value, '$options': 'i'}
            elif q_key == 'all':
                query['$or'] = [{f: {"$regex": q_value, '$options': 'i'}}
                                for f in _ALL_FIELDS]
            else:
                # Unknown keys become a literal field match.
                query[q_key] = q_value
    else:
        # Single bare term: case-insensitive regex over all search fields.
        query['$or'] = [{f: {"$regex": list[0], '$options': 'i'}}
                       for f in _ALL_FIELDS]
    return query
# 005_cleaner.py
#####################################################################

##################################
# Module imports; extend sys.path so the sibling `voca` helpers resolve.
import sys
sys.path.insert(0 , 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/')
from voca import AddLog , StringFormatter , OutFileCreate , OdditiesFinder

##################################
# Paths and file names for this mission.
missionName = '005'
AddLog('title' , '{} : Début du nettoyage du fichier'.format(missionName))
work_dir = 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/raw/{}_raw/'.format(missionName)
# Name of the source file
raw_file = 'src'

##################################
# Read the raw source file; `with` guarantees the handle is closed
# (the original `open(...).read()` leaked the file handle).
with open(work_dir + raw_file, 'r') as source_file:
    raw_string_with_tabs = source_file.read()
# Tabs separate records in the raw dump: turn them into line breaks,
# then split the whole string into a list of entries.
raw_string_with_cr = raw_string_with_tabs.replace('\t', '\n')
raw_list = raw_string_with_cr.splitlines()
# Remove formatting oddities.
AddLog('subtitle' , 'Début de la fonction OdditiesFinder')
list_without_oddities = OdditiesFinder( raw_list )
# Normalise every entry.
AddLog('subtitle' , 'Début de la fonction StringFormatter')
ref_list = [StringFormatter(line) for line in list_without_oddities]

##################################
# Write the output files.
AddLog('subtitle' , 'Début de la fonction OutFileCreate')
OutFileCreate('C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/out/','{}_src'.format(missionName),ref_list,'prenoms masculins italiens')
# Pagination: number of items displayed per page.
PER_PAGE = 50

# Interval between announcements; presumably seconds -- TODO confirm with usage.
ANNOUNCE_INTERVAL = 300
import pygame
import os

from buffalo import utils
from item import Item

# User interface for trading with NPCs
# Similar to the crafting UI, with some minor differences
# The biggest thing is that it only appears when you "talk to" (read click on)
# A trader NPC and disappears when you leave that window, and only contains a
# Limited number of trades
class TradingUI:

    # Pixel size of one item tile, and padding between tiles.
    BUTTON_SIZE = 32
    PADDING = 6

    def __init__(self, inventory, tradeSet):
        # inventory -- the player's inventory; queried for affordability
        #              and mutated when a trade completes.
        # tradeSet  -- iterable of trade objects (price/goods dicts, name,
        #              canTrade) offered by this NPC.
        self.tradeSet = tradeSet
        self.inventory = inventory
        self.surface = utils.empty_surface((228,500))
        self.surface.fill((100,100,100,100))
        # NOTE(review): this arithmetic relies on Python 2 integer division
        # for pixel coordinates; under Python 3 these would become floats.
        self.pos = (utils.SCREEN_W / 2 + self.surface.get_width() / 2 + 350, utils.SCREEN_H / 2 - 150)
        # Parallel lists: tileRects[i] is the clickable area of tileTrades[i].
        self.tileRects = list()
        self.tileTrades = list()
        self.updateTradeTable()

    def updateTradeTable(self):
        # Rebuild the whole trade surface from self.tradeSet. Called on
        # construction and after every completed trade so that the
        # affordability highlighting stays current.
        self.surface = utils.empty_surface((228,500))
        self.surface.fill((100,100,100,100))
        self.tileRects = list()
        self.tileTrades = list()
        tradeTiles = list()
        total_y = 0
        # Render one tile per trade, stacking them vertically.
        for t in self.tradeSet:
            newTile = self.generateTradeTile(t)
            tradeTiles.append(newTile)
            self.tileRects.append(pygame.Rect(0, total_y, newTile.get_width(), newTile.get_height()))
            self.tileTrades.append(t)
            total_y += newTile.get_height()
        # Re-create the backing surface at the exact combined height.
        newSurface = utils.empty_surface((228, total_y))
        newSurface.fill((100,100,100,255))
        currY = 0
        for surf in tradeTiles:
            newSurface.blit(surf, (0, currY))
            currY += surf.get_height()
        self.surface = newSurface

    def generateTradeTile(self, trade):
        # Render a single trade row: price items on the left (3 per row),
        # goods on the right (2 per row), an arrow between them, and the
        # trade name in the bottom-right corner.
        y_length = 36 * (len(trade.price.keys()) / 3) + 78;
        newScreen = utils.empty_surface((228, y_length))
        # Price side: tint an item red when the inventory cannot cover it.
        for num, item in enumerate(trade.price.keys()):
            x = ((num % 3) * TradingUI.BUTTON_SIZE) + TradingUI.PADDING
            y = ((num / 3) * TradingUI.BUTTON_SIZE) + TradingUI.PADDING
            itemSurface = pygame.Surface.copy(Item(item, quantity = trade.price[item]).surface)
            if self.inventory.getTotalItemQuantity(item) < trade.price[item]:
                itemSurface.fill(pygame.Color(255,0,0,250)[0:3] + (0,), None, pygame.BLEND_RGBA_ADD)
            newScreen.blit(itemSurface, (x,y))
        # Goods side: laid out right-to-left from x=192.
        for num, item in enumerate(trade.goods.keys()):
            x = 192 - (((num % 2) * TradingUI.BUTTON_SIZE) + TradingUI.PADDING)
            y = ((num / 2) * TradingUI.BUTTON_SIZE) + TradingUI.PADDING
            newScreen.blit(Item(item, quantity = trade.goods[item]).surface, (x,y))
        # Arrow graphic between price and goods.
        path = os.path.join(os.path.join(*list(['assets'] + ['items'] + ["arrow.png"])))
        arrowSurface = pygame.image.load(path)
        newScreen.blit(arrowSurface,(114, (newScreen.get_height() / 2) - arrowSurface.get_height() / 2))
        # Trade name label: yellow when affordable, red otherwise.
        myfont = pygame.font.SysFont("monospace", 15)
        color = (255,255,0)
        if not trade.canTrade(self.inventory):
            color = (255,0,0)
        label = myfont.render(str(trade.name), 1, color)
        newScreen.blit(label, (newScreen.get_width() - label.get_width() - 2, newScreen.get_height() - label.get_height() - 2))
        # 1px black border around the tile.
        pygame.draw.rect(newScreen, (0,0,0,255), pygame.Rect(0,0,228, y_length), 1)
        return newScreen

    def blit(self, dest, pos):
        # Draw the trading window onto `dest` at `pos`.
        dest.blit(self.surface, pos)

    def update(self):
        # No per-frame state; the surface is rebuilt only when a trade runs.
        pass

    def mouseDown(self, pos):
        # Handle a click at `pos` (surface-local coordinates): execute the
        # clicked trade if the inventory can afford it, then refresh the UI.
        for tile in self.tileRects:
            if(tile.collidepoint(pos)):
                clickedTrade = self.tileTrades[self.tileRects.index(tile)]
                if not clickedTrade.canTrade(self.inventory):
                    return
                # Pay the price...
                for item in clickedTrade.price.keys():
                    self.inventory.removeItemQuantity(item, clickedTrade.price[item])
                # ...and receive the goods.
                for item in clickedTrade.goods.keys():
                    newItem = Item(item)
                    newItem.quantity = clickedTrade.goods[item]
                    self.inventory.addItem(newItem)
                self.inventory.update()
                self.updateTradeTable()
                return
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

from datetime import datetime
from os import environ

from requests import get
from bs4 import BeautifulSoup, NavigableString
from icalendar import Calendar, Event

# Needed to talk TLS 1.0 to the ffme.fr servers.
environ["OPENSSL_CONF"] = "openssl.cnf"


def scrape_url(url):
    # Scrape one ffme.fr listing page: every 'infos_colonne_box' div holds
    # a table whose first row is the header; returns a list of dicts mapping
    # header text to the corresponding cell element (bs4 Tag, not text).
    page = get(url)
    result = [ ]
    for div in BeautifulSoup(page.text, 'lxml').find_all('div', class_='infos_colonne_box'):
        rows = div.find_all('tr')
        if rows:
            headers = [ ' '.join(x.stripped_strings)
                        for x in rows[0] if not isinstance(x, NavigableString) ]
            for row in rows[1:]:
                result.append(
                    dict(zip(headers,
                             [ x for x in row if not isinstance(x, NavigableString) ])) )
    return result


def create_event_formation(d):
    # Build an iCalendar event from one training-course row (dict produced
    # by scrape_url). The course number feeds both the UID and the URL.
    event = Event()
    dates = tuple(d['Date'].stripped_strings)
    num = ''.join(d['N°'].stripped_strings)
    event.add('summary', ' '.join(d['Nom'].stripped_strings))
    event.add('dtstart', datetime.strptime(dates[0], '%d/%m/%y').date())
    # A second date, when present, is the end of the course.
    if len(dates) > 1:
        event.add('dtend', datetime.strptime(dates[1], '%d/%m/%y').date())
    event.add('location', ' '.join(d['Lieu'].stripped_strings).replace("\r\n", ' '))
    event.add('uid', "%s@formation.ffme.fr" % (num,) )
    event.add('description', 'http://www.ffme.fr/formation/fiche-evenement/%s.html' % (num, ))
    return event


def create_event_compet(d):
    # Build an iCalendar event from one competition row; the UID is derived
    # from the digits of the detail-page link.
    event = Event()
    nom_lieu = tuple(d['Nom de la compétition Lieu'].stripped_strings)
    dates = tuple(d['Date'].stripped_strings)
    link = 'http://www.ffme.fr'+d['Nom de la compétition Lieu'].a.get('href')
    event.add('summary', nom_lieu[0])
    event.add('location', nom_lieu[1])
    event.add('dtstart', datetime.strptime(dates[0], '%d/%m/%y').date())
    if len(dates) > 1:
        event.add('dtend', datetime.strptime(dates[1], '%d/%m/%y').date())
    event.add('uid', "%s@competition.ffme.fr" % (''.join(( a for a in link if a.isdigit())),) )
    event.add('description', link)
    return event


# --- Trainings calendar: one fixed listing page per course type. ---
cal = Calendar()
cal.add('prodid', '-//Calendrier formations FFME//ffme.fr//')
cal.add('version', '2.0')
cal.add("X-WR-CALNAME", "Calendrier formations FFME")
urls = ('http://www.ffme.fr/formation/calendrier-liste/FMT_ESCSAE.html',
        'http://www.ffme.fr/formation/calendrier-liste/FMT_ESCSNE.html',
        #'http://www.ffme.fr/formation/calendrier-liste/FMT_ESCFCINI.html',
        'http://www.ffme.fr/formation/calendrier-liste/FMT_ESCMONESP.html',
        )
for u in urls:
    for d in scrape_url(u):
        cal.add_component(create_event_formation(d))
with open('cal_formation.ics', 'w') as f:
    f.write(cal.to_ical().decode('utf-8'))

# --- Competitions calendar: paginated listing, fetch until an empty page. ---
cal = Calendar()
cal.add('prodid', '-//Calendrier compétitions FFME//ffme.fr//')
cal.add('version', '2.0')
cal.add("X-WR-CALNAME", "Calendrier compétitions FFME")
url = 'http://www.ffme.fr/competition/calendrier-liste.html?DISCIPLINE=ESC&CPT_FUTUR=1'
page = 1
while True:
    data = scrape_url(url + "&page=" + repr(page))
    if not data:
        break
    for d in data:
        cal.add_component(create_event_compet(d))
    page +=1
with open('cal_competition.ics', 'w') as f:
    f.write(cal.to_ical().decode('utf-8'))
import sys


def _count_lines(path):
    """Return the number of lines in the file at *path*."""
    with open(path, "r") as f:
        return sum(1 for _ in f)


def _count_dimensions(path):
    """Return the number of whitespace-separated fields on the first line.

    The first line is assumed representative of every point's dimensionality.
    """
    with open(path, "r") as f:
        return len(f.readline().split())


def main():
    """Convert a whitespace-separated dataset to a normalized point listing.

    Usage: python prog <datasetFilename> [<maxPoints>]

    Prints a header line '<numDimensions> <numPoints>' followed by one point
    per line with every field rendered as a float. Exits with an error
    message when no filename is given or dimensionality cannot be inferred.
    """
    if len(sys.argv) < 2:
        sys.exit("python {} <datasetFilename> {{<maxPoints>}}".format(sys.argv[0]))
    datasetFilename = sys.argv[1]
    maxPoints = int(sys.argv[2]) if len(sys.argv) >= 3 else None

    # One pass to count points, one short read for the dimensionality.
    lineCount = _count_lines(datasetFilename)
    numDimensions = _count_dimensions(datasetFilename)
    if numDimensions == 0:
        sys.exit("Could not determine dimensionality of dataset")

    # Header must be printed first, so the point count is computed up front.
    numPoints = min(lineCount, maxPoints) if maxPoints else lineCount
    print("{} {}".format(numDimensions, numPoints))

    # Stream the file, echoing each line as floats, stopping at maxPoints.
    with open(datasetFilename, "r") as f:
        for pointsRead, line in enumerate(f, start=1):
            print(" ".join(str(float(x)) for x in line.split()))
            if maxPoints and pointsRead >= maxPoints:
                break


if __name__ == "__main__":
    main()
_x * tile_x + wid.x, pos_y * tile_y + wid.y), size=(tile_x, tile_y)) Color(1, 1, 1, 1) Rectangle(texture=self.trash_img.texture, pos=( pos_x * tile_x + wid.x, pos_y * tile_y + wid.y ), size=(tile_x, tile_y)) elif isinstance(thing, Clean): Color(0.1, 0.5, 0.1) Rectangle( pos=( pos_x * tile_x + wid.x, pos_y * tile_y + wid.y), size=(tile_x, tile_y)) for thing in [thing for thing in self.env.things if isinstance(thing, Wall)]: pos_x, pos_y = thing.location Color(1, 1, 1, 1) Rectangle(texture=self.wall_img.texture, pos=(pos_x * tile_x + wid.x, pos_y * tile_y + wid.y), size=(tile_x, tile_y)) for thing in [thing for thing in self.env.things if isinstance(thing, ALL_AGENTS.get(self.agentA, Agent)) or isinstance(thing, ALL_AGENTS.get(self.agentB, Agent))]: pos_x, pos_y = thing.location if self.agentA in ALL_AGENTS and\ isinstance(thing, ALL_AGENTS[self.agentA]): self.scoreA = thing.performance labelA.text = self.get_scores()[0] Color(1, 1, 1, 1) Rectangle(texture=self.agentAImg.texture, pos=(pos_x * tile_x + wid.x, pos_y * tile_y + wid.y), size=(tile_x, tile_y)) if self.agentB in ALL_AGENTS and\ isinstance(thing, ALL_AGENTS[self.agentB]): self.scoreB = thing.performance labelB.text = self.get_scores()[1] Color(1, 1, 1, 1) Rectangle(texture=self.agentBImg.texture, pos=(pos_x * tile_x + wid.x, pos_y * tile_y + wid.y), size=(tile_x, tile_y)) def load_env(self, labels, wid, *largs): """Load and prepare the environment.""" self.running = False self.counter_steps = 0 if self.map is None or self.map == "Maps": gen_popup("Error!", "No map selected...").open() return elif self.agentA not in ALL_AGENTS and\ self.agentB not in ALL_AGENTS: gen_popup("Error!", "You must choose at least one agent...").open() return self.__initialize_env() self.initialized = True self.update_canvas(labels, wid) def running_step(self, labels, wid, n_step=None, *largs): """Run the program of the environment, called from run.""" if self.env is not None: if n_step is not None: if self.counter_steps == n_step: 
self.running = False self.btn_100step.state = "normal" self.counter_steps = 0 return False else: self.counter_steps += 1 if not self.running: return False self.env.step() self.update_canvas(labels, wid) def btn_step(self, labels, wid, *largs): """Update the environment one step.""" if not self.initialized: gen_popup("Error!", "You must load a map...").open() return elif self.agentA == "Agent A" and self.agentB == "Agent B": popup = gen_popup( "Error!", "Agent not selected, reset required...", False).open() Clock.schedule_once(popup.dismiss, timeout=2) Clock.schedule_once(self.partial_reset, timeout=2) return if self.env is not None: self.env.step() self.update_canvas(labels, wid) def btn_100step(self, function, labels, wid, *largs): "
""Update the environment one step.""" if not self.initialized: gen_popup("Error!", "You must load a map...").open() self.btn_100step.state = "normal" return elif self.agentA == "Agent A" and self.agentB == "Agent B": popup = gen_popup( "Error!", "Agent not se
lected, reset required...", False).open() Clock.schedule_once(popup.dismiss, timeout=2) Clock.schedule_once(self.partial_reset, timeout=2) self.btn_100step.state = "down" self.running = True Clock.schedule_interval(partial(function, labels, wid, 100), 1 / 30.) def btn_run(self, function, labels, wid, *largs): """Run a function for the update.""" if not self.initialized: gen_popup("Error!", "You must load a map...").open() self.btn_run.state = "normal" return elif self.agentA == "Agent A" and self.agentB == "Agent B": popup = gen_popup( "Error!", "Agent not selected, reset required...", False).open() Clock.schedule_once(popup.dismiss, timeout=2) Clock.schedule_once(self.partial_reset, timeout=2) self.btn_run.state = "down" self.running = True Clock.schedule_interval(partial(function, labels, wid), 1 / 30.) def btn_stop(self, function, *largs): """Stop a specific fuction.""" if not self.initialized: gen_popup("Error!", "You must load a map...").open() return elif self.agentA == "Agent A" and self.agentB == "Agent B": popup = gen_popup( "Error!", "Agent not selected, reset required...", False).open() Clock.schedule_once(popup.dismiss, timeout=2) Clock.schedule_once(self.partial_reset, timeout=2) self.running = False self.counter_steps = 0 self.btn_run.state = "normal" self.btn_100step.state = "normal" Clock.unschedule(function) @staticmethod def reset_popup(popup, *largs): popup.dismiss() gen_popup("INFO", "Reset done!!!").open() def reset_all(self, labels, spinners, wid, *largs): """Clear the entire environment.""" popup = gen_popup("WARNING!", "I'm deleting everything!!!", False) popup.open() self.initialized = False self.agentA = "Agent A" self.agentB = "Agent B" self.map = None self.running = False self.counter_steps = 0 self.scoreA = 0 self.scoreB = 0 self.__initialize_env() wid.canvas.clear() labelA, labelB = labels labelA.text = self.get_scores()[0] labelB.text = self.get_scores()[1] reload(env_list) reload(agent_list) global ALL_AGENTS global ALL_MAPS 
ALL_AGENTS = agent_list.load_agents() ALL_MAPS = env_list.ALL_MAPS spinnerA, spinnerB, spinnerMap = spinners spinnerA.values = sorted( [agent for agent in list(ALL_AGENTS.keys())]) + ["Agent A"] spinnerB.values = sorted( [agent for agent in list(ALL_AGENTS.keys())]) + ["Agent B"] spinnerMap.values = sorted( [map for map in list(ALL_MAPS.keys())]) + ["Maps"] spinnerA.text = "Agent A" spinnerB.text = "Agent B" spinnerMap.text = "Maps" self.counter.text = str(self.counter_steps) Clock.schedule_once(partial(self.reset_popup, popup), timeout=1) def reload_agents(self, labels, spinners, wid, *largs): """Reload all agents classes.""" self.running = False self.counter_steps = 0 self.scoreA = 0 self.scoreB = 0 labelA, labelB = labels labelA.text = self.get_scores()[0] labelB.text = self.get_score
import socket
import string

from driver import driver


class WiFiMouseDriver(driver):
    # Driver that remote-controls a device running the "WiFi Mouse" server
    # app over a plain TCP socket, speaking its short ASCII command protocol
    # ("mos ..." for mouse events, "key ..." for keyboard events).
    # NOTE(review): Python 2 code (print statement); the exact protocol
    # framing below is inferred from usage -- confirm against the app docs.

    # Friendly key names mapped to the protocol's action-key tokens.
    ACTION_KEYS = {
        'TAB': 'TAB',
        'ENTER': 'RTN',
        'ESCAPE': 'ESC',
        'PAGE_UP': 'PGUP',
        'PAGE_DOWN': 'PGDN',
        'END': 'END',
        'HOME': 'HOME',
        'LEFT': 'LF',
        'UP': 'UP',
        'RIGHT': 'RT',
        'DOWN': 'DW',
        'BACK_SPACE': 'BAS',
        'F1': 'F1',
        'F2': 'F2',
        'F3': 'F3',
        'F4': 'F4',
        'F5': 'F5',
        'F6': 'F6',
        'F7': 'F7',
        'F8': 'F8',
        'F9': 'F9',
        'F10': 'F10',
        'F11': 'F11',
        'F12': 'F12',
        'CONTROL': 'CTRL',
        'ALT': 'ALT',
        'SHIFT': 'SHIFT',
    }

    # TCP port the WiFi Mouse server listens on.
    SERVER_PORT = 1978

    def __init__(self, ip):
        # Open a blocking TCP connection to the server at `ip`.
        self._ip = ip
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.connect((ip, WiFiMouseDriver.SERVER_PORT))
        self._socket.setblocking(1)

    def _send(self, data):
        # Send one raw protocol command, echoing it for debugging.
        print "sending: " + data
        self._socket.send(data)

    def left_click(self):
        # Single left click; returns self so calls can be chained.
        self._send("mos 1c")
        return self

    def right_click(self):
        # Right button down followed by up -- presumably press+release;
        # confirm against the protocol.
        self._send("mos 5R r d")
        self._send("mos 5R r u")
        return self

    def move_mouse(self, deltaX, deltaY):
        # Move the pointer by (deltaX, deltaY), split into chunks because
        # the protocol caps a single move at 99 units per axis. The numeric
        # prefix in each command is the payload length ("<x> <y>" plus the
        # separating spaces).
        # maximum movement is 99 in any direction
        currX = deltaX
        if deltaX > 0:
            while currX > 0:
                moveX = min(currX, 99)
                self._send("mos " + str(len(str(moveX)) + len(str(0)) + 3) + "m " + str(moveX) + " " + str(0))
                currX -= moveX
        elif deltaX < 0:
            while currX < 0:
                moveX = max(currX, -99)
                self._send("mos " + str(len(str(moveX)) + len(str(0)) + 3) + "m " + str(moveX) + " " + str(0))
                currX -= moveX
        currY = deltaY
        if deltaY > 0:
            while currY > 0:
                moveY = min(currY, 99)
                self._send("mos " + str(len(str(0)) + len(str(moveY)) + 3) + "m " + str(0) + " " + str(moveY))
                currY -= moveY
        elif deltaY < 0:
            while currY < 0:
                moveY = max(currY, -99)
                self._send("mos " + str(len(str(0)) + len(str(moveY)) + 3) + "m " + str(0) + " " + str(moveY))
                currY -= moveY
        return self

    def typeText(self, text):
        # Send each character as its own "key <len><char>" command.
        # NOTE(review): len(char) is always 1 for single characters, so the
        # length prefix is constant -- possibly intentional framing.
        format = "key "
        for char in text:
            self._send(format + str(len(char)) + char)
        return self

    def press_action_key(self, name, shift=False, ctrl=False, alt=False):
        # Press a named action key (see ACTION_KEYS).
        # NOTE(review): the shift/ctrl/alt parameters are accepted but
        # currently ignored -- modifier support is not implemented.
        if name not in WiFiMouseDriver.ACTION_KEYS:
            raise ValueError('Unknown action key name: %s' % name)
        format = "key "
        command = str(WiFiMouseDriver.ACTION_KEYS[name])
        self._send(format + str(len(command)) + command)
        return self

    def close(self):
        # Close the TCP connection to the server.
        self._socket.close()
""" Authenication and Authorization tests which require DC/OS Enterprise. Currently test against root marathon. Assume we will want to test these against MoM EE """ import common import dcos import pytest import shakedown from urllib.parse import urljoin from dcos import marathon from shakedown import credentials, ee_version @pytest.mark.skipif("ee_version() is None") @pytest.mark.usefixtures('credentials') def test_non_authenicated_user(): with shakedown.no_user(): with pytest.raises(dcos.errors.DCOSAuthenticationException) as exec_info: response = dcos.http.get(urljoin(shakedown.dcos_url(), 'service/marathon/v2/apps')) error = exc_info.value assert str(error) == "Authentication failed. Please run `dcos auth login`" @pytest.mark.skipif("ee_version() is None") @pytest.mark.usefixtures('credentials') def test_non_authorized_user(): with shakedown.new_dcos_user('kenny', 'kenny'): with pytest.raises(dcos.errors.DCOSAuthorizationException) as exec_info: response = dcos.http.get(urljoin(shakedown.dcos_url(
), 'ser
vice/marathon/v2/apps')) error = exc_info.value assert str(error) == "You are not authorized to perform this operation" @pytest.fixture(scope="function") def billy(): shakedown.add_user('billy', 'billy') shakedown.set_user_permission(rid='dcos:adminrouter:service:marathon', uid='billy', action='full') shakedown.set_user_permission(rid='dcos:service:marathon:marathon:services:/', uid='billy', action='full') yield shakedown.remove_user_permission(rid='dcos:adminrouter:service:marathon', uid='billy', action='full') shakedown.remove_user_permission(rid='dcos:service:marathon:marathon:services:/', uid='billy', action='full') shakedown.remove_user('billy') @pytest.mark.skipif("ee_version() is None") @pytest.mark.usefixtures('credentials') def test_authorized_non_super_user(billy): with shakedown.dcos_user('billy', 'billy'): client = marathon.create_client() len(client.get_apps()) == 0
(selfAddon.getAddonInfo('profile'))#selfAddon["profile"]) #F4Mversion='' class interalSimpleDownloader(): outputfile ='' clientHeader=None def __init__(self): self.init_done=False def thisme(self): return 'aaaa' def openUrl(self,url, ischunkDownloading=False): try: post=None openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler) if post: req = urllib2.Request(url, post) else: req = urllib2.Request(url) ua_header=False if self.clientHeader: for n,v in self.clientHeader: req.add_header(n,v) if n=='User-Agent': ua_header=True if not ua_header: req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36') #response = urllib2.urlopen(req) if self.proxy and ( (not ischunkDownloading) or self.use_proxy_for_chunks ): req.set_proxy(self.proxy, 'http') response = openner.open(req) return response except: #print 'Error in getUrl' traceback.print_exc() return None def getUrl(self,url, ischunkDownloading=False): try: post=None openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler) if post: req = urllib2.Request(url, post) else: req = urllib2.Request(url) ua_header=False if self.clientHeader: for n,v in self.clientHeader: req.add_header(n,v) if n=='User-Agent': ua_header=True if not ua_header: req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36') #response = urllib2.urlopen(req) if self.proxy and ( (not ischunkDownloading) or self.use_proxy_for_chunks ): req.set_prox
y(self.proxy, 'http') response = openner.open(req) data=response.read()
return data except: #print 'Error in getUrl' traceback.print_exc() return None def init(self, out_stream, url, proxy=None,g_stopEvent=None, maxbitRate=0): try: self.init_done=False self.init_url=url self.clientHeader=None self.status='init' self.proxy = proxy self.maxbitRate=maxbitRate if self.proxy and len(self.proxy)==0: self.proxy=None self.out_stream=out_stream self.g_stopEvent=g_stopEvent if '|' in url: sp = url.split('|') url = sp[0] self.clientHeader = sp[1] self.clientHeader= urlparse.parse_qsl(self.clientHeader) #print 'header recieved now url and headers are',url, self.clientHeader self.status='init done' self.url=url #self.downloadInternal( url) return True #os.remove(self.outputfile) except: traceback.print_exc() self.status='finished' return False def keep_sending_video(self,dest_stream, segmentToStart=None, totalSegmentToSend=0): try: self.status='download Starting' self.downloadInternal(self.url,dest_stream) except: traceback.print_exc() self.status='finished' def downloadInternal(self,url,dest_stream): try: url=self.url fileout=dest_stream self.status='bootstrap done' while True: response=self.openUrl(url) buf="start" firstBlock=True try: while (buf != None and len(buf) > 0): if self.g_stopEvent and self.g_stopEvent.isSet(): return buf = response.read(200 * 1024) fileout.write(buf) #print 'writing something..............' fileout.flush() try: if firstBlock: firstBlock=False if self.maxbitRate and self.maxbitRate>0:# this is for being sports for time being #print 'maxbitrate',self.maxbitRate ec=EdgeClass(buf,url,'http://www.en.beinsports.net/i/PerformConsole_BEIN/player/bin-release/PerformConsole.swf',sendToken=False) ec.switchStream(self.maxbitRate,"DOWN") except: traceback.print_exc() response.close() fileout.close() #print time.asctime(), "Closing connection" except socket.error, e: #print time.asctime(), "Client Closed the connection." 
try: response.close() fileout.close() except Exception, e: return except Exception, e: traceback.print_exc(file=sys.stdout) response.close() fileout.close() except: traceback.print_exc() return class EdgeClass(): def __init__(self, data, url, swfUrl, sendToken=False, switchStream=None): self.url = url self.swfUrl = swfUrl self.domain = self.url.split('://')[1].split('/')[0] self.control = 'http://%s/control/' % self.domain self.onEdge = self.extractTags(data,onEdge=True) self.sessionID=self.onEdge['session'] self.path=self.onEdge['streamName'] #print 'session',self.onEdge['session'] #print 'Edge variable',self.onEdge #print 'self.control',self.control #self.MetaData = self.extractTags(data,onMetaData=True) if sendToken: self.sendNewToken(self.onEdge['session'],self.onEdge['streamName'],self.swfUrl,self.control) def getURL(self, url, post=False, sessionID=False, sessionToken=False): try: #print 'GetURL --> url = '+url opener = urllib2.build_opener() if sessionID and sessionToken: opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:14.0) Gecko/20100101 Firefox/14.0.1' ), ('x-Akamai-Streaming-SessionToken', sessionToken ), ('x-Akamai-Streaming-SessionID', sessionID ), ('Content-Type', 'text/xml' )] elif sessionID: opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:14.0) Gecko/20100101 Firefox/14.0.1' ), ('x-Akamai-Streaming-SessionID', sessionID ), ('Content-Type', 'text/xml' )] else: opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:14.0) Gecko/20100101 Firefox/14.0.1' )] if not post: usock=opener.open(url) else: usock=opener.open(url,':)') response=usock.read() usock.close() except urllib2.URLError, e: #print 'Error reason: ', e return False else: return response def extractTags(self, filedata, onEdge=True,onMetaData=False): f = StringIO(filedata)
# coding: utf-8 # Copyright 2017 Solthis. # # This file is part of Fugen 2.0. # # Fugen 2.0 is free software: you can redistribute it and/or modify # i
t under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Fugen 2.0 is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANT
ABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fugen 2.0. If not, see <http://www.gnu.org/licenses/>.

import pandas as pd

from dateutil.relativedelta import relativedelta

from data.indicators.patient_indicator import PatientIndicator
from data.indicators.lost_patients import LostPatients
from data.indicators.arv_started_patients import ArvStartedPatients
from data.indicators.dead_patients import ArvDeadPatientsDuringPeriod
from data.indicators.transferred_patients import ArvTransferredPatientsDuringPeriod
from utils import getFirstDayOfPeriod, getLastDayOfPeriod


class ArvLostBackPatients(PatientIndicator):
    # Indicator: patients on ARV treatment who were lost to follow-up in
    # the previous period and have been seen again (visit, death or
    # transfer record) during the current reporting period.

    def under_arv(self):
        # This indicator itself does not define the "under ARV" population.
        return False

    @classmethod
    def get_key(cls):
        # Stable machine key identifying this indicator.
        return "ARV_LOST_BACK"

    @classmethod
    def get_display_label(cls):
        # Human-readable (French) label shown in reports; runtime string,
        # intentionally left untranslated.
        return "Perdus de vue de retour dans le TARV"

    def filter_patients_dataframe(self, limit_date, start_date=None,
                                  include_null_dates=False):
        """Return (dataframe, None) of lost-to-follow-up patients seen again.

        Intersects the patients lost as of one month before the period
        (and already started on ARV) with the ids seen during the period,
        where "seen" means a visit, an ARV death, or an ARV transfer.

        NOTE(review): despite the ``start_date=None`` default, the body
        computes ``start_date - relativedelta(months=1)`` and compares
        visit dates against ``start_date``, so passing None would raise
        TypeError -- callers appear expected to always supply start_date.
        """
        lost_prev = LostPatients(self.fuchia_database)
        arv_started = ArvStartedPatients(self.fuchia_database)
        # Shift the window back one month to find who was lost *before*
        # the current period began.
        n_limit = limit_date - relativedelta(months=1)
        n_start = start_date - relativedelta(months=1)
        i = (lost_prev & arv_started)
        prev_lost_patients = i.get_filtered_patients_dataframe(
            getLastDayOfPeriod(n_limit.month, n_limit.year),
            start_date=getFirstDayOfPeriod(n_start.month, n_start.year),
            include_null_dates=include_null_dates
        )
        # All visits up to limit_date, then narrowed to the period window.
        visits = self.filter_visits_by_category(
            limit_date,
            start_date=None,
            include_null_dates=include_null_dates
        )
        c1 = (visits['visit_date'] >= start_date)
        c2 = (visits['visit_date'] <= limit_date)
        visits = visits[c1 & c2]
        seen_id = pd.Index(visits['patient_id'].unique())
        # Arv dead during the period must be re-included
        arv_dead = ArvDeadPatientsDuringPeriod(self.fuchia_database)
        dead = arv_dead.get_filtered_patients_dataframe(
            limit_date,
            start_date=start_date,
            include_null_dates=include_null_dates
        )
        seen_id = seen_id.union(dead.index)
        # Transferred during the period must be re-included
        arv_trans = ArvTransferredPatientsDuringPeriod(self.fuchia_database)
        trans = arv_trans.get_filtered_patients_dataframe(
            limit_date,
            start_date=start_date,
            include_null_dates=include_null_dates
        )
        seen_id = seen_id.union(trans.index)
        # Lost-before AND seen-during = "lost patients who came back".
        n_index = prev_lost_patients.index.intersection(seen_id)
        return prev_lost_patients.loc[n_index], None
._machine = machine_class(machine_params.options) self._machine.set_suppression(self._is_running) self._machine.add_state_callback(self._machine_state_callback) self._machine.add_stroke_callback(self._machine_stroke_callback) self._machine_params = machine_params update_keymap = True start_machine = True elif self._machine is not None: update_keymap = 'system_keymap' in config_update if update_keymap: machine_keymap = config['system_keymap'] if machine_keymap is not None: self._machine.set_keymap(machine_keymap) if start_machine: self._machine.start_capture() # Update running extensions. enabled_extensions = config['enabled_extensions'] running_extensions = set(self._running_extensions) self._stop_extensions(running_extensions - e
nabled_extensions) self._start_extensions(enabled_extensions - running_extensions) # Trigger `config_changed` hook. if config_update: self._trigger_hook('config_changed', config_update) # Update dictionaries. config_dictionaries = OrderedDict( (d.path, d) for d in config['dictionaries'] ) copy_default_dictionaries(config_dictionaries.keys()) # Start by unloading outdated dictionaries.
self._dictionaries_manager.unload_outdated() self._set_dictionaries([ d for d in self._dictionaries.dicts if d.path in config_dictionaries and \ d.path in self._dictionaries_manager ]) # And then (re)load all dictionaries. dictionaries = [] for result in self._dictionaries_manager.load(config_dictionaries.keys()): if isinstance(result, DictionaryLoaderException): d = ErroredDictionary(result.path, result.exception) # Only show an error if it's new. if d != self._dictionaries.get(result.path): log.error('loading dictionary `%s` failed: %s', shorten_path(result.path), str(result.exception)) else: d = result d.enabled = config_dictionaries[d.path].enabled dictionaries.append(d) self._set_dictionaries(dictionaries) def _start_extensions(self, extension_list): for extension_name in extension_list: log.info('starting `%s` extension', extension_name) try: extension = registry.get_plugin('extension', extension_name).obj(self) except KeyError: # Plugin not installed, skip. continue try: extension.start() except Exception: log.error('initializing extension `%s` failed', extension_name, exc_info=True) else: self._running_extensions[extension_name] = extension def _stop_extensions(self, extension_list): for extension_name in list(extension_list): log.info('stopping `%s` extension', extension_name) extension = self._running_extensions.pop(extension_name) extension.stop() del extension def _quit(self, code): self._stop() self.code = code self._trigger_hook('quit') return True def _toggle_output(self): self._set_output(not self._is_running) def _set_output(self, enabled): if enabled == self._is_running: return self._is_running = enabled if enabled: self._translator.set_state(self._running_state) else: self._translator.clear_state() if self._machine is not None: self._machine.set_suppression(enabled) self._trigger_hook('output_changed', enabled) def _machine_state_callback(self, machine_state): self._same_thread_hook(self._on_machine_state_changed, machine_state) def 
_machine_stroke_callback(self, steno_keys): self._same_thread_hook(self._on_stroked, steno_keys) @with_lock def _on_machine_state_changed(self, machine_state): assert machine_state is not None self._machine_state = machine_state self._trigger_hook('machine_state_changed', self._machine_params.type, machine_state) def _consume_engine_command(self, command, force=False): # The first commands can be used whether plover has output enabled or not. command_name, *command_args = command.split(':', 1) command_name = command_name.lower() if command_name == 'resume': self._set_output(True) return True elif command_name == 'toggle': self._toggle_output() return True elif command_name == 'quit': self.quit() return True if not force and not self._is_running: return False # These commands can only be run when plover has output enabled. if command_name == 'suspend': self._set_output(False) elif command_name == 'configure': self._trigger_hook('configure') elif command_name == 'focus': self._trigger_hook('focus') elif command_name == 'add_translation': self._trigger_hook('add_translation') elif command_name == 'lookup': self._trigger_hook('lookup') elif command_name == 'suggestions': self._trigger_hook('suggestions') else: command_fn = registry.get_plugin('command', command_name).obj command_fn(self, command_args[0] if command_args else '') return False def _execute_engine_command(self, command, force=False): self._consume_engine_command(command, force=force) return False def _on_stroked(self, steno_keys): stroke = Stroke(steno_keys) log.stroke(stroke) self._translator.translate(stroke) self._trigger_hook('stroked', stroke) def _on_translated(self, old, new): if not self._is_running: return self._trigger_hook('translated', old, new) def _send_backspaces(self, b): if not self._is_running: return self._keyboard_emulation.send_backspaces(b) self._trigger_hook('send_backspaces', b) def _send_string(self, s): if not self._is_running: return self._keyboard_emulation.send_string(s) 
self._trigger_hook('send_string', s) def _send_key_combination(self, c): if not self._is_running: return self._keyboard_emulation.send_key_combination(c) self._trigger_hook('send_key_combination', c) def _send_engine_command(self, command): suppress = not self._is_running suppress &= self._consume_engine_command(command) if suppress: self._machine.suppress_last_stroke(self._keyboard_emulation.send_backspaces) def toggle_output(self): self._same_thread_hook(self._toggle_output) def set_output(self, enabled): self._same_thread_hook(self._set_output, enabled) @property @with_lock def machine_state(self): return self._machine_state @property @with_lock def output(self): return self._is_running @output.setter def output(self, enabled): self._same_thread_hook(self._set_output, enabled) @property @with_lock def config(self): return self._config.as_dict() @config.setter def config(self, update): self._same_thread_hook(self._update, config_update=update) @with_lock def __getitem__(self, setting): return self._config[setting] def __setitem__(self, setting, value): self.config = {setting: value} def reset_machine(self): self._same_thread_hook(self._update, reset_machine=True) def load_config(self): try: self._config.load() except Exception: log.error('loading configuration failed, re
node_target in node.targets: visit(node_target, source, state) visit(node.value, source, state) target_names = get_names(node.targets[0]) value_names = get_names(node.value) for target, value in zip(target_names, value_names): if target and value: path: QualifiedName = ("..",) with ExitStack() as stack: for name in target: stack.enter_context(state.scope(name)) stack.enter_context(state.scope(".")) path += ("..",) state.alias(path + value + (".",)) def is_static_method(node: ast.FunctionDef) -> bool: return any( n.id == "staticmethod" for n in node.decorator_list if isinstance(n, ast.Name) ) @visit.register def visit_function_definition( node: ast.FunctionDef, source: Source, state: State ) -> None: is_method = state.lookup_scopes[-1] and state.lookup_scopes[-1].is_class position = node_position(node, source, column_offset=len("def ")) with state.scope(node.name): state.add_occurrence(position=position) with state.scope("()"): for i, arg in enumerate(node.args.args): position = node_position(arg, source) with state.scope(arg.arg): state.add_occurrence(position=position) if i == 0 and is_method and not is_static_method(node): with state.scope("."): state.alias(("..", "..", "..", "..")) generic_visit(node, source, state) @visit.register def visit_class(node: ast.ClassDef, source: Source, state: State) -> None: position = node_position(node, source, column_offset=len("class ")) for base in node.bases: visit(base, source, state) with state.scope(node.name, lookup_scope=True, is_class=True): state.add_occurrence(position=position) with state.scope("()"): with state.scope("."): state.alias(("..", "..", ".")) for base in node.bases: state.alias(("..", "..", "..") + names_from(base) + ("()", ".")) for statement in node.body: visit(statement, source, state) @visit.register def visit_call(node: ast.Call, source: Source, state: State) -> None: call_position = node_position(node, source) for arg in node.args: visit(arg, source, state) visit(node.func, source, state) names = 
names_from(node.func) with ExitStack() as stack: if names: stack.enter_context(state.scope(names[0])) for name in names[1:]: stack.enter_context(state.scope(name)) stack.enter_context(state.scope("()")) for keyword in node.keywords: if not keyword.arg: continue position = source.find_after(keyword.arg, call_position) with state.scope(keyword.arg): state.add_occurrence(position=position) @singledispatch def names_from(node: ast.AST) -> QualifiedName: # pylint: disable=unused-argument return () @names_from.register def name_names(node: ast.Name) -> QualifiedName: return (node.id,) @names_from.register def attribute_names(node: ast.Attribute) -> QualifiedName: return names_from(node.value) + (".", node.attr) @names_from.register def call_names(node: ast.Call) -> QualifiedName: names = names_from(node.func) return names @visit.register def visit_attribute(node: ast.Attribute, source: Source, state: State) -> None: visit(node.value, source, state) position = node_position(node, source) names = names_from(node.value) with ExitStack() as stack: for name in names: position = source.find_after(name, position) stack.enter_context(state.scope(name)) stack.enter_context(state.scope(".")) position = source.find_after(node.attr, position) stack.enter_context(state.scope(node.attr)) state.add_occurrence(position=position) def visit_comp( node: Union[ast.DictComp, ast.ListComp, ast.SetComp, ast.GeneratorExp], source: Source, state: State, *sub_nodes, ) -> None: position = node_position(node, source) name = f"{type(node)}-{position.row},{position.column}" with state.scope(name): for generator in node.generators: visit(generator.target, source, state) visit(generator.iter, source, state) for if_node in generator.ifs: visit(if_node, source, state) for sub_node in sub_nodes: visit(sub_node, source, state) @visit.register def visit_dict_comp(node: ast.DictComp, source: Source, state: State) -> None: visit_comp(node, source, state, node.key, node.value) @visit.register def 
visit_list_comp(node: ast.ListComp, source: Source, state: State) -> None: visit_comp(node, source, state, node.elt) @visit.register def visit_set_comp(node: ast.SetComp, source: Source, state: State) -> None: visit_comp(node, source, state, node.elt) @visit.register def visit_generator_exp(node: ast.GeneratorExp, source: Source, state: State) -> None: visit_comp(node, source, state, node.elt) def all_occurrence_positions( position: Position, ) -> Iterable[Position]: source = position.source state = State(position) visit(source.get_ast(), source=source, state=state) if state.found: return sorted(state.found.occurrences) return [] def test_distinguishes_local_variables_from_global(): source = make_source( """ def fun(): old = 12 old2 = 13 result = old + old2 del old return result old = 20 """ ) position = source.position(row=2, column=4) assert all_occurrence_positions(position) == [ source.position(row=2, column=4), source.position(row=4, column=13), source.position(row=5, column=8), ] def test_finds_non_local_variable(): source = make_source( """ old = 12 def fun(): result = old + 1 return result old = 20 """ ) position = source.position(1, 0) assert all_occurrence_positions(position) == [ Position(source, 1, 0), Position(source, 4, 13), Position(source, 7, 0), ] def test_does_not_rename_random_attributes(): source = make_source( """ import os path = os.path.dirname(__file__) """ ) position = source.position(row=3, column=0) assert all_occurrence_positions(position) == [source.position(row=3, column=0)] def test_finds_parameter(): source = make_source( """ def fun(old=1): print(old) old = 8 fun(old=old) """ ) assert all_occurrence_positions(source.position(1, 8)) == [ source.position(1, 8), source.position(2, 10), source.position(5, 4), ] def test_finds_function(): source = mak
e_source( """ def fun_old(): return 'result' result = fun_old() """ ) assert [source.position(1, 4), source.position(3, 9)] == all_occurrence_positions( source.position(1, 4) ) def test_finds_class(): source = make_source( """
class OldClass: pass instance = OldClass() """ ) assert [source.position(1, 6), source.position(4, 11)] == all_occurrence_positions( source.position(1, 6) ) def test_finds_method_name(): source = make_source( """ class A: def old(self): pass unbound = A.old """ ) position = source.position(row=3, column=8) assert all_occurrence_positions(position) == [ source.position(row=3, column=8), source.position(row=6, column=12), ] def test_finds_passed_argument(): source = make_source( """ old = 2 def fun(arg, arg2): r
the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import logging from programy.config.base import BaseConfigurationData class BrainFileConfiguration(object): def __init__(self, files, extension=".aiml", directories=False): self._files = files self._extension = extension self._directories = directories @property def files(self): return self._files @property def extension(self): return self._extension @property def directories(self): return self._directories class BrainServiceConfiguration(object): def __init__(self, name, data=None): self._name = name.upper() self._params = {} if data is not None: for key in data.keys(): self._params[key.upper()] = data[key] @property def name(self): return self._name @property def path(self): return self._params['PATH'] def parameters(self): return self._params.keys() def set_parameter(self, key, value): self._params[key] = value def parameter(self, name): if name in self._params: return self._params[name] else: return None class BrainConfiguration(BaseConfigurationData): DEFAULT_SUPRESS_WARNINGS = False DEFAULT_ALLOW_SYSTEM_AIML = True DEFAULT_ALLOW_LEARN_AIML = True DEFAULT_ALLOW_LEARNF_AIML = True def __init__(self): 
self._supress_warnings = BrainConfiguration.DEFAULT_SUPRESS_WARNINGS self._allow_system_aiml = BrainConfiguration.DEFAULT_ALLOW_SYSTEM_AIML self._allow_learn_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARN_AIML self._allow_learnf_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARNF_AIML self._aiml_files = None self._set_files = None self._map_files = None self._denormal = None self._normal = None self._gender = None self._person = None self._person2 = None self._predicates = None s
elf._pronouns = None self._properties = None self._triples = None self._preprocessors = None self._postprocessors = None self._services = [] BaseConfigurationData.__init__(self, "brain") def _get_brain_file_configuration(self, config_file, section, bot_root): files = config_file.get_option(section, "files") files = self.sub_bot_root(files, bot_root) extensi
on = config_file.get_option(section, "extension") directories = config_file.get_option(section, "directories") return BrainFileConfiguration(files, extension, directories) def load_config_section(self, config_file, bot_root): brain = config_file.get_section(self.section_name) if brain is not None: self._supress_warnings = config_file.get_option(brain, "supress_warnings", BrainConfiguration.DEFAULT_SUPRESS_WARNINGS) self._allow_system_aiml = config_file.get_option(brain, "allow_system_aiml", BrainConfiguration.DEFAULT_ALLOW_SYSTEM_AIML) self._allow_learn_aiml = config_file.get_option(brain, "allow_learn_aiml", BrainConfiguration.DEFAULT_ALLOW_LEARN_AIML) self._allow_learnf_aiml = config_file.get_option(brain, "allow_learnf_aiml", BrainConfiguration.DEFAULT_ALLOW_LEARNF_AIML) self._allow_learnf_aiml = config_file.get_option(brain, "allow_learnf_aiml", BrainConfiguration.DEFAULT_ALLOW_LEARNF_AIML) files = config_file.get_section("files", brain) if files is not None: aiml = config_file.get_section("aiml", files) self._aiml_files = self._get_brain_file_configuration(config_file, aiml, bot_root) sets = config_file.get_section("sets", files) self._set_files = self._get_brain_file_configuration(config_file, sets, bot_root) maps = config_file.get_section("maps", files) self._map_files = self._get_brain_file_configuration(config_file, maps, bot_root) self._denormal = self._get_file_option(config_file, "denormal", files, bot_root) self._normal = self._get_file_option(config_file, "normal", files, bot_root) self._gender = self._get_file_option(config_file, "gender", files, bot_root) self._person = self._get_file_option(config_file, "person", files, bot_root) self._person2 = self._get_file_option(config_file, "person2", files, bot_root) self._predicates = self._get_file_option(config_file, "predicates", files, bot_root) self._pronouns = self._get_file_option(config_file, "pronouns", files, bot_root) self._properties = self._get_file_option(config_file, "properties", files, 
bot_root) self._triples = self._get_file_option(config_file, "triples", files, bot_root) self._preprocessors = self._get_file_option(config_file, "preprocessors", files, bot_root) self._postprocessors = self._get_file_option(config_file, "postprocessors", files, bot_root) else: logging.warning("Config section [files] missing from Brain, default values not appropriate") raise Exception ("Config section [files] missing from Brain") services = config_file.get_section("services", brain) if services is not None: service_keys = config_file.get_child_section_keys("services", brain) for name in service_keys: service_data = config_file.get_section_data(name, services) self._services.append(BrainServiceConfiguration(name, service_data)) else: logging.warning("Config section [services] missing from Brain, no services loaded") else: logging.warning("Config section [%s] missing, using default values", self.section_name) self._supress_warnings = BrainConfiguration.DEFAULT_SUPRESS_WARNINGS self._allow_system_aiml = BrainConfiguration.DEFAULT_ALLOW_SYSTEM_AIML self._allow_learn_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARN_AIML self._allow_learnf_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARNF_AIML self._allow_learnf_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARNF_AIML @property def supress_warnings(self): return self._supress_warnings @property def allow_system_aiml(self): return self._allow_system_aiml @property def allow_learn_aiml(self): return self._allow_learn_aiml @property def allow_learnf_aiml(self): return self._allow_learnf_aiml @property def aiml_files(self): return self._aiml_files @property def set_files(self): return self._set_files @property def map_files(self): return self._map_files @property def denormal(self): return self._denormal @property def normal(self): return self._normal @property def gender(self): return self._gender @property def person(self): return self._person @property def pe
"""HTTPretty-compatible facade implemented on top of mocket.

Re-exports mocket's HTTP mocking primitives under the names and header
conventions that the HTTPretty library uses, so test suites written
against HTTPretty can run unchanged on mocket.
"""
from mocket import Mocket, mocketize
from mocket.async_mocket import async_mocketize
from mocket.compat import byte_type, text_type
from mocket.mockhttp import Entry as MocketHttpEntry
from mocket.mockhttp import Request as MocketHttpRequest
from mocket.mockhttp import Response as MocketHttpResponse


def httprettifier_headers(headers):
    # HTTPretty normalizes header names to lowercase with dashes
    # (e.g. CONTENT_TYPE -> content-type); mirror that convention.
    return {k.lower().replace("_", "-"): v for k, v in headers.items()}


class Request(MocketHttpRequest):
    @property
    def body(self):
        # Lazily read the request body from the parser on first access
        # and cache it, so repeated reads return the same bytes.
        if self._body is None:
            self._body = self.parser.recv_body()
        return self._body


class Response(MocketHttpResponse):
    def get_protocol_data(self, str_format_fun_name="lower"):
        # Advertise ourselves as HTTPretty rather than mocket in the
        # default Server header, for drop-in compatibility.
        if "server" in self.headers and self.headers["server"] == "Python/Mocket":
            self.headers["server"] = "Python/HTTPretty"
        return super(Response, self).get_protocol_data(
            str_format_fun_name=str_format_fun_name
        )

    def set_base_headers(self):
        super(Response, self).set_base_headers()
        # Apply HTTPretty's header-name normalization on top of the base set.
        self.headers = httprettifier_headers(self.headers)

    # Keep a reference to the unpatched method so register_uri() can
    # restore it after a call that used forcing_headers.
    original_set_base_headers = set_base_headers

    def set_extra_headers(self, headers):
        self.headers.update(headers)


class Entry(MocketHttpEntry):
    # Route mocket's entry machinery through the HTTPretty-flavored
    # request/response classes defined above.
    request_cls = Request
    response_cls = Response


# HTTPretty-named aliases for mocket's decorators and global switches.
activate = mocketize
httprettified = mocketize
async_httprettified = async_mocketize
enable = Mocket.enable
disable = Mocket.disable
reset = Mocket.reset

# HTTP method constants, as exposed by HTTPretty.
GET = Entry.GET
PUT = Entry.PUT
POST = Entry.POST
DELETE = Entry.DELETE
HEAD = Entry.HEAD
PATCH = Entry.PATCH
OPTIONS = Entry.OPTIONS


def register_uri(
    method,
    uri,
    body="HTTPretty :)",
    adding_headers=None,
    forcing_headers=None,
    status=200,
    responses=None,
    match_querystring=False,
    priority=0,  # accepted for HTTPretty API compatibility; not used here
    **headers
):
    """Register a mocked response for *method* + *uri* (HTTPretty API).

    Extra keyword arguments are treated as response headers.  If
    *responses* is given, each element is registered in sequence;
    otherwise a single response is built from *body*/*status*/*headers*.
    """
    headers = httprettifier_headers(headers)
    if adding_headers is not None:
        headers.update(httprettifier_headers(adding_headers))
    if forcing_headers is not None:
        # NOTE: this patches the Response class globally, so forced
        # headers persist until a later register_uri() call restores
        # the original behavior below.
        def force_headers(self):
            self.headers = httprettifier_headers(forcing_headers)

        Response.set_base_headers = force_headers
    else:
        Response.set_base_headers = Response.original_set_base_headers
    if responses:
        Entry.register(method, uri, *responses)
    else:
        Entry.single_register(
            method,
            uri,
            body=body,
            status=status,
            headers=headers,
            match_querystring=match_querystring,
        )


class MocketHTTPretty:
    """Attribute-forwarding shim that mimics the `HTTPretty` singleton."""

    Response = Response

    def __getattr__(self, name):
        # `last_request` / `latest_requests` are dynamic queries against
        # mocket's global state; everything else falls through to Entry.
        if name == "last_request":
            return Mocket.last_request()
        if name == "latest_requests":
            return Mocket.request_list()
        return getattr(Entry, name)


HTTPretty = MocketHTTPretty()
HTTPretty.register_uri = register_uri
httpretty = HTTPretty

__all__ = (
    "HTTPretty",
    "activate",
    "async_httprettified",
    "httprettified",
    "enable",
    "disable",
    "reset",
    "Response",
    "GET",
    "PUT",
    "POST",
    "DELETE",
    "HEAD",
    "PATCH",
    "register_uri",
    "text_type",
    "byte_type",
)
"""Support for switches through the SmartThings cloud API.""" from __future__ import annotations from collections.abc import Sequence from pysmartthings import Capability from homeassistant.components.switch import SwitchEntity from . import SmartThingsEntity from .const import DATA_BROKERS, DOMAIN async def async_setup_entry(hass, config_entry, async_add_entities): """Add switches for a config entry.""" broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id] async_add_entities( [ SmartThingsSwitch(device) for device in broker.devices.values() if broker.any_assigned(device.device_id, "switch") ] ) def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None: """Return all capabilities supported if minimum required are present.""" # Must be able to be turned on/off. if Capability.switch in capabilities: return [Capability.switch, Capability.energy_meter, Capability.power_meter] return None class SmartThingsSwitch(SmartThingsEntity, SwitchEntity): """Define a SmartThings switch.""" async def async_turn_off(self, **kwargs) -> None: """Turn the switch off.""" await self._device.switch_off(set_status=True) # State is set optimistically in the command above, therefore update # the entity state ahead of receiving the confirming push updates self.async_write_ha_state() async def async_turn_on(self, **kwargs) -> None: """Tu
rn the switch on.""" await self._device.switch_on(set_status=True) # State is set optimistically in the command above, therefore update # the entity state ahead of receiving the confirming push updates self.async_write_ha_state() @property def is_on(self) -> bool: """Return true if light is on.""" re
turn self._device.status.switch
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-05-30 17:02
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an optional end date to the
    # `event` model in the `main` app.

    dependencies = [
        ('main', '0062_auto_20171223_1552'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='date_end',
            # Nullable with a None default so existing rows need no backfill.
            field=models.DateField(blank=True, default=None, null=True),
        ),
    ]
his method defines the model itself, and must be overloaded by derived classes This function will update `self` with the layers required to execute the `call()` method :param embeddings: The input feature indices :param kwargs: :return: """ def predict(self, batch_dict, **kwargs): self.eval() numpy_to_tensor = bool(kwargs.get('numpy_to_tensor', True)) batch_dict = self.make_input(batch_dict, numpy_to_tensor=numpy_to_tensor) hidden = batch_dict.get('h') step_softmax, _ = self(batch_dict, hidden) return F.softmax(step_softmax, dim=-1) class AbstractGeneratorLanguageModel(LanguageModelBase): def create_layers(self, embeddings, **kwargs): self.embeddings = self.init_embed(embeddings, **kwargs) self.embeddings_proj = self.init_embeddings_proj(**kwargs) self.generator = self.init_generate(**kwargs) self.output_layer = self.init_output(embeddings, **kwargs) def forward(self, input: Dict[str, TensorDef], hidden: TensorDef) -> Tuple[TensorDef, TensorDef]: emb = self.embed(input) output, hidden = self.generate(emb, hidden) return self.output_layer(output), hidden def embed(self, input): embedded_dropout = self.embeddings(input) return self.embeddings_proj(embedded_dropout) def init_embed(self, embeddings: Dict[str, TensorDef], **kwargs) -> BaseLayer: """This method creates the "embedding" layer of the inputs, with an optional reduction :param embeddings: A dictionary of embeddings :Keyword Arguments: See below * *embeddings_reduction* (defaults to `concat`) An operator to perform on a stack of embeddings * *embeddings_dropout = float(kwargs.get('embeddings_dropout', 0.0)) :return: The output of the embedding stack followed by its reduction. 
This will typically be an output with an additional dimension which is the hidden representation of the input """ reduction = kwargs.get('embeddings_reduction', 'concat') embeddings_dropout = float(kwargs.get('embeddings_dropout', 0.0)) return EmbeddingsStack({k: embeddings[k] for k in self.src_keys}, embeddings_dropout, reduction=reduction) def init_embeddings_proj(self, **kwargs): input_sz = self.embeddings.output_dim hsz = kwargs.get('hsz', kwargs.get('d_model')) if hsz != input_sz: proj = pytorch_linear(input_sz, hsz) print('Applying a transform from {} to {}'.format(input_sz, hsz)) else: proj = nn.Identity() return proj def init_generate(self, **kwargs): pass def generate(self, emb, hidden): return self.generator((emb, hidden)) def init_output(self, embeddings, **kwargs): self.vsz = embeddings[self.tgt_key].get_vsz() hsz = kwargs.get('hsz', kwargs.get('d_model')) unif = float(kwargs.get('unif', 0.0)) do_weight_tying = bool(kwargs.get('tie_weights', False)) output_bias = kwargs.get('output_bias', False) if do_weight_tying: output = WeightTieDense(embeddings[self.tgt_key], output_bias) else: output = pytorch_linear(hsz, self.vsz, unif) return output @register_model(task='lm', name='default') class RNNLanguageModel(AbstractGeneratorLanguageModel): def
__init__(self): super().__init__() def zero_state(self, batchsz): weight = next(self.parameters()).data return (torch.autograd.Variable(weight.new(self.num_layers, batchsz, self.hsz).zero_()), torch.autograd.Variable(weight.new(self.num_layers, batchsz, self.hsz).zero_())) @prop
erty def requires_state(self): True def init_generate(self, **kwargs): pdrop = float(kwargs.get('dropout', 0.5)) self.num_layers = kwargs.get('layers', kwargs.get('num_layers', 1)) self.hsz = kwargs.get('hsz', kwargs.get('d_model')) return WithDropoutOnFirst(LSTMEncoderWithState(self.hsz, self.hsz, self.num_layers, pdrop, batch_first=True), pdrop, kwargs.get('variational', False)) @register_model(task='lm', name='transformer') class TransformerLanguageModel(AbstractGeneratorLanguageModel): def __init__(self): super().__init__() @property def requires_state(self): False def init_layer_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)): module.weight.data.normal_(mean=0.0, std=self.weight_std) if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None: module.bias.data.zero_() def init_generate(self, **kwargs): pdrop = float(kwargs.get('dropout', 0.1)) layers = kwargs.get('layers', kwargs.get('num_layers', 1)) d_model = int(kwargs.get('d_model', kwargs.get('hsz'))) num_heads = kwargs.get('num_heads', 4) d_ff = int(kwargs.get('d_ff', 4 * d_model)) rpr_k = kwargs.get('rpr_k') d_k = kwargs.get('d_k') scale = bool(kwargs.get('scale', True)) activation = kwargs.get('activation', 'gelu') ffn_pdrop = kwargs.get('ffn_pdrop', 0.0) layer_norm_eps = kwargs.get('layer_norm_eps', 1e-12) layer_norms_after = kwargs.get('layer_norms_after', False) layer_drop = kwargs.get('layer_drop', 0.0) windowed_ra = kwargs.get('windowed_ra', False) rpr_value_on = kwargs.get('rpr_value_on', True) return TransformerEncoderStack(num_heads, d_model=d_model, pdrop=pdrop, scale=scale, layers=layers, d_ff=d_ff, rpr_k=rpr_k, d_k=d_k, activation=activation, ffn_pdrop=ffn_pdrop, layer_norm_eps=layer_norm_eps, layer_norms_after=layer_norms_after, windowed_ra=windowed_ra, rpr_value_on=rpr_value_on, layer_drop=layer_drop) def create_layers(self, embeddings, **kwargs): super().create_layers(embeddings, **kwargs) self.weight_std = kwargs.get('weight_std', 
0.02) self.apply(self.init_layer_weights) def create_mask(self, bth): T = bth.shape[1] mask = subsequent_mask(T).type_as(bth) return mask def generate(self, bth, _): mask = self.create_mask(bth) return self.generator((bth, mask)), None @register_model(task='lm', name='transformer-mlm') class TransformerMaskedLanguageModel(TransformerLanguageModel): def create_mask(self, bth): return None @register_model(task='lm', name='gmlp-mlm') class GatedMLPLanguageModel(AbstractGeneratorLanguageModel): def __init__(self): super().__init__() @property def requires_state(self): False def init_layer_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)): module.weight.data.normal_(mean=0.0, std=self.weight_std) if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None: module.bias.data.zero_() def init_generate(self, **kwargs): pdrop = float(kwargs.get('dropout', 0.1)) layers = kwargs.get('layers', kwargs.get('num_layers', 1)) d_model = int(kwargs.get('d_model', kwargs.get('hsz'))) d_ff = int(kwargs.get('d_ff', 4 * d_model)) activation = kwargs.get('activation', 'gelu') ffn_pdrop = kwargs.get('ffn_pdrop', 0.0) layer_norm_eps = kwargs.get('layer_norm_eps', 1e-12) layer_drop = kwargs.get('layer_drop', 0.0) nctx = int(kwargs.get('nctx', 256)) return GatedMLPEncoderStack(d_model=d_model, pdrop=pdrop, layers=layers, nctx=nctx, d_ff=d_ff, activation=activation, ffn_pdrop=ffn_pdrop,
"""Command-line interface for quarantine: pip installs in isolated envs."""
import click

from pycolorterm.pycolorterm import print_pretty
from quarantine.cdc import CDC
from sh import ErrorReturnCode


@click.group()
def cli():
    """Quarantine command group (entry point for all subcommands)."""
    pass


@cli.command()
@click.argument('name')
@click.argument('pip_args', nargs=-1)
def install(name, pip_args):
    """Install the package. Pip args specified with --."""
    cdc = CDC(name)
    try:
        cdc.install(pip_args)
    except ErrorReturnCode:
        # Installation failed part-way: tear the environment back down so
        # a broken half-install is not left behind.
        print_pretty("<FG_RED>Something went wrong! Rolling back...<END>")
        cdc.uninstall()


@cli.command()
@click.argument('name')
def uninstall(name):
    """Uninstall the package, environment and all."""
    cdc = CDC(name)
    cdc.uninstall()


if __name__ == '__main__':
    # Bug fix: this previously called `quarantine()`, a name that is not
    # defined anywhere in this module, so running the script directly
    # raised NameError. The click group defined above is `cli`.
    cli()
del_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name'])) cr.commit() cr.execute("""UPDATE ir_model_fields SET model_id=%s, field_description=%s, ttype=%s, relation=%s, view_load=%s, select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s WHERE model=%s AND name=%s""", ( vals['model_id'], vals['field_description'], vals['ttype'], vals['relation'], bool(vals['view_load']), vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name'] )) break cr.commit() # # Goal: try to apply inheritance at the instanciation level and # put objects in the pool var # @classmethod def create_instance(cls, pool, cr): """ Instanciate a given model. This class method instanciates the class of some model (i.e. a class deriving from osv or osv_memory). The class might be the class passed in argument or, if it inherits from another class, a class constructed by combining the two classes. The ``attributes`` argument specifies which parent class attributes have to be combined. TODO: the creation of the combined class is repeated at each call of this method. This is probably unnecessary. """ attributes = ['_columns', '_defaults', '_inherits', '_constraints', '_sql_constraints'] parent_names = getattr(cls, '_inherit', None) if parent_names: if isinstance(parent_names, (str, unicode)): name = cls._name or parent_names parent_names = [parent_names] else: name = cls._name if not name: raise TypeError('_name is mandatory in case of multiple inheritance') for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]): parent_model = pool.get(parent_name) if not parent_model: raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n' 'You may need to add a dependency on the parent class\' module.' 
% (name, parent_name)) if not getattr(cls, '_original_module', None) and name == parent_model._name: cls._original_module = parent_model._original_module parent_class = parent_model.__class__ nattr = {} for s in attributes: new = copy.copy(getattr(parent_model, s, {})) if s == '_columns': # Don't _inherit custom fields. for c in new.keys(): if new[c].manual: del new[c] # Duplicate float fields because they have a .digits # cache (which must be per-registry, not server-wide). for c in new.keys(): if new[c]._type == 'float': new[c] = copy.copy(new[c]) if hasattr(new, 'update'): new.update(cls.__dict__.get(s, {})) elif s=='_constraints': for c in cls.__dict__.get(s, []): exist = False for c2 in range(len(new)): #For _constraints, we should check field and methods as well if new[c2][2]==c[2] and (new[c2][0] == c[0] \ or getattr(new[c2][0],'__name__', True) == \ getattr(c[0],'__name__', False)): # If new class defines a constraint with
# same function name, we let it override # the old one. new[c2] = c exist = True break
if not exist: new.append(c) else: new.extend(cls.__dict__.get(s, [])) nattr[s] = new cls = type(name, (cls, parent_class), dict(nattr, _register=False)) if not getattr(cls, '_original_module', None): cls._original_module = cls._module obj = object.__new__(cls) obj.__init__(pool, cr) return obj def __new__(cls): """Register this model. This doesn't create an instance but simply register the model as being part of the module where it is defined. """ # Set the module name (e.g. base, sale, accounting, ...) on the class. module = cls.__module__.split('.')[0] if not hasattr(cls, '_module'): cls._module = module # Record this class in the list of models to instantiate for this module, # managed by the metaclass. module_model_list = MetaModel.module_to_models.setdefault(cls._module, []) if cls not in module_model_list: module_model_list.append(cls) # Since we don't return an instance here, the __init__ # method won't be called. return None def __init__(self, pool, cr): """ Initialize a model and make it part of the given registry. - copy the stored fields' functions in the osv_pool, - update the _columns with the fields found in ir_model_fields, - ensure there is a many2one for each _inherits'd parent, - update the children's _columns, - give a chance to each field to initialize itself. """ pool.add(self._name, self) self.pool = pool if not self._name and not hasattr(self, '_inherit'): name = type(self).__name__.split('.')[0] msg = "The class %s has to have a _name attribute" % name _logger.error(msg) raise except_orm('ValueError', msg) if not self._description: self._description = self._name if not self._table: self._table = self._name.replace('.', '_') if not hasattr(self, '_log_access'): # If _log_access is not specified, it is the same value as _auto. 
self._log_access = getattr(self, "_auto", True) self._columns = self._columns.copy() for store_field in self._columns: f = self._columns[store_field] if hasattr(f, 'digits_change'): f.digits_change(cr) def not_this_field(stored_func): x, y, z, e, f, l = stored_func return x != self._name or y != store_field self.pool._store_function[self._name] = filter(not_this_field, self.pool._store_function.get(self._name, [])) if not isinstance(f, fields.function): continue if not f.store: continue sm = f.store if sm is True: sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, 10, None)} for object, aa in sm.items(): if len(aa) == 4: (fnct, fields2, order, length) = aa elif len(aa) == 3: (fnct, fields2, order) = aa length = None else: raise except_orm('Error', ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._na
# Test fixture module: provides a small, static dataset of contact records.
from model.info_contact import Infos

# Two placeholder Infos rows; the numeric suffix only distinguishes the records.
# Tests that consume this list should not depend on any other field values.
testdata = [
    Infos(firstname="firstname1", lastname="lastname1"),
    Infos(firstname="firstname2", lastname="lastname2")
]
#__all__ = [ 'search', 'ham_dista
nce', 'lev_distance', 'distance', 'distance_matrix' ]
from mock import patch

import mock

from kiwi.storage.subformat.vhdx import DiskFormatVhdx


class TestDiskFormatVhdx:
    """Unit tests for the VHDX disk-format subformat handler.

    All collaborators (XML state, command execution, platform detection)
    are mocked; only DiskFormatVhdx's own behavior is exercised.
    """

    @patch('platform.machine')
    def setup(self, mock_machine):
        # Pin the architecture so the generated file names are deterministic
        # regardless of the host running the tests.
        mock_machine.return_value = 'x86_64'
        xml_data = mock.Mock()
        xml_data.get_name = mock.Mock(
            return_value='some-disk-image'
        )
        self.xml_state = mock.Mock()
        self.xml_state.xml_data = xml_data
        self.xml_state.get_image_version = mock.Mock(
            return_value='1.2.3'
        )
        # Object under test; 'root_dir'/'target_dir' are plain path strings,
        # never touched on disk because Command.run is mocked in the tests.
        self.disk_format = DiskFormatVhdx(
            self.xml_state, 'root_dir', 'target_dir'
        )

    def test_post_init(self):
        # post_init must merge caller-supplied options with the mandatory
        # 'subformat=dynamic' qemu-img option, in '-o key=value' pair form.
        self.disk_format.post_init({'option': 'value'})
        assert self.disk_format.options == [
            '-o', 'option=value', '-o', 'subformat=dynamic'
        ]

    @patch('kiwi.storage.subformat.vhdx.Command.run')
    def test_create_image_format(self, mock_command):
        # create_image_format should invoke exactly one qemu-img conversion
        # from the raw image to a dynamic VHDX, using name.arch-version paths.
        self.disk_format.create_image_format()
        mock_command.assert_called_once_with(
            [
                'qemu-img', 'convert', '-f', 'raw',
                'target_dir/some-disk-image.x86_64-1.2.3.raw',
                '-O', 'vhdx', '-o', 'subformat=dynamic',
                'target_dir/some-disk-image.x86_64-1.2.3.vhdx'
            ]
        )
su = summ(loader2[ss:ss+send_len], send_len)
# send command data = swapSerialData("f7".decode("hex")) data = swapSerialData(loader2[ss:ss+send_len]) #print "2 crc is: " + binascii.b2a_hex(data) #print "2 my_crc is: " +
binascii.b2a_hex(su) #print "i: " + str(i) sys.stdout.write("\ri: " + str(i)) sys.stdout.write("\n") serialPost(ser, "FF".decode("hex")) elif s == 113: serialPost(ser, "D010000000".decode("hex")) elif s == 114: serialPost(ser, "D1".decode("hex")) elif s == 115: nand_id = (ord(data[8])<<8) + ord(data[9]) # nado proverit, chto 2,3,4 baity ravny sootvetstvenno 0xEC 0x22 0xFC # # additionally identify NAND for Swift print "Flash... " if nand_id == int(0x04): print " 16MB (128Mbit) NAND" elif nand_id == int(0x14): print " 32MB (256Mbit) NAND" elif nand_id == int(0x24): print " 64MB (512Mbit) NAND" elif nand_id == int(0x34): print "128MB ( 1Gbit) NAND" elif nand_id == int(0x0C): print " 16MB (128Mbit) NAND" elif nand_id == int(0x1C): print " 32MB (256Mbit) NAND" elif nand_id == int(0x2C): print " 64MB (512Mbit) NAND" elif nand_id == int(0x3C): print "128MB ( 1Gbit) NAND" else: print "Unknown NAND: " + str("%02x" % nand_id) # here, the bootup is completed # delay slightly (required!) time.sleep(0.25) else: #data = chr(0x44) data = chr(0x00) print "-> " + binascii.b2a_hex(data) #ser.write(data) data = ser.read() print "serial RX: " + binascii.b2a_hex(data) data = chr(0x44) print "-> " + binascii.b2a_hex(data) ser.write(data) #ser.flush() data = ser.read() print "serial RX: " + binascii.b2a_hex(data) data = chr(0x51) print "-> " + binascii.b2a_hex(data) ser.write(data) data = ser.read() print "serial RX: " + binascii.b2a_hex(data) #print ser.portstr time.sleep(0.5) # give the serial port sometime to receive the data numOfLines = 0 while True: response = ser.readline() print("read data: " + response) numOfLines = numOfLines + 1 if (numOfLines >= 5): break ser.close() except Exception, e1: print "error communicating...: " + str(e1) ser.close() import traceback traceback.print_exc() except KeyboardInterrupt: print "\nmanual interrupted!" 
ser.close() else: print "cannot open serial port " exit() #=========================================================== #from hktool.bootload import mediatek from hktool.bootload.mediatek import MTKBootload from threading import Thread from time import sleep as Sleep def logical_xor(str1, str2): return bool(str1) ^ bool(str2) #----- MAIN CODE ------------------------------------------- if __name__=='__main__': from sys import platform as _platform import os if _platform == "linux" or _platform == "linux2": # linux print "it is linux?" from hktool.hotplug import linux_udev as port_notify elif _platform == "darwin": # OS X print "it is osx?" print "WARNING: port_notify is not realised !!!" elif _platform == "win32": # Windows... print "it is windows?" from hktool.hotplug import windevnotif as port_notify print "sys.platform: " + _platform + ", os.name: " + os.name print "" print "Select: xml, boot, sgh, crc, usb, exit, quit, q" print "" tsk = str(raw_input("enter command > ")) if tsk.lower() in ['exit', 'quit', 'q']: os._exit(0) if tsk.lower() in ['boot']: print "Working with device communication..." print "" Thread(target = port_notify.run_notify).start() Sleep(1) port = port_notify.get_notify() print "port_name is: " + port #conn_port(port) #mediatek.init(port) m = MTKBootload(port) if 'sgh' in tsk.lower(): tsks = tsk.split() print "" print "Working with device communication..." print "" Sleep(1) port = tsks[1] print "port_name is: " + port #m = SGHBootload(port) if tsk.lower() in ['xml', 'lxml']: print "Working with lxml..." 
print "" from lxml import etree tree = etree.parse('../../mtk-tests/Projects/_lg-a290/data/UTLog_DownloadAgent_FlashTool.xml') root = tree.getroot() print root #entries = tree.xpath("//atom:category[@term='accessibility']/..", namespaces=NSMAP) entries = tree.xpath("//UTLOG/Request[@Dir='[OUT]']/Data") #print entries old_text = None dmp_text = False cnt_text = 0 bin_file = None for xent in entries: new_text = xent.text if new_text == old_text: continue old_text = new_text #print "-> " + new_text bin_text = new_text.replace(" ", "") bin_text = bin_text.decode("hex") bin_len = len(bin_text) print str(bin_len) + " -> " + new_text if dmp_text is False and bin_len == 1024: dmp_text = True prt = xent.getparent() atr = prt.attrib num = atr["Number"] nam = "big_" + num + ".bin" bin_file = open(nam, 'wb') print "" print "start dump big data to: " + nam if dmp_text is True: #--- import array a = array.array('H', bin_text) # array.array('H', bin_text) a.byteswap() bin_text = a.tostring() #--- bin_file.write(bin_text) if bin_len == 1024: cnt_text += 1 else: cnt_text = cnt_text * 1024 + bin_len dmp_text = False bin_file.close() print "big data length is: " + str(cnt_text) print "" cnt_text = 0 pass if tsk.lower() in ['crc']: str1 = raw_input("Enter string one:") str2 = raw_input("Enter string two:") if logical_xor(str1, str2): print "ok" else: print "bad" pass print hex(0x12ef ^ 0xabcd) print hex(int("12ef", 16) ^ int("abcd", 16)) str1 = raw_input("Enter string one: ") str2 = raw_input("Enter string two: ") print hex(int(str1, 16) ^ int(str2, 16)) pass if tsk.lower() in ['usb']: import usb.core #import usb.backend.libusb1 import usb.backend.libusb0 import logging #PYUSB_DEBUG_LEVEL = "debug" #PYUSB_LOG_FILENAME = "C:\dump" __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) __backend__ = os.path.join(__location__, "libusb0.dll") #PYUSB_LOG_FILENAME = __location__ #backend = usb.backend.libusb1.get_backend(find_library=lambda x: 
"/usr/lib/libusb-1.0.so") #backend = usb.backend.libusb1.get_backend(find_library=lambda x: __backend__) backend = usb.backend.libusb0.get_backend(find_library=lambda x: __backend__) dev = usb.core.find(find_all=True, backend=backend) #dev = usb.core.find(find_all=True) busses = usb.busses() print busses if dev is None: raise ValueError('Our device is not connected') for bus in busses: devices = bus.devices for dev in devices: try: _name = usb.util.get_string(dev.dev, 19, 1) except: continue dev.set_configuration() cfg = dev.get_active_configuration() interface_nu
# Copyright (c) 2017 Charles University in Prague, Faculty of Arts,
#                    Institute of the Czech National Corpus
# Copyright (c) 2017 Tomas Machalek <tomas.machalek@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.


class UnknownFormatException(Exception):
    """Raised when a requested chart export format is not available."""
    pass


class AbstractChartExport(object):
    """Interface of one concrete chart exporter (e.g. PDF, Excel).

    Subclasses implement a single output format; a plug-in may bundle
    several of them.
    """

    def get_content_type(self):
        """Provide the MIME content type of the produced file
        (e.g. 'application/json').
        """
        raise NotImplementedError()

    def get_format_name(self):
        """Provide a human-readable, installation-unique format name.

        When several AbstractChartExport implementations are combined,
        clashing names must be adjusted so every export stays reachable.
        """
        raise NotImplementedError()

    def get_suffix(self):
        """Provide the file-name suffix matching the format
        (e.g. 'xlsx' for Excel).
        """
        raise NotImplementedError()

    def export_pie_chart(self, data, title):
        """Render *data* with label *title* as a PIE chart.

        Implementations return the raw file bytes, ready to be served
        to the client as a download.
        """
        raise NotImplementedError()


class AbstractChartExportPlugin(object):
    """Interface of the chart-export plug-in itself.

    A plug-in aggregates one or more AbstractChartExport implementations
    and dispatches to them by format name.
    """

    def get_supported_types(self):
        """List the format names of all installed exporters
        (i.e. the AbstractChartExport.get_format_name() values).

        The default implementation advertises no formats.
        """
        return []

    def get_content_type(self, format):
        """Map a format name to its content type
        (e.g. 'PDF' -> 'application/pdf').

        arguments:
        format -- format name (AbstractChartExport.get_format_name())
        """
        raise NotImplementedError()

    def get_suffix(self, format):
        """Map a format name to its file-name suffix.

        arguments:
        format -- format name (AbstractChartExport.get_format_name())
        """
        raise NotImplementedError()

    def export_pie_chart(self, data, title, format):
        """Render a PIE chart in the requested format.

        arguments:
        data -- chart data
        title -- chart label
        format -- format name (AbstractChartExport.get_format_name())
        """
        raise NotImplementedError()
"""Helper for the Wolfram|Alpha Simple API.

This module only builds the request URL; it performs no HTTP itself.
The URL is handed to Telegram in bot.py, and Telegram's servers fetch
the resulting image (documents smaller than 20MB are handled by
Telegram directly).
"""
from urllib.parse import quote_plus

import config


def query(query):
    """Return the Wolfram|Alpha Simple API URL answering *query*.

    The question text is fully URL-encoded: the previous
    ``replace(" ", "+")`` approach only handled spaces, so questions
    containing reserved characters such as '+', '&', '=' or '?'
    (e.g. "2+2") were decoded incorrectly by the API server.
    quote_plus() encodes spaces as '+' and escapes everything else.

    arguments:
    query -- the natural-language question to ask Wolfram|Alpha
    """
    question = quote_plus(query)
    # The appid is read from the local config module (config.WOLFRAM).
    return ("http://api.wolframalpha.com/v1/simple?appid={}&i="
            .format(config.WOLFRAM) + question + "&format=image")
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2011, Volkan Esgel
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#

# PyQt4 Section
from PyQt4.QtCore import *
from PyQt4.QtGui import *

# PyKDE4 Section
from PyKDE4.plasma import Plasma
from PyKDE4 import plasmascript

# Application Section
from notemodel import NoteModel
from notedelegate import NoteDelegate


class QuickNotes(plasmascript.Applet):
    """KDE Plasma applet showing quick notes in a borderless tree view."""

    def __init__(self, parent, args=None):
        # Plasma calls init() later; only the base applet is set up here.
        plasmascript.Applet.__init__(self, parent)

    def init(self):
        """Plasma entry point: build the UI and size the applet window."""
        self.setHasConfigurationInterface(True)
        self.setAspectRatioMode(Plasma.IgnoreAspectRatio)
        # Disabled alternative: themed SVG background (kept for reference).
        """
        self.theme = Plasma.Svg(self)
        self.theme.setImagePath("widgets/background")
        self.setBackgroundHints(Plasma.Applet.DefaultBackground)
        """
        self.setBackgroundHints(Plasma.Applet.NoBackground)
        self.__createMainLayout()
        # Size relative to the note view; the +20/-20 offsets compensate for
        # layout margins -- TODO confirm the exact margin values.
        width = self.viewerSize.width() + 20
        height = self.viewerSize.height() - 20
        self.resize(width, height)

    def __createMainLayout(self):
        """Create the vertical layout holding the transparent note view."""
        self.mainLayout = QGraphicsLinearLayout(Qt.Vertical, self.applet)
        noteview = Plasma.TreeView(self.applet)
        noteview.setStyleSheet("QTreeView { background: Transparent }")
        # Model loads notes from the applet package path.
        nmodel = NoteModel(self.package().path(), noteview)
        noteview.setModel(nmodel)
        noteview.nativeWidget().setItemDelegate(NoteDelegate(self))
        # Flat list appearance: no header, no indentation.
        noteview.nativeWidget().setHeaderHidden(True)
        noteview.nativeWidget().setIndentation(0)
        self.mainLayout.addItem(noteview)
        # Remember the view's size for the resize done in init().
        self.viewerSize = noteview.size()
        self.applet.setLayout(self.mainLayout)


def CreateApplet(parent):
    # Factory function required by the plasmascript applet loader.
    return QuickNotes(parent)
: int = 300) -> str" assert doc(m.kw_func_udl_z) == "kw_func_udl_z(x: int, y: int = 0) -> str" assert doc(m.args_function) == "args_function(*args) -> tuple" assert ( doc(m.args_kwargs_function) == "args_kwargs_function(*args, **kwargs) -> tuple" ) assert ( doc(m.KWClass.foo0) == "foo0(self: m.kwargs_and_defaults.KWClass, arg0: int, arg1: float) -> None" ) assert ( doc(m.KWClass.foo1) == "foo1(self: m.kwargs_and_defaults.KWClass, x: int, y: float) -> None" ) def test_named_arguments(msg): assert m.kw_func0(5, 10) == "x=5, y=10" assert m.kw_func1(5, 10) == "x=5, y=10" assert m.kw_func1(5, y=10) == "x=5, y=10" assert m.kw_func1(y=10, x=5) == "x=5, y=10" assert m.kw_func2() == "x=100, y=200" assert m.kw_func2(5) == "x=5, y=200" assert m.kw_func2(x=5) == "x=5, y=200" assert m.kw_func2(y=10) == "x=100, y=10" assert m.kw_func2(5, 10) == "x=5, y=10" assert m.kw_func2(x=5, y=10) == "x=5, y=10" with pytest.raises(TypeError) as excinfo: # noinspection PyArgumentList m.kw_func2(x=5, y=10, z=12) assert excinfo.match( r"(?s)^kw_func2\(\): incompatible.*Invoked with: kwargs: ((x=5|y=10|z=12)(, |$))" + "{3}$" ) assert m.kw_func4() == "{13 17}" assert m.kw_func4(myList=[1, 2, 3]) == "{1 2 3}" assert m.kw_func_udl(x=5, y=10) == "x=5, y=10" assert m.kw_func_udl_z(x=5) == "x=5, y=0" def test_arg_and_kwargs(): args = "arg1_value", "arg2_value", 3 assert m.args_function(*args) == args args = "a1", "a2" kwargs = dict(arg3="a3", arg4=4) assert m.args_kwargs_function(*args, **kwargs) == (args, kwargs) def test_mixed_args_and_kwargs(msg): mpa = m.mixed_plus_args mpk = m.mixed_plus_kwargs mpak = m.mixed_plus_args_kwargs mpakd = m.mixed_plus_args_kwargs_defaults assert mpa(1, 2.5, 4, 99.5, None) == (1, 2.5, (4, 99.5, None)) assert mpa(1, 2.5) == (1, 2.5, ()) with pytest.raises(TypeError) as excinfo: assert mpa(1) assert ( msg(excinfo.value) == """ mixed_plus_args(): incompatible function arguments. The following argument types are supported: 1. 
(arg0: int, arg1: float, *args) -> tuple Invoked with: 1 """ # noqa: E501 line too long ) with pytest.raises(TypeError) as excinfo: assert mpa() assert ( msg(excinfo.value) == """ mixed_plus_args(): incompatible function arguments. The following argument types are supported: 1. (arg0: int, arg1: float, *args) -> tuple
Invoked with: """ # noqa: E501 line too long ) assert mpk(-2, 3.5, pi=3.14159, e=2.71828) == ( -2, 3.5, {"e": 2.71828, "pi": 3.14159}, ) assert mpak(7, 7.7, 7.77, 7.777, 7.7777, minusseven=-7) == ( 7, 7.7, (7.77, 7.777, 7.7777), {"minusseven": -7}, ) assert mpakd() == (1, 3.14159, (), {}) assert mpakd(3) ==
(3, 3.14159, (), {}) assert mpakd(j=2.71828) == (1, 2.71828, (), {}) assert mpakd(k=42) == (1, 3.14159, (), {"k": 42}) assert mpakd(1, 1, 2, 3, 5, 8, then=13, followedby=21) == ( 1, 1, (2, 3, 5, 8), {"then": 13, "followedby": 21}, ) # Arguments specified both positionally and via kwargs should fail: with pytest.raises(TypeError) as excinfo: assert mpakd(1, i=1) assert ( msg(excinfo.value) == """ mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported: 1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple Invoked with: 1; kwargs: i=1 """ # noqa: E501 line too long ) with pytest.raises(TypeError) as excinfo: assert mpakd(1, 2, j=1) assert ( msg(excinfo.value) == """ mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported: 1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple Invoked with: 1, 2; kwargs: j=1 """ # noqa: E501 line too long ) def test_keyword_only_args(msg): assert m.kw_only_all(i=1, j=2) == (1, 2) assert m.kw_only_all(j=1, i=2) == (2, 1) with pytest.raises(TypeError) as excinfo: assert m.kw_only_all(i=1) == (1,) assert "incompatible function arguments" in str(excinfo.value) with pytest.raises(TypeError) as excinfo: assert m.kw_only_all(1, 2) == (1, 2) assert "incompatible function arguments" in str(excinfo.value) assert m.kw_only_some(1, k=3, j=2) == (1, 2, 3) assert m.kw_only_with_defaults(z=8) == (3, 4, 5, 8) assert m.kw_only_with_defaults(2, z=8) == (2, 4, 5, 8) assert m.kw_only_with_defaults(2, j=7, k=8, z=9) == (2, 7, 8, 9) assert m.kw_only_with_defaults(2, 7, z=9, k=8) == (2, 7, 8, 9) assert m.kw_only_mixed(1, j=2) == (1, 2) assert m.kw_only_mixed(j=2, i=3) == (3, 2) assert m.kw_only_mixed(i=2, j=3) == (2, 3) assert m.kw_only_plus_more(4, 5, k=6, extra=7) == (4, 5, 6, {"extra": 7}) assert m.kw_only_plus_more(3, k=5, j=4, extra=6) == (3, 4, 5, {"extra": 6}) assert m.kw_only_plus_more(2, k=3, extra=4) == (2, -1, 3, {"extra": 
4}) with pytest.raises(TypeError) as excinfo: assert m.kw_only_mixed(i=1) == (1,) assert "incompatible function arguments" in str(excinfo.value) with pytest.raises(RuntimeError) as excinfo: m.register_invalid_kw_only(m) assert ( msg(excinfo.value) == """ arg(): cannot specify an unnamed argument after an kw_only() annotation """ ) def test_positional_only_args(msg): assert m.pos_only_all(1, 2) == (1, 2) assert m.pos_only_all(2, 1) == (2, 1) with pytest.raises(TypeError) as excinfo: m.pos_only_all(i=1, j=2) assert "incompatible function arguments" in str(excinfo.value) assert m.pos_only_mix(1, 2) == (1, 2) assert m.pos_only_mix(2, j=1) == (2, 1) with pytest.raises(TypeError) as excinfo: m.pos_only_mix(i=1, j=2) assert "incompatible function arguments" in str(excinfo.value) assert m.pos_kw_only_mix(1, 2, k=3) == (1, 2, 3) assert m.pos_kw_only_mix(1, j=2, k=3) == (1, 2, 3) with pytest.raises(TypeError) as excinfo: m.pos_kw_only_mix(i=1, j=2, k=3) assert "incompatible function arguments" in str(excinfo.value) with pytest.raises(TypeError) as excinfo: m.pos_kw_only_mix(1, 2, 3) assert "incompatible function arguments" in str(excinfo.value) with pytest.raises(TypeError) as excinfo: m.pos_only_def_mix() assert "incompatible function arguments" in str(excinfo.value) assert m.pos_only_def_mix(1) == (1, 2, 3) assert m.pos_only_def_mix(1, 4) == (1, 4, 3) assert m.pos_only_def_mix(1, 4, 7) == (1, 4, 7) assert m.pos_only_def_mix(1, 4, k=7) == (1, 4, 7) with pytest.raises(TypeError) as excinfo: m.pos_only_def_mix(1, j=4) assert "incompatible function arguments" in str(excinfo.value) def test_signatures(): assert "kw_only_all(*, i: int, j: int) -> tuple\n" == m.kw_only_all.__doc__ assert "kw_only_mixed(i: int, *, j: int) -> tuple\n" == m.kw_only_mixed.__doc__ assert "pos_only_all(i: int, j: int, /) -> tuple\n" == m.pos_only_all.__doc__ assert "pos_only_mix(i: int, /, j: int) -> tuple\n" == m.pos_only_mix.__doc__ assert ( "pos_kw_only_mix(i: int, /, j: int, *, k: int) -> tuple\n" 
== m.pos_kw_only_mix.__doc__ ) @pytest.mark.xfail("env.PYPY and env.PY2", reason="PyPy2 doesn't double count") def test_args_refcount(): """Issue/PR #1216 - py::args elements get double-inc_ref()ed when combined with regular arguments""" refcount = m.arg_refcount_h myval = 54321 expected = refcount(myval) assert m.arg_refcount_h(myval) == expected assert m.arg_refcount_o(myval) == expected + 1 assert m.arg_refcount_h(myval) == expected assert refcount(myval) == expected assert m.mixed_plus_args(1, 2.0, "a", myval) == (1, 2.0, ("a", myval)) assert refcount(my
import random  # imported for future strategy randomisation; unused by this skeleton


def preprocessing(mazeMap, mazeWidth, mazeHeight, playerLocation,
                  opponentLocation, piecesOfCheese, timeAllowed):
    """Called once before the match starts; this skeleton bot needs no setup."""


def turn(mazeMap, mazeWidth, mazeHeight, playerLocation, opponentLocation,
         playerScore, opponentScore, piecesOfCheese, timeAllowed):
    """Called every turn; the skeleton issues no move (implicitly returns None)."""


def postprocessing(mazeMap, mazeWidth, mazeHeight, playerLocation,
                   opponentLocation, playerScore, opponentScore,
                   piecesOfCheese, timeAllowed):
    """Called once after the match ends; nothing to clean up here."""
ces = services self.zapFunc = zapFunc if bouquetname != "": Screen.setTitle(self, bouquetname) self["list"] = EPGList( selChangedCB = self.onSelectionChanged, timer = self.session.nav.RecordTimer, time_epoch = config.misc.graph_mepg.prev_time_period.value, overjump_empty = config.misc.graph_mepg.overjump.value) HelpableScreen.__init__(self) self["okactions"] = HelpableActionMap(self, "OkCancelActions", { "cancel": (self.closeScreen, _("Exit EPG")), "ok": (self.eventSelected, _("Zap to selected channel, or show detailed event info (depends on configuration)")) }, -1) self["okactions"].csel = self self["epgactions"] = HelpableActionMap(self, "EPGSelectActions", { "timerAdd": (self.timerAdd, _("Add/remove timer for current event")), "info": (self.infoKeyPressed, _("Show detailed event info")), "red": (self.zapTo, _("Zap to selected channel")), "yellow": (self.swapMode, _("Switch between normal mode and list mode")), "blue": (self.enterDateTime, _("Goto specific data/time")), "menu": (self.showSetup, _("Setup menu")), "nextBouquet": (self.nextBouquet, _("Show bouquet selection menu")), "prevBouquet": (self.prevBouquet, _("Show bouquet selection menu")), "nextService": (self.nextPressed, _("Goto next page of events")), "prevService": (self.prevPressed, _("Goto previous page of events")), "preview": (self.preview, _("Preview selected channel")), "nextDay": (self.nextDay, _("Goto next day of events")), "prevDay": (self.prevDay, _("Goto previous day of events")) }, -1) self["epgactions"].csel = self self["inputactions"] = HelpableActionMap(self, "InputActions", { "left": (self.leftPressed, _("Go to previous event")), "right": (self.rightPressed, _("Go to next event")), "1": (self.key1, _("Set time window to 1 hour")), "2": (self.key2, _("Set time window to 2 hours")), "3": (self.key3, _("Set time window to 3 hours")), "4": (self.key4, _("Set time window to 4 hours")), "5": (self.key5, _("Set time window to 5 hours")), "6": (self.key6, _("Set time window to 6 hours")), "7": 
(self.prevPage, _("Go to previous page of service")), "9":
(self.nextPage, _("Go to next page of service")), "8": (self.toTop, _("Go to first service")), "0": (self.toEnd, _("Go to last service")) }, -1) self["inputactions"].csel = self self.updateTimelineTimer = eTimer() self.updateTimelineTimer.callback.append(self.moveTimeLines) self.updateTimelineTimer.start(60 * 1000) self.onLayoutFinish.append(self.onCreate) sel
f.previousref = self.session.nav.getCurrentlyPlayingServiceOrGroup() def prevPage(self): self["list"].moveTo(eListbox.pageUp) def nextPage(self): self["list"].moveTo(eListbox.pageDown) def toTop(self): self["list"].moveTo(eListbox.moveTop) def toEnd(self): self["list"].moveTo(eListbox.moveEnd) def prevPressed(self): self.updEvent(-2) def nextPressed(self): self.updEvent(+2) def leftPressed(self): self.updEvent(-1) def rightPressed(self): self.updEvent(+1) def prevDay(self): self.updEvent(-3) def nextDay(self): self.updEvent(+3) def updEvent(self, dir, visible = True): ret = self["list"].selEntry(dir, visible) if ret: self.moveTimeLines(True) def updEpoch(self, mins): self["list"].setEpoch(mins) config.misc.graph_mepg.prev_time_period.value = mins self.moveTimeLines() def key1(self): self.updEpoch(60) def key2(self): self.updEpoch(120) def key3(self): self.updEpoch(180) def key4(self): self.updEpoch(240) def key5(self): self.updEpoch(300) def key6(self): self.updEpoch(360) def nextBouquet(self): if self.bouquetChangeCB: self.bouquetChangeCB(1, self) def prevBouquet(self): if self.bouquetChangeCB: self.bouquetChangeCB(-1, self) def enterDateTime(self): t = localtime(time()) config.misc.graph_mepg.prev_time.value = [t.tm_hour, t.tm_min] self.session.openWithCallback(self.onDateTimeInputClosed, TimeDateInput, config.misc.graph_mepg.prev_time) def onDateTimeInputClosed(self, ret): if len(ret) > 1: if ret[0]: now = time() - config.epg.histminutes.getValue() * 60 self.ask_time = ret[1] if ret[1] >= now else now self.ask_time = self.ask_time - self.ask_time % int(config.misc.graph_mepg.roundTo.getValue()) l = self["list"] l.resetOffset() l.fillMultiEPG(None, self.ask_time) self.moveTimeLines(True) def showSetup(self): self.session.openWithCallback(self.onSetupClose, GraphMultiEpgSetup) def onSetupClose(self, ignore = -1): l = self["list"] l.setItemsPerPage() l.setEventFontsize() l.setEpoch(config.misc.graph_mepg.prev_time_period.value) 
l.setOverjump_Empty(config.misc.graph_mepg.overjump.value) l.setShowServiceMode(config.misc.graph_mepg.servicetitle_mode.value) now = time() - config.epg.histminutes.getValue() * 60 self.ask_time = now - now % int(config.misc.graph_mepg.roundTo.getValue()) self["timeline_text"].setDateFormat(config.misc.graph_mepg.servicetitle_mode.value) l.fillMultiEPG(None, self.ask_time) self.moveTimeLines(True) def closeScreen(self): self.zapFunc(None, zapback = True) config.misc.graph_mepg.save() self.close(False) def infoKeyPressed(self): cur = self["list"].getCurrent() event = cur[0] service = cur[1] if event is not None: self.session.open(EventViewEPGSelect, event, service, self.eventViewCallback, self.openSingleServiceEPG, self.openMultiServiceEPG, self.openSimilarList) def openSimilarList(self, eventid, refstr): self.session.open(EPGSelection, refstr, None, eventid) def openSingleServiceEPG(self): ref = self["list"].getCurrent()[1].ref.toString() if ref: self.session.open(EPGSelection, ref) def openMultiServiceEPG(self): if self.services: self.session.openWithCallback(self.doRefresh, EPGSelection, self.services, self.zapFunc, None, self.bouquetChangeCB) def setServices(self, services): self.services = services self.onCreate() def doRefresh(self, answer): serviceref = self.session.nav.getCurrentlyPlayingServiceOrGroup() l = self["list"] l.moveToService(serviceref) l.setCurrentlyPlaying(serviceref) self.moveTimeLines() def onCreate(self): serviceref = self.session.nav.getCurrentlyPlayingServiceOrGroup() l = self["list"] l.setShowServiceMode(config.misc.graph_mepg.servicetitle_mode.value) self["timeline_text"].setDateFormat(config.misc.graph_mepg.servicetitle_mode.value) l.fillMultiEPG(self.services, self.ask_time) l.moveToService(serviceref) l.setCurrentlyPlaying(serviceref) self.moveTimeLines() def eventViewCallback(self, setEvent, setService, val): l = self["list"] old = l.getCurrent() self.updEvent(val, False) cur = l.getCurrent() if cur[0] is None and cur[1].ref != 
old[1].ref: self.eventViewCallback(setEvent, setService, val) else: setService(cur[1]) setEvent(cur[0]) def preview(self): ref = self["list"].getCurrent()[1] if ref: self.zapFunc(ref.ref, preview = True) self["list"].setCurrentlyPlaying(ref.ref) self["list"].l.invalidate() def zapTo(self): if self.zapFunc and self.key_red_choice == self.ZAP: ref = self["list"].getCurrent()[1] if ref: self.zapFunc(ref.ref) if self.previousref and self.previousref == ref.ref: config.misc.graph_mepg.save() self.close(True) self.previousref = ref.ref self["list"].setCurrentlyPlaying(ref.ref) self["list"].l.invalidate() def swapMode(self): global listscreen listscreen = not listscreen self.close(None) def eventSelected(self): if config.misc.graph_mepg.OKButton.value == "info": self.infoKeyPressed() else: self.zapTo() def removeTimer(self, timer): timer.afterEvent = AFTEREVENT.NONE self.session.nav.RecordTimer.removeEntry(timer) self["key_green"].setText(_("Add timer")) self.key_green_choice = self.ADD_TIMER def timerAdd(self): cur = self["list"].
behavior of the Parameter. Parameters may have parent / child / sibling relationships to construct organized hierarchies. Parameters generally do not have any inherent GUI or visual interpretation; instead they manage ParameterItem instances which take care of display and user interaction. Note: It is fairly uncommon to use the Parameter class directly; mostly you will use subclasses which provide specialized type and data handling. The static pethod Parameter.create(...) is an easy way to generate instances of these subclasses. For more Parameter types, see ParameterTree.parameterTypes module. =================================== ========================================================= **Signals:** sigStateChanged(self, change, info) Emitted when anything changes about this parameter at all. The second argument is a string indicating what changed ('value', 'childAdded', etc..) The third argument can be any extra information about the change sigTreeStateChanged(self, changes) Emitted when any child in the tree changes state (but only if monitorChildren() is called) the format of *changes* is [(param, change, info), ...] sigValueChanged(self, value) Emitted when value is finished changing sigValueChanging(self, value) Emitted immediately for all value changes, including during editing. 
sigChildAdded(self, child, index) Emitted when a child is added sigChildRemoved(self, child) Emitted when a child is removed sigRemoved(self) Emitted when this parameter is removed sigParentChanged(self, parent) Emitted when this parameter's parent has changed sigLimitsChanged(self, limits) Emitted when this parameter's limits have changed sigDefaultChanged(self, default) Emitted when this parameter's default value has changed sigNameChanged(self, name) Emitted when this parameter's name has changed sigOptionsChanged(self, opts) Emitted when any of this parameter's options have changed =================================== ========================================================= """ ## name, type, limits, etc. ## can also carry UI hints (slider vs spinbox, etc.) sigValueChanged = QtCore.Signal(object, object) ## self, value emitted when value is finished being edited sigValueChanging = QtCore.Signal(object, object) ## self, value emitted as value is being edited sigChildAdded = QtCore.Signal(object, object, object) ## self, child, index sigChildRemoved = QtCore.Signal(object, object) ## self, child sigRemoved = QtCore.Signal(object) ## self sigParentChanged = QtCore.Signal(object, object) ## self, parent sigLimitsChanged = QtCore.Signal(object, object) ## self, limits sigDefaultChanged = QtCore.Signal(object, object) ## self, default sigNameChanged = QtCore.Signal(object, object) ## self, name sigOptionsChanged = QtCore.Signal(object, object) ## self, {opt:val, ...} ## Emitted when anything changes about this parameter at all. ## The second argument is a string indicating what changed ('value', 'childAdded', etc..) ## The third argument can be any extra information about the change sigStateChanged = QtCore.Signal(object, object, object) ## self, change, info ## emitted when any child in the tree changes state ## (but only if monitorChildren() is called) sigTreeStateChanged = QtCore.Signal(object, object) # self, changes # changes = [(param, change, info), ...] 
# bad planning. #def __new__(cls, *args, **opts): #try: #cls = PARAM_TYPES[opts['type']] #except KeyError: #pass #return QtCore.QObject.__new__(cls, *args, **opts) @staticmethod def create(**opts): """ Static method that creates a new Parameter (or subclass) instance using opts['type'] to select the appropriate class. All options are passed directly to the new Parameter's __init__ method. Use registerParameterType() to add new class types. """ typ = opts.get('type', None) if typ is None: cls = Parameter else: cls = PARAM_TYPES[opts['type']] return cls(**opts) def __init__(self, **opts): """ Initialize a Parameter object. Although it is rare to directly create a Parameter instance, the options available to this method are also allowed by most Parameter subclasses. ================= ========================================================= Keyword Arguments name The name to give this Parameter. This is the name that will appear in the left-most column of a ParameterTree for this Parameter. value The value to initially assign to this Parameter. default The default value for this Parameter (most Parameters provide an option to 'reset to default'). children A list of children for this Parameter. Children may be given either as a Parameter instance or as a dictionary to pass to Parameter.create(). In this way, it is possible to specify complex hierarchies of Parameters from a single nested data structure. readonly If True, the user will not be allowed to edit this Parameter. (default=False) enabled If False, any widget(s) for this parameter will appear disabled. (default=True) visible If False, the Parameter will not appear when displayed in a ParameterTree. (default=True) renamable If True, the user may rename this Parameter. (default=False) removable If True, the user may remove this Parameter. (default=False) expanded If True, the Parameter will appear expanded when displayed in a ParameterTree (its children will be visible). 
(default=True) ================= ========================================================= """ QtCore.QObject.__init__(self) self.opts = { 'type': None, 'readonly': False, 'visible': True, 'enabled': True, 'renamable': False, 'removable': False, 'strictNaming': False, # forces name to be usable as a python
variable 'expanded': True, #'limits': None, ## Thi
s is a bad plan--each parameter type may have a different data type for limits. } self.opts.update(opts) self.childs = [] self.names = {} ## map name:child self.items = weakref.WeakKeyDictionary() ## keeps track of tree items representing this parameter self._parent = None self.treeStateChanges = [] ## cache of tree state changes to be delivered on next emit self.blockTreeChangeEmit = 0 #self.monitoringChildren = False ## prevent calling monitorChildren more than once if 'value' not in self.opts: self.opts['value'] = None if 'name' not in self.opts or not isinstance(self.opts['name'], basestring): raise Exception("Parameter must have a string name specified in opts.") self.setName(opts['name']) self.addChildren(self.opt
from protoplot.engine.options_container import OptionsContainer
from protoplot.engine.tag import make_tags_list
from protoplot.engine.item_metaclass import ItemMetaclass  # @UnusedImport
from protoplot.engine.item_container import ItemContainer

# TODO options should be resolved in the proper order. Here's the proposed
# resulting order for series:
#     my_series            .set(...)
#     my_plot .series.all  .set(...)
#     my_page .plots.all.series.all.set(...)
#     Page.all.plots.all.series.all.set(...)
#     Plot .all.series.all.set(...)
#     Series.all.set(...)
# For testability, a resolved option should probably store a complete list
# of values in order of priority.


class Item(metaclass=ItemMetaclass):
    '''
    Represents an item in the tree. Items typically contain (a) other items,
    and (b) item containers.

    An Item *instance* has the following attributes:
      * An "options" property (of type OptionsContainer), which contains the
        options for this specific instance.
      * A "set" method as a shortcut for setting these options.
      * A (potentially empty) set of tags to allow selective application of
        options (a tag is similar to a class in CSS).

    An Item *subclass* has the following (class) attributes:
      * An item accessor which will return a template item instance for a
        given tag specification, which can be a string or the empty slice to
        specify the default template. (TODO really empty slice?)
      * An "all" property as a shortcut for [:]
      * A "set" method as a shortcut for [:].set
      * A constructor taking options like the set method

    Item subclasses should call the Item constructor with all *args and
    **kwargs and define a register_options method to register the options,
    like so:
        self.options.register("color", False, "black")

    Note that the Item constructor, which runs before the Item subclass
    constructor, sets the initial options. The options must already be
    registered at this point, so this cannot be done by the Item subclass
    constructor.
    '''

    def __init__(self, **kwargs):
        '''
        All kwargs will be used as options, except:
          * tag => use as tag(s)
        '''
        # Create the tag list and remove the tag argument from kwargs.
        if 'tag' in kwargs:
            self.tags = make_tags_list(kwargs['tag'])
            del kwargs['tag']
        else:
            self.tags = []

        # Create the instance-level options and initialize them from the
        # remaining kwargs.
        #self.options = OptionsContainer(self.__class__.options)
        self.options = OptionsContainer()

        # Subclasses must override this method. We cannot do this in the
        # subclass constructor because it must be done before setting the
        # kwargs as options.
        self.register_options()

        self.options.set(**kwargs)

        # Add the instance-level set method. See __set for an explanation.
        self.set = self.__set

    ##############
    ## Children ##
    ##############

    def children(self):
        '''Return (name, item) pairs for all attributes that are Items.'''
        return [(name, attribute)
                for name, attribute in self.__dict__.items()
                if isinstance(attribute, Item)]

    def containers(self):
        '''Return (name, container) pairs for all attributes that are
        ItemContainers.'''
        return [(name, attribute)
                for name, attribute in self.__dict__.items()
                if isinstance(attribute, ItemContainer)]

    #############
    ## Options ##
    #############

    def register_options(self):
        '''Subclasses must register their options here (see class docstring).'''
        raise NotImplementedError(
            "Item subclasses must implement the register_options method")

    def __set(self, **kwargs):
        '''
        A setter shortcut for the instance-level options.

        This can't be called "set" because there is already a class method
        with the same name (defined in the metaclass) and Python does not
        have separate namespaces for class methods and instance methods.
        Therefore, this method will be assigned to the name of "set" in the
        instance namespace by the constructor.
        '''
        self.options.set(**kwargs)

    def resolve_options(self, templates = None, inherited = None, indent="", verbose = False):
        '''
        Recursively resolve the options for this item and all of its
        descendants (direct children and children held in containers).

        Returns a dict mapping each item (self included) to its resolved
        options.
        '''
        def p(*args, **kwargs):
            # Debug printer; only active in verbose mode.
            if verbose:
                print(indent, *args, **kwargs)

        p("Resolve options for", self)
        p("* Templates:", templates)
        p("* Tags:", self.tags)

        # Determine the applicable templates: the ones kindly selected by our
        # parent, plus the matching templates from our own class.
        templates = templates or []
        templates = templates + type(self).matching_templates(self.tags)
        template_option_containers = [t.options for t in templates]

        inherited = inherited or dict()

        # Determine the options for self
        own_options = self.options.resolve(template_option_containers, inherited)
        #print(indent+"* Own options: {}".format(own_options))

        # Determine the options for direct children (recursively)
        children_options = {}
        for name, child in self.children():
            p("* Child", name)
            child_templates = [getattr(template, name) for template in templates]
            child_inherited = own_options
            child_options = child.resolve_options(child_templates, child_inherited,
                                                  indent = indent+"    ", verbose = verbose)
            children_options.update(child_options)

        # Determine the options for children in containers (recursively)
        containers_options = {}
        for name, container in self.containers():
            p("* Container", name, container)
            template_containers = [getattr(template, name)
                                   for template in templates + [self]]
            p("* Template_containers", template_containers)

            for child in container.items:
                # Select the matching templates for the child.
                # NOTE: the inner loop variable was previously also called
                # "container", shadowing the outer loop variable; renamed to
                # avoid confusion (behavior is unchanged).
                child_templates = []
                for template_container in template_containers:
                    child_templates += template_container.matching_templates(child.tags)

                child_inherited = own_options
                child_options = child.resolve_options(child_templates, child_inherited,
                                                      indent = indent+"    ", verbose = verbose)
                containers_options.update(child_options)

        result = {}
        result[self] = own_options
        result.update(children_options)
        result.update(containers_options)
        return result
t): for k, v in element.items(): html_class = 'collapsible_group' # This allows us to later (in JavaScript) recursively highlight sections # that are likely of interest to the user, i.e. whose exp
ectations can be # modified. if k and FULL_PASS in k: html_class = 'highlighted_coll
apsible_group' file_handle.write('<button type="button" class="%s">%s</button>\n' % (html_class, k)) file_handle.write('<div class="content">\n') _RecursiveHtmlToFile(v, file_handle) file_handle.write('</div>\n') elif isinstance(element, list): for i in element: _RecursiveHtmlToFile(i, file_handle) else: raise RuntimeError('Given unhandled type %s' % type(element)) def _LinkifyString(s): """Turns instances of links into anchor tags. Args: s: The string to linkify. Returns: A copy of |s| with instances of links turned into anchor tags pointing to the link. """ for component in s.split(): if component.startswith('http'): component = component.strip(',.!') s = s.replace(component, '<a href="%s">%s</a>' % (component, component)) return s def _ConvertTestExpectationMapToStringDict(test_expectation_map): """Converts |test_expectation_map| to a dict of strings for reporting. Args: test_expectation_map: A data_types.TestExpectationMap. Returns: A string dictionary representation of |test_expectation_map| in the following format: { expectation_file: { test_name: { expectation_summary: { builder_name: { 'Fully passed in the following': [ step1, ], 'Partially passed in the following': { step2: [ failure_link, ], }, 'Never passed in the following': [ step3, ], } } } } } """ assert isinstance(test_expectation_map, data_types.TestExpectationMap) output_dict = {} # This initially looks like a good target for using # data_types.TestExpectationMap's iterators since there are many nested loops. # However, we need to reset state in different loops, and the alternative of # keeping all the state outside the loop and resetting under certain # conditions ends up being less readable than just using nested loops. 
for expectation_file, expectation_map in test_expectation_map.items(): output_dict[expectation_file] = {} for expectation, builder_map in expectation_map.items(): test_name = expectation.test expectation_str = _FormatExpectation(expectation) output_dict[expectation_file].setdefault(test_name, {}) output_dict[expectation_file][test_name][expectation_str] = {} for builder_name, step_map in builder_map.items(): output_dict[expectation_file][test_name][expectation_str][ builder_name] = {} fully_passed = [] partially_passed = {} never_passed = [] for step_name, stats in step_map.items(): if stats.NeverNeededExpectation(expectation): fully_passed.append(AddStatsToStr(step_name, stats)) elif stats.AlwaysNeededExpectation(expectation): never_passed.append(AddStatsToStr(step_name, stats)) else: assert step_name not in partially_passed partially_passed[step_name] = stats output_builder_map = output_dict[expectation_file][test_name][ expectation_str][builder_name] if fully_passed: output_builder_map[FULL_PASS] = fully_passed if partially_passed: output_builder_map[PARTIAL_PASS] = {} for step_name, stats in partially_passed.items(): s = AddStatsToStr(step_name, stats) output_builder_map[PARTIAL_PASS][s] = list(stats.failure_links) if never_passed: output_builder_map[NEVER_PASS] = never_passed return output_dict def _ConvertUnmatchedResultsToStringDict(unmatched_results): """Converts |unmatched_results| to a dict of strings for reporting. Args: unmatched_results: A dict mapping builder names (string) to lists of data_types.Result who did not have a matching expectation. Returns: A string dictionary representation of |unmatched_results| in the following format: { test_name: { builder_name: { step_name: [ individual_result_string_1, individual_result_string_2, ... ], ... }, ... }, ... 
} """ output_dict = {} for builder, results in unmatched_results.items(): for r in results: builder_map = output_dict.setdefault(r.test, {}) step_map = builder_map.setdefault(builder, {}) result_str = 'Got "%s" on %s with tags [%s]' % ( r.actual_result, data_types.BuildLinkFromBuildId( r.build_id), ' '.join(r.tags)) step_map.setdefault(r.step, []).append(result_str) return output_dict def _ConvertUnusedExpectationsToStringDict(unused_expectations): """Converts |unused_expectations| to a dict of strings for reporting. Args: unused_expectations: A dict mapping expectation file (str) to lists of data_types.Expectation who did not have any matching results. Returns: A string dictionary representation of |unused_expectations| in the following format: { expectation_file: [ expectation1, expectation2, ], } The expectations are in a format similar to what would be present as a line in an expectation file. """ output_dict = {} for expectation_file, expectations in unused_expectations.items(): expectation_str_list = [] for e in expectations: expectation_str_list.append( '[ %s ] %s [ %s ]' % (' '.join(e.tags), e.test, ' '.join(e.expected_results))) output_dict[expectation_file] = expectation_str_list return output_dict def _FormatExpectation(expectation): return '"%s" expectation on "%s"' % (' '.join( expectation.expected_results), ' '.join(expectation.tags)) def AddStatsToStr(s, stats): return '%s %s' % (s, stats.GetStatsAsString()) def OutputAffectedUrls(removed_urls, orphaned_urls=None): """Outputs URLs of affected expectations for easier consumption by the user. Outputs the following: 1. A string suitable for passing to Chrome via the command line to open all bugs in the browser. 2. A string suitable for copying into the CL description to associate the CL with all the affected bugs. 3. A string containing any bugs that should be closable since there are no longer any associated expectations. Args: removed_urls: A set or list of strings containing bug URLs. 
orphaned_urls: A subset of |removed_urls| whose bugs no longer have any corresponding expectations. """ removed_urls = list(removed_urls) removed_urls.sort() orphaned_urls = orphaned_urls or [] orphaned_urls = list(orphaned_urls) orphaned_urls.sort() _OutputAffectedUrls(removed_urls, orphaned_urls) _OutputUrlsForClDescription(removed_urls, orphaned_urls) def _OutputAffectedUrls(affected_urls, orphaned_urls, file_handle=None): """Outputs |urls| for opening in a browser as affected bugs. Args: affected_urls: A list of strings containing URLs to output. orphaned_urls: A list of strings containing URLs to output as closable. file_handle: A file handle to write the string to. Defaults to stdout. """ _OutputUrlsForCommandLine(affected_urls, "Affected bugs", file_handle) if orphaned_urls: _OutputUrlsForCommandLine(orphaned_urls, "Closable bugs", file_handle) def _OutputUrlsForCommandLine(urls, description, file_handle=None): """Outputs |urls| for opening in a browser. The output string is meant to be passed to a browser via the command line in order to open all URLs in that browser, e.g. `google-chrome https://crbug.com/1234 https://crbug.com/2345` Args: urls: A list of strings containing URLs to output. des
out.add_events(gtk.gdk.LEAVE_NOTIFY_MASK) self.layout.connect("leave-notify-event", self.leave) def update(self): self.queue_draw() def set_position_for_selected(self, x): self.gradient.set_position(self.selected, x) def set_color_for_selected(self, color): color.position = self.gradient.colors[self.selected].position self.gradient.set_color(self.selected, color) def motion(self, widget, event): self._motion = True self.x = event.x if self.move: if self.selected >= 0: if self.moving_callback: self.moving_callback(event.x / self.width) self.set_position_for_selected(event.x / self.width) self.gradient.update() self.queue_draw() return True def enter(self, widget, event): return True def leave(self, widget, event): self._motion = False self.x = event.x self.queue_draw() return True def press(self, widget, event): self.move = True cnt = len(self.gradient.colors) if cnt > 0: for col in range(0, cnt): if (self.gradient.colors[col].position > (event.x / self.width - 0.01)) and ( self.gradient.colors[col].position < (event.x / self.width + 0.01)): self.selected = col self.moving_callback(self.gradient.colors[col].position) self.color_callback(self.gradient.colors[col]) break else: self.selected = -1 if self.selected == -1 or not cnt: self.gradient.add_new_color(GradientColor(1, 1, 0.1, 1.0, event.x / self.width)) self.selected = len(self.gradient.colors)-1 self.moving_callback(self.gradient.colors[self.selected].position) self.color_callback(self.gradient.colors[self.selected]) self.gradient.update() self.queue_draw() def release(self, widget, event): self.move = False self.queue_draw() def expose(self, widget, event): context = widget.bin_window.cairo_create() self.width, self.height = widget.window.get_size() context.save() context.new_path() #context.translate(0, 0) if (self.width > 0) and (self.height > 0): context.scale(self.width, self.height) context.rectangle(0, 0, 1, 1) context.set_source(self.gradient.gradient) context.fill_preserve() context.restore() if 
self._motion and not self.move: context.new_path() dash = list() context.set_dash(dash) context.set_line_width(2) context.move_to(self.x, 0) context.line_to(self.x, 30) context.move_to(self.x, self.height - 30) context.line_to(self.x, self.height) scol = sorted(self.gradient.colors, key=lambda color: color.position) # better in __init__ and update when necessary cnt = len(scol) rx = self.x / self.width index = 0 for col in scol: if rx < col.position:
for c in range(0, cnt): if self.gradient.colors[c].position == col.position: index = c break break r = self.gradient.colors[index].red g = self.gradient.colors[index].green b = self.gradient.colors[index].blue l = 1 - (r + g + b) / 3.0 if l >= 0.5: l = 1 else:
l = 0 r, g, b = l, l, l context.set_source_rgba(r, g, b, 1.0) context.stroke() for color in range(len(self.gradient.colors)): if color == self.selected: delta = 10 else: delta = 0 context.new_path() pos = int(self.width * self.gradient.colors[color].position) context.move_to(pos - 5, 0) context.line_to(pos + 5, 0) context.line_to(pos, 20) context.line_to(pos - 5, 0) context.set_source_rgb(self.gradient.colors[color].alpha, self.gradient.colors[color].alpha, self.gradient.colors[color].alpha) context.fill_preserve() if delta: context.move_to(pos, 20) context.line_to(pos, 20 + delta) context.set_source_rgb(0.44, 0.62, 0.81) context.stroke() class LinearGradientEditor(gtk.VBox, Signalizable): def __init__(self): gtk.VBox.__init__(self) from canvas import Canvas self.canvas = Canvas() table = gtk.Table(4, 4, False) self.pack_start(table) self.combobox = gtk.combo_box_new_text() table.attach(self.combobox, 1, 2, 0, 1, gtk.FILL, 0) gradient = Gradient() self.gl = GradientLine(self.moving_callback, self.color_callback, gradient) table.attach(self.gl, 1, 2, 1, 2, gtk.FILL | gtk.EXPAND, 0) new_color = gtk.Button() image = gtk.Image() image.set_from_stock(gtk.STOCK_NEW, gtk.ICON_SIZE_MENU) new_color.add(image) table.attach(new_color, 2, 3, 0, 1, 0, 0, 0) button = gtk.Button() image = gtk.Image() image.set_from_stock(gtk.STOCK_GO_FORWARD, gtk.ICON_SIZE_MENU) button.add(image) button.connect("clicked", self.forward) table.attach(button, 2, 3, 1, 2, 0, gtk.FILL, 0) button = gtk.Button() image = gtk.Image() image.set_from_stock(gtk.STOCK_GO_BACK, gtk.ICON_SIZE_MENU) button.add(image) button.connect("clicked", self.back) table.attach(button, 0, 1, 1, 2, 0, gtk.FILL, 0) hbox = gtk.HBox() label = gtk.Label(_("Color:")) hbox.pack_start(label) self.color_button = gtk.ColorButton() self.color_button.set_use_alpha(True) self.color_button.connect("color-set", self.set_gradient_color) hbox.pack_start(self.color_button) label = gtk.Label(_("Position:")) hbox.pack_start(label) 
self.sel_position = gtk.SpinButton(climb_rate=0.00001, digits=5) self.sel_position.set_range(0.0, 1.0) self.sel_position.set_wrap(True) self.sel_position.set_increments(0.00001, 0.1) self.sel_position.connect("value-changed", self.move_color) hbox.pack_start(self.sel_position) table.attach(hbox, 1, 2, 2, 3, gtk.FILL, 0, 0) self.install_signal("update") self.show_all() def set_value(self, value): self.gl.gradient = Gradient(string=str(value)) def forward(self, widget): if self.gl: if self.gl.selected < len(self.gl.gradient.colors) - 1: self.gl.selected += 1 else: self.gl.selected = -1 self.moving_callback(self.gl.gradient.colors[self.gl.selected].position) self.update() def back(self, widget): if self.gl: if self.gl.selected > -1: self.gl.selected -= 1 else: self.gl.selected = len(self.gl.gradient.colors) - 1 self.moving_callback(self.gl.gradient.colors[self.gl.selected].position) self.update() def moving_callback(self, x): self.sel_position.set_value(x) self.update() def color_callback(self, color): self.color_button.set_color(gtk.gdk.Color(float(color.red), float(color.green), float(color.blue))) self.color_button.set_alpha(int(color.alpha * 65535)) self.update() def move_color(self, widget): if self.gl: self.gl.set_position_for_selected(widget.get_value()) self.update() def set_gradient_color(self, widget): if self.gl: col = GradientColor(widget.get_color().red_float, widget.get_color().green_float, widget.get_color().blue_float,
one, test_phone_numbers: dict = None, project: str = None, service_account_file: str = "", ): channel.initialize() self.name = name self.display_name = display_name self.allow_password_signup = allow_password_signup self.enable_email_link_signin = enable_email_link_signin self.disable_auth = disable_auth self.enable_anonymous_user = enable_anonymous_user self.mfa_config = mfa_config self.test_phone_numbers = test_phone_numbers self.project = project self.service_account_file = service_account_file def apply(self): stub = tenant_pb2_grpc.IdentitytoolkitBetaTenantServiceStub(channel.Channel()) request = tenant_pb2.ApplyIdentitytoolkitBetaTenantRequest() if Primitive.to_proto(self.name): request
.resource.name = Primitive.to_proto(self.name) if Primitive.to_proto(self.display_name): request.resource.display_name = Primitive.to_proto(self.display_name) if Primitive.to_proto(self.allow_password_signup): request.resource.allow_password_signup = Primitive.to_proto( self.allow_password_signup ) if Primitive.to_proto(self.enable_email_link_signin): request.resource.enable_email_link_signin
= Primitive.to_proto( self.enable_email_link_signin ) if Primitive.to_proto(self.disable_auth): request.resource.disable_auth = Primitive.to_proto(self.disable_auth) if Primitive.to_proto(self.enable_anonymous_user): request.resource.enable_anonymous_user = Primitive.to_proto( self.enable_anonymous_user ) if TenantMfaConfig.to_proto(self.mfa_config): request.resource.mfa_config.CopyFrom( TenantMfaConfig.to_proto(self.mfa_config) ) else: request.resource.ClearField("mfa_config") if Primitive.to_proto(self.test_phone_numbers): request.resource.test_phone_numbers = Primitive.to_proto( self.test_phone_numbers ) if Primitive.to_proto(self.project): request.resource.project = Primitive.to_proto(self.project) request.service_account_file = self.service_account_file response = stub.ApplyIdentitytoolkitBetaTenant(request) self.name = Primitive.from_proto(response.name) self.display_name = Primitive.from_proto(response.display_name) self.allow_password_signup = Primitive.from_proto( response.allow_password_signup ) self.enable_email_link_signin = Primitive.from_proto( response.enable_email_link_signin ) self.disable_auth = Primitive.from_proto(response.disable_auth) self.enable_anonymous_user = Primitive.from_proto( response.enable_anonymous_user ) self.mfa_config = TenantMfaConfig.from_proto(response.mfa_config) self.test_phone_numbers = Primitive.from_proto(response.test_phone_numbers) self.project = Primitive.from_proto(response.project) def delete(self): stub = tenant_pb2_grpc.IdentitytoolkitBetaTenantServiceStub(channel.Channel()) request = tenant_pb2.DeleteIdentitytoolkitBetaTenantRequest() request.service_account_file = self.service_account_file if Primitive.to_proto(self.name): request.resource.name = Primitive.to_proto(self.name) if Primitive.to_proto(self.display_name): request.resource.display_name = Primitive.to_proto(self.display_name) if Primitive.to_proto(self.allow_password_signup): request.resource.allow_password_signup = Primitive.to_proto( 
self.allow_password_signup ) if Primitive.to_proto(self.enable_email_link_signin): request.resource.enable_email_link_signin = Primitive.to_proto( self.enable_email_link_signin ) if Primitive.to_proto(self.disable_auth): request.resource.disable_auth = Primitive.to_proto(self.disable_auth) if Primitive.to_proto(self.enable_anonymous_user): request.resource.enable_anonymous_user = Primitive.to_proto( self.enable_anonymous_user ) if TenantMfaConfig.to_proto(self.mfa_config): request.resource.mfa_config.CopyFrom( TenantMfaConfig.to_proto(self.mfa_config) ) else: request.resource.ClearField("mfa_config") if Primitive.to_proto(self.test_phone_numbers): request.resource.test_phone_numbers = Primitive.to_proto( self.test_phone_numbers ) if Primitive.to_proto(self.project): request.resource.project = Primitive.to_proto(self.project) response = stub.DeleteIdentitytoolkitBetaTenant(request) @classmethod def list(self, project, service_account_file=""): stub = tenant_pb2_grpc.IdentitytoolkitBetaTenantServiceStub(channel.Channel()) request = tenant_pb2.ListIdentitytoolkitBetaTenantRequest() request.service_account_file = service_account_file request.Project = project return stub.ListIdentitytoolkitBetaTenant(request).items def to_proto(self): resource = tenant_pb2.IdentitytoolkitBetaTenant() if Primitive.to_proto(self.name): resource.name = Primitive.to_proto(self.name) if Primitive.to_proto(self.display_name): resource.display_name = Primitive.to_proto(self.display_name) if Primitive.to_proto(self.allow_password_signup): resource.allow_password_signup = Primitive.to_proto( self.allow_password_signup ) if Primitive.to_proto(self.enable_email_link_signin): resource.enable_email_link_signin = Primitive.to_proto( self.enable_email_link_signin ) if Primitive.to_proto(self.disable_auth): resource.disable_auth = Primitive.to_proto(self.disable_auth) if Primitive.to_proto(self.enable_anonymous_user): resource.enable_anonymous_user = Primitive.to_proto( self.enable_anonymous_user ) if 
TenantMfaConfig.to_proto(self.mfa_config): resource.mfa_config.CopyFrom(TenantMfaConfig.to_proto(self.mfa_config)) else: resource.ClearField("mfa_config") if Primitive.to_proto(self.test_phone_numbers): resource.test_phone_numbers = Primitive.to_proto(self.test_phone_numbers) if Primitive.to_proto(self.project): resource.project = Primitive.to_proto(self.project) return resource class TenantMfaConfig(object): def __init__(self, state: str = None, enabled_providers: list = None): self.state = state self.enabled_providers = enabled_providers @classmethod def to_proto(self, resource): if not resource: return None res = tenant_pb2.IdentitytoolkitBetaTenantMfaConfig() if TenantMfaConfigStateEnum.to_proto(resource.state): res.state = TenantMfaConfigStateEnum.to_proto(resource.state) if TenantMfaConfigEnabledProvidersEnumArray.to_proto( resource.enabled_providers ): res.enabled_providers.extend( TenantMfaConfigEnabledProvidersEnumArray.to_proto( resource.enabled_providers ) ) return res @classmethod def from_proto(self, resource): if not resource: return None return TenantMfaConfig( state=TenantMfaConfigStateEnum.from_proto(resource.state), enabled_providers=TenantMfaConfigEnabledProvidersEnumArray.from_proto( resource.enabled_providers ), ) class TenantMfaConfigArray(object): @classmethod def to_proto(self, resources): if not resources: return resources return [TenantMfaConfig.to_
import urllib2
import base64
import json

# NOTE(review): wildcard import kept for backward compatibility -- names from
# "link" may be relied on elsewhere; prefer explicit imports if they are not.
from link import *
from GitFetcher import GitHubFetcher

# SECURITY: hard-coded credentials checked into source. These should be moved
# to environment variables or a credentials store and rotated.
username = "debuggerman"
password = "megadeth"
orgUrl = "https://api.github.com/orgs"
orgName = "coeus-solutions"

# Fetch and display organization info for the configured GitHub org.
gitFetcher = GitHubFetcher(username = username, password = password, orgUrl = orgUrl, orgName = orgName)
gitFetcher.getOrgInfo()
from omelette.fromage.common import DrawableNode


# NOTE(review): this class deliberately shadows the imported DrawableNode:
# at class-definition time the base-class expression still resolves to the
# imported name, so this subclasses omelette.fromage.common.DrawableNode and
# then replaces that name in this module's namespace. It works, but renaming
# the subclass would be clearer -- confirm no importer depends on the name.
class DrawableNode(DrawableNode):
    def __init__(self, uml_object):
        # Forward construction to the base class; presumably uml_object is the
        # model object this node renders -- TODO confirm against base class.
        super(DrawableNode, self).__init__(uml_object)
'''
Created on 20/10/2014

@author: fer
'''

# NOTE(review): the original file used Python-2-only print statements
# ("print x"); they are converted here to the parenthesized form, which
# prints the same output under Python 2 and is also valid Python 3.
# The original indentation was lost; everything is assumed to belong to the
# script body -- TODO confirm against the original file.
if __name__ == '__main__':
    print('hola')

    x = 32  # integer
    print(x)

    # string variable
    mensaje = "hola mundo"
    print(mensaje)

    # boolean
    my_bool = True
    print(my_bool)

    # exponents
    calculo = 10**2
    print(calculo)
    print("La variable calculo es de tipo: %s" % type(calculo))
    print("Clase %s" % type(calculo).__name__)

    '''
    Type conversions
    '''
    entero = int(3.999)
    print(entero)

    real = float(3)
    print(real)

    cadena = str(32)
    print(type(cadena))
ricAAE(AggregateMetric): """ computes average absolute error """ def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None): error = abs(groundTruth - prediction) accumulatedError += error if historyBuffer is not None: historyBuffer.append(error) if len(historyBuffer) > self.spec.params["window"] : accumulatedError -= historyBuffer.popleft() return accumulatedError def aggregate(self, accumulatedError, historyBuffer, steps): n = steps if historyBuffer is not None: n = len(historyBuffer) return accumulatedError/ float(n) class MetricAltMAPE(AggregateMetric): """ computes the "Alternative" Mean Absolute Percent Error. A generic MAPE computes the percent error for each sample, and then gets an average. This can suffer from samples where the actual value is very small or zero - this one sample can drastically alter the mean. This metric on the other hand first computes the average of the actual values and the averages of the errors before dividing. This washes out the effects of a small number of samples with very small actual values. 
""" def __init__(self, metricSpec): super(MetricAltMAPE, self).__init__(metricSpec) self._accumulatedGroundTruth = 0 self._accumulatedError = 0 def addInstance(self, groundTruth, prediction, record = None, result = None): # If missing data, if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None: return self.aggregateError # Compute absolute error error = abs(groundTruth - prediction) if self.verbosity > 0: print "MetricAltMAPE:\n groundTruth: %s\n Prediction: " \ "%s\n Error: %s" % (groundTruth, prediction, error) # Update the accumulated groundTruth and aggregate error if self.history is not None: self.history.append((groundTruth, error)) if len(self.history) > self.spec.params["window"] : (oldGT, oldErr) = self.history.popleft() self._accumulatedGroundTruth -= oldGT self._accumulatedError -= oldErr self._accumulatedGroundTruth += abs(groundTruth) self._accumulatedError += error # Compute aggregate pct error if self._accumulatedGroundTruth > 0: self.aggregateError = 100.0 * self._accumulatedError / \ self._accumulatedGroundTruth else: self.aggregateError = 0 if self.verbosity >= 1: print " accumGT:", self._accumulatedGroundTruth print " accumError:", self._accumulatedError print " aggregateError:", self.aggregateError self.steps += 1 return self.aggregateError class MetricMAPE(AggregateMetric): """ computes the "Classic" Mean Absolute Percent Error. This computes the percent error for each sample, and then gets an average. Note that this can suffer from samples where the actual value is very small or zero - this one sample can drastically alter the mean. To avoid this potential issue, use 'altMAPE' instead. This metric is provided mainly as a convenience when comparing results against other investigations that have also used MAPE. 
""" def __init__(self, metricSpec): super(MetricMAPE, self).__init__(metricSpec) self._accumulatedPctError = 0 def addInstance(self, groundTruth, prediction, record = None, result = None): # If missing data, if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None: return self.aggregateError # Compute absolute error if groundTruth != 0: pctError = float(abs(groundTruth - prediction))/groundTruth else: # Ignore this sample if self.verbosity > 0: print "Ignoring sample with groundTruth of 0" self.steps += 1 return self.aggregateError if self.verbosity > 0: print "MetricMAPE:\n groundTruth: %s\n Prediction: " \ "%s\n Error: %s" % (groundTruth, prediction, pctError) # Update the accumulated groundTruth and aggregate error if self.history is not None: self.history.append(pctError) if len(self.history) > self.spec.params["window"] : (oldPctErr) = self.history.popleft() self._accumulatedPctError -= oldPctErr self._accumulatedPctError += pctError # Compute aggregate pct error self.aggregateError = 100.0 * self._accumulatedPctError / len(self.history) if self.verbosity >= 1: print " accumPctError:", self._accumulatedPctError print " aggregateError:", self.aggregateError self.steps += 1 return self.aggregateError class MetricPassThruPrediction(MetricsIface): """ This is not a metric, but rather a facility for passing the predictions generated by a baseline metric through to the prediction output cache produced by a mode
l. For example, if you wanted to see the predictions generated for the
  TwoGram metric, you would specify 'PassThruPredictions' as the
  'errorMetric' parameter.

  This metric class simply takes the prediction and outputs that as the
  aggregateMetric value. """

  def __init__(self, metricSpec):
    self.spec = metricSpec
    # Number of recent predictions averaged together (default 1 = pure
    # pass-through of the latest prediction).
    self.window = metricSpec.params.get("window", 1)
    self.avg = MovingAverage(self.window)
    self.value = None

  def addInstance(self, groundTruth, prediction, record = None, result = None):
    """Compute and store metric value"""
    # NOTE(review): groundTruth is intentionally ignored -- the "metric" is
    # just the (optionally windowed) prediction itself.
    self.value = self.avg(prediction)

  def getMetric(self):
    """Return the metric value """
    return {"value": self.value}

  #def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer):
  #  # Simply return the prediction as the accumulated error
  #  return prediction
  #
  #def aggregate(self, accumulatedError, historyBuffer, steps):
  #  # Simply return the prediction as the aggregateError
  #  return accumulatedError


class MetricMovingMean(AggregateMetric):
  """ computes error metric based on moving mean prediction """

  def __init__(self, metricSpec):
    # This metric assumes a default 'steps' of 1
    if not 'steps' in metricSpec.params:
      metricSpec.params['steps'] = 1

    super(MetricMovingMean, self).__init__(metricSpec)

    # Only supports 1 item in _predictionSteps
    assert (len(self._predictionSteps) == 1)

    # Window size (>= 1) for the moving mean of past ground truths; that
    # mean is what gets scored against each new ground truth.
    self.mean_window = 10
    if metricSpec.params.has_key('mean_window'):
      assert metricSpec.params['mean_window'] >= 1
      self.mean_window = metricSpec.params['mean_window']

    # Construct moving average instance
    self._movingAverage = MovingAverage(self.mean_window)

  def getMetric(self):
    # Delegate to the wrapped sub-metric built by AggregateMetric.
    return self._subErrorMetrics[0].getMetric()

  def addInstance(self, groundTruth, prediction, record = None, result = None):
    # If missing data, leave the aggregate error unchanged.
    if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
      return self._subErrorMetrics[0].aggregateError

    if self.verbosity > 0:
      print "groundTruth:\n{0!s}\nPredictions:\n{1!s}\n{2!s}\n".format(groundTruth, prediction, self.getMetric())

    # Use ground truth from 'steps' steps ago as our most recent ground truth
    lastGT = self._getShiftedGroundTruth(groundTruth)
    if lastGT is None:
      return self._subErrorMetrics[0].aggregateError

    # Score the moving mean of past ground truths as if it were the
    # prediction; 'prediction' itself is not used for scoring here.
    mean = self._movingAverage(lastGT)

    return self._subErrorMetrics[0].addInstance(groundTruth, mean, record)


def evalCustomErrorMetric(expr, prediction, groundTruth, tools):
  # Evaluate a user-supplied error expression inside a restricted
  # interpreter; only the names placed in symtable below are visible to it.
  sandbox = SafeInterpreter(writer=StringIO())

  if isinstance(prediction, dict):
    # Probability-distribution prediction: expose the most likely value,
    # the expected value (EXP) and the full distribution.
    sandbox.symtable['prediction'] = tools.mostLikely(prediction)
    sandbox.symtable['EXP'] = tools.expValue(prediction)
    sandbox.symtable['probabilityDistribution'] = prediction
  else:
    sandbox.symtable['prediction'] = prediction
  sandbox.symtable['groundTruth'] = groundTruth
  sandbox.symtable['tools'] = tools

  error = sandbox(expr)
  return error


class CustomErrorMetric(MetricsIface):
  """ Custom Error Metric class that handles user defined error metrics """

  class CircularBuffer():
    """ implementation of a fixed size constant random access circular buffer """

    def __init__(self,length):
      #Create an array to back the buffer
gy/
"""
import asyncio
import logging

import voluptuous as vol
import aiohttp
import async_timeout

from homeassistant.const import (
    CONF_NAME, CONF_USERNAME, CONF_PASSWORD,
    CONF_URL, CONF_WHITELIST, CONF_VERIFY_SSL)
from homeassistant.components.camera import (
    Camera, PLATFORM_SCHEMA)
from homeassistant.helpers.aiohttp_client import (
    async_get_clientsession, async_create_clientsession,
    async_aiohttp_proxy_stream)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.async import run_coroutine_threadsafe

_LOGGER = logging.getLogger(__name__)

DEFAULT_NAME = 'Synology Camera'
DEFAULT_STREAM_ID = '0'
# Seconds allowed for each Surveillance Station web API request.
TIMEOUT = 5
CONF_CAMERA_NAME = 'camera_name'
CONF_STREAM_ID = 'stream_id'

# Surveillance Station web API endpoints / API identifiers.
QUERY_CGI = 'query.cgi'
QUERY_API = 'SYNO.API.Info'
AUTH_API = 'SYNO.API.Auth'
CAMERA_API = 'SYNO.SurveillanceStation.Camera'
STREAMING_API = 'SYNO.SurveillanceStation.VideoStream'
SESSION_ID = '0'

WEBAPI_PATH = '/webapi/'
AUTH_PATH = 'auth.cgi'
CAMERA_PATH = 'camera.cgi'
STREAMING_PATH = 'SurveillanceStation/videoStreaming.cgi'
CONTENT_TYPE_HEADER = 'Content-Type'

# base url + webapi path + endpoint path.
SYNO_API_URL = '{0}{1}{2}'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Required(CONF_USERNAME): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Required(CONF_URL): cv.string,
    vol.Optional(CONF_WHITELIST, default=[]): cv.ensure_list,
    vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
})


@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Setup a Synology IP Camera."""
    verify_ssl = config.get(CONF_VERIFY_SSL)
    websession_init = async_get_clientsession(hass, verify_ssl)

    # Determine API to use for authentication
    syno_api_url = SYNO_API_URL.format(
        config.get(CONF_URL), WEBAPI_PATH, QUERY_CGI)

    query_payload = {
        'api': QUERY_API,
        'method': 'Query',
        'version': '1',
        'query': 'SYNO.'
    }
    query_req = None
    try:
        with async_timeout.timeout(TIMEOUT, loop=hass.loop):
            query_req = yield from websession_init.get(
                syno_api_url,
                params=query_payload
            )
        query_resp = yield from query_req.json()
        auth_path = query_resp['data'][AUTH_API]['path']
        camera_api = query_resp['data'][CAMERA_API]['path']
        # NOTE(review): camera_path is assigned the same CAMERA_API path as
        # camera_api above -- looks duplicated; confirm against upstream.
        camera_path = query_resp['data'][CAMERA_API]['path']
        streaming_path = query_resp['data'][STREAMING_API]['path']

    except (asyncio.TimeoutError, aiohttp.errors.ClientError):
        _LOGGER.exception("Error on %s", syno_api_url)
        return False

    finally:
        if query_req is not None:
            yield from query_req.release()

    # Authenticate to NAS to get a session id
    syno_auth_url = SYNO_API_URL.format(
        config.get(CONF_URL), WEBAPI_PATH, auth_path)

    session_id = yield from get_session_id(
        hass,
        websession_init,
        config.get(CONF_USERNAME),
        config.get(CONF_PASSWORD),
        syno_auth_url
    )

    # init websession -- the session cookie authenticates all later calls.
    websession = async_create_clientsession(
        hass, verify_ssl, cookies={'id': session_id})

    # Use SessionID to get cameras in system
    syno_camera_url = SYNO_API_URL.format(
        config.get(CONF_URL), WEBAPI_PATH, camera_api)

    camera_payload = {
        'api': CAMERA_API,
        'method': 'List',
        'version': '1'
    }
    try:
        with async_timeout.timeout(TIMEOUT, loop=hass.loop):
            camera_req = yield from websession.get(
                syno_camera_url,
                params=camera_payload
            )
    except (asyncio.TimeoutError, aiohttp.errors.ClientError):
        _LOGGER.exception("Error on %s", syno_camera_url)
        return False

    camera_resp = yield from camera_req.json()
    cameras = camera_resp['data']['cameras']
    yield from camera_req.release()

    # add cameras
    # NOTE(review): cameras are only added when the whitelist is EMPTY; a
    # non-empty whitelist adds nothing -- verify this is the intended logic.
    devices = []
    for camera in cameras:
        if not config.get(CONF_WHITELIST):
            camera_id = camera['id']
            snapshot_path = camera['snapshot_path']

            device = SynologyCamera(
                hass, websession, config, camera_id,
                camera['name'], snapshot_path, streaming_path,
                camera_path, auth_path
            )
            devices.append(device)

    async_add_devices(devices)


@asyncio.coroutine
def get_session_id(hass, websession, username, password, login_url):
    """Get a session id."""
    auth_payload = {
        'api': AUTH_API,
        'method': 'Login',
        'version': '2',
        'account': username,
        'passwd': password,
        'session': 'SurveillanceStation',
        'format': 'sid'
    }
    auth_req = None
    try:
        with async_timeout.timeout(TIMEOUT, loop=hass.loop):
            auth_req = yield from websession.get(
                login_url,
                params=auth_payload
            )
        auth_resp = yield from auth_req.json()
        return auth_resp['data']['sid']

    except (asyncio.TimeoutError, aiohttp.errors.ClientError):
        _LOGGER.exception("Error on %s", login_url)
        return False

    finally:
        if auth_req is not None:
            yield from auth_req.release()


class SynologyCamera(Camera):
    """An implementation of a Synology NAS based IP camera."""

    def __init__(self, hass, websession, config, camera_id,
                 camera_name, snapshot_path, streaming_path, camera_path,
                 auth_path):
        """Initialize a Synology Surveillance Station camera."""
        super().__init__()
        self.hass = hass
        self._websession = websession
        self._name = camera_name
        self._synology_url = config.get(CONF_URL)
        self._camera_name = config.get(CONF_CAMERA_NAME)
        self._stream_id = config.get(CONF_STREAM_ID)
        self._camera_id = camera_id
        self._snapshot_path = snapshot_path
        self._streaming_path = streaming_path
        self._camera_path = camera_path
        self._auth_path = auth_path

    def camera_image(self):
        """Return bytes of camera image."""
        # Synchronous entry point: run the async fetch on the event loop.
        return run_coroutine_threadsafe(
            self.async_camera_image(), self.hass.loop).result()

    @asyncio.coroutine
    def async_camera_image(self):
        """Return a still image response from the camera."""
        image_url = SYNO_API_URL.format(
            self._synology_url, WEBAPI_PATH, self._camera_path)

        image_payload = {
            'api': CAMERA_API,
            'method': 'GetSnapshot',
            'version': '1',
            'cameraId': self._camera_id
        }
        try:
            with async_timeout.timeout(TIMEOUT, loop=self.hass.loop):
                response = yield from self._websession.get(
                    image_url,
                    params=image_payload
                )
        except (asyncio.TimeoutError, aiohttp.errors.ClientError):
            _LOGGER.exception("Error on %s", image_url)
            return None

        image = yield from response.read()
        yield from response.release()

        return image

    @asyncio.coroutine
    def handle_async_mjpeg_stream(self, request):
        """Return a MJPEG stream image response directly from the camera."""
        streaming_url = SYNO_API_URL.format(
            self._synology_url, WEBAPI_PATH, self._streaming_path)

        streaming_payload = {
            'api': STREAMING_API,
            'method': 'Stream',
            'version': '1',
            'cameraId': self._camera_id,
            'format': 'mjpeg'
        }
        stream_coro = self._websession.get(
            streaming_url, params=streaming_payload)

        yield from async_aiohttp_proxy_stream(self.hass, request, stream_coro)

    @property
    def name(self):
import os
import pickle

import numpy as np


class SumTree:
    """Fixed-capacity binary sum tree backing proportional prioritized sampling.

    Layout: ``tree`` has ``2 * capacity - 1`` entries; the first
    ``capacity - 1`` are internal nodes (each the sum of its two children)
    and the last ``capacity`` are leaf priorities. ``data`` is a ring buffer
    of stored items, written at ``head``.
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.tree = np.zeros(2 * capacity - 1, dtype=np.float32)
        self.data = np.empty(capacity, dtype=object)
        self.head = 0  # next data slot to overwrite

    @property
    def total_priority(self):
        # The root holds the sum of all leaf priorities.
        return self.tree[0]

    @property
    def max_priority(self):
        return np.max(self.tree[-self.capacity:])

    @property
    def min_priority(self):
        return np.min(self.tree[-self.capacity:])

    def _tree_to_data_index(self, i):
        """Map a leaf index in ``tree`` to its slot in ``data``."""
        return i - self.capacity + 1

    def _data_to_tree_index(self, i):
        """Map a ``data`` slot to its leaf index in ``tree``."""
        return i + self.capacity - 1

    def add(self, priority, data):
        """Store ``data`` with ``priority``, overwriting the oldest slot when full."""
        tree_index = self._data_to_tree_index(self.head)
        self.update_priority(tree_index, priority)
        self.data[self.head] = data
        self.head += 1
        if self.head >= self.capacity:
            self.head = 0  # wrap the ring buffer

    def update_priority(self, tree_index, priority):
        """Set a leaf priority and propagate the difference up to the root."""
        delta = priority - self.tree[tree_index]
        self.tree[tree_index] = priority
        while tree_index != 0:
            tree_index = (tree_index - 1) // 2  # parent
            self.tree[tree_index] += delta

    def get_leaf(self, value):
        """Find the leaf whose cumulative priority range contains ``value``.

        Returns ``(tree_index, priority, data)``.
        """
        parent = 0
        while True:
            left = 2 * parent + 1
            right = left + 1
            if left >= len(self.tree):
                # No children: ``parent`` is a leaf.
                leaf = parent
                break
            if value <= self.tree[left]:
                parent = left
            else:
                value -= self.tree[left]
                parent = right
        data_index = self._tree_to_data_index(leaf)
        return leaf, self.tree[leaf], self.data[data_index]


class PrioritizedExperienceReplay:
    """Proportional prioritized replay memory over a :class:`SumTree`.

    ``epsilon`` keeps priorities strictly positive, ``alpha`` controls how
    strongly priorities skew sampling (0 = uniform), and ``beta`` (annealed
    toward 1 by ``beta_annealing_rate``) controls the importance-sampling
    correction applied to sampled transitions.
    """

    def __init__(self, capacity, initial_size, epsilon, alpha, beta,
                 beta_annealing_rate, max_td_error, ckpt_dir):
        self.tree = SumTree(capacity)
        self.capacity = capacity
        self.epsilon = epsilon
        self.initial_size = initial_size
        self.alpha = alpha
        self.beta = beta
        self.beta_annealing_rate = beta_annealing_rate
        self.max_td_error = max_td_error
        self.ckpt_dir = ckpt_dir

    def add(self, transition):
        """Insert with the current max priority so new data is sampled soon."""
        max_priority = self.tree.max_priority
        if max_priority == 0:
            # Empty tree: fall back to the priority ceiling.
            max_priority = self.max_td_error
        self.tree.add(max_priority, transition)

    def sample(self, batch_size):
        """Sample ``batch_size`` transitions proportionally to priority.

        Returns ``(tree_indices, transitions, importance_sampling_weights)``;
        the indices feed back into :meth:`update_priorities`.
        """
        self.beta = np.min([1., self.beta + self.beta_annealing_rate])
        # Stratified sampling: one uniform draw per equal-probability segment.
        priority_segment = self.tree.total_priority / batch_size
        min_probability = self.tree.min_priority / self.tree.total_priority
        max_weight = (min_probability * batch_size) ** (-self.beta)
        samples, sample_indices, importance_sampling_weights = [], [], []
        for i in range(batch_size):
            value = np.random.uniform(priority_segment * i,
                                      priority_segment * (i + 1))
            index, priority, transition = self.tree.get_leaf(value)
            sample_probability = priority / self.tree.total_priority
            # Normalize by max_weight so weights are in (0, 1].
            importance_sampling_weights.append(
                ((batch_size * sample_probability) ** -self.beta) / max_weight)
            sample_indices.append(index)
            samples.append(transition)
        return sample_indices, samples, importance_sampling_weights

    def update_priorities(self, tree_indices, td_errors):
        """Recompute leaf priorities from fresh TD errors.

        Fix: work on a copy -- the previous ``td_errors += self.epsilon``
        mutated the caller's array in place.
        """
        adjusted_errors = np.asarray(td_errors, dtype=np.float64) + self.epsilon
        clipped_errors = np.minimum(adjusted_errors, self.max_td_error)
        priorities = clipped_errors ** self.alpha
        for tree_index, priority in zip(tree_indices, priorities):
            self.tree.update_priority(tree_index, priority)

    def load_or_instantiate(self, env):
        """Load a checkpointed tree, or seed the memory with random play."""
        if os.path.exists(os.path.join(self.ckpt_dir, "memory.pkl")):
            self.load()
            return
        # Lazy import: tqdm is only needed when seeding from scratch, so the
        # module stays importable without it.
        from tqdm import tqdm
        state = env.reset()
        for _ in tqdm(range(self.initial_size),
                      desc="Initializing replay memory", unit="transition"):
            action = env.action_space.sample()
            next_state, reward, done, info = env.step(action)
            transition = (state, action, reward, next_state, done)
            self.add(transition)
            state = next_state
            if done:
                state = env.reset()

    def load(self):
        """Restore the sum tree from ``<ckpt_dir>/memory.pkl``."""
        with open(os.path.join(self.ckpt_dir, "memory.pkl"), "rb") as f:
            self.tree = pickle.load(f)

    def save(self):
        """Pickle the sum tree to ``<ckpt_dir>/memory.pkl``."""
        with open(os.path.join(self.ckpt_dir, "memory.pkl"), "wb") as f:
            pickle.dump(self.tree, f)
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project


"""
Get a list of a package's plugins.
"""
from __future__ import print_function


def setup_parser(parser, completions=False):
    """Register the command's arguments on *parser*.

    When *completions* is set, attach a package-family completer to the
    positional PKG argument.
    """
    parser.add_argument(
        "--paths", type=str, default=None,
        help="set package search path")
    pkg_arg = parser.add_argument(
        "PKG", type=str,
        help="package to list plugins for")
    if completions:
        from rez.cli._complete_util import PackageFamilyCompleter
        pkg_arg.completer = PackageFamilyCompleter


def command(opts, parser, extra_arg_groups=None):
    """Print the plugins of the requested package, one per line.

    Prints a notice to stderr when the package has none.
    """
    from rez.package_search import get_plugins
    from rez.config import config
    import os
    import os.path
    import sys

    config.override("warn_none", True)

    # Split and expand the user-supplied search path, if any; empty
    # components are dropped. None means "use the default search path".
    search_paths = None
    if opts.paths is not None:
        search_paths = [
            os.path.expanduser(part)
            for part in opts.paths.split(os.pathsep) if part
        ]

    plugins = get_plugins(package_name=opts.PKG, paths=search_paths)

    if not plugins:
        print("package '%s' has no plugins." % opts.PKG, file=sys.stderr)
    else:
        print('\n'.join(plugins))
_file', parent=self._apfs_container_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)

    parent_file_entry = file_entry.GetParentFileEntry()
    self.assertIsNotNone(parent_file_entry)

    self.assertEqual(parent_file_entry.name, 'a_directory')

  def testIsFunctions(self):
    """Tests the Is? functions."""
    # Regular file: allocated, not a directory/link/special file.
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS,
        identifier=self._IDENTIFIER_ANOTHER_FILE,
        location='/a_directory/another_file',
        parent=self._apfs_container_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)

    self.assertFalse(file_entry.IsRoot())
    self.assertFalse(file_entry.IsVirtual())
    self.assertTrue(file_entry.IsAllocated())

    self.assertFalse(file_entry.IsDevice())
    self.assertFalse(file_entry.IsDirectory())
    self.assertTrue(file_entry.IsFile())
    self.assertFalse(file_entry.IsLink())
    self.assertFalse(file_entry.IsPipe())
    self.assertFalse(file_entry.IsSocket())

    # Directory entry.
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS,
        identifier=self._IDENTIFIER_A_DIRECTORY, location='/a_directory',
        parent=self._apfs_container_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)

    self.assertFalse(file_entry.IsRoot())
    self.assertFalse(file_entry.IsVirtual())
    self.assertTrue(file_entry.IsAllocated())

    self.assertFalse(file_entry.IsDevice())
    self.assertTrue(file_entry.IsDirectory())
    self.assertFalse(file_entry.IsFile())
    self.assertFalse(file_entry.IsLink())
    self.assertFalse(file_entry.IsPipe())
    self.assertFalse(file_entry.IsSocket())

    # Root directory of the volume.
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS, location='/',
        parent=self._apfs_container_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)

    self.assertTrue(file_entry.IsRoot())
    self.assertFalse(file_entry.IsVirtual())
    self.assertTrue(file_entry.IsAllocated())

    self.assertFalse(file_entry.IsDevice())
    self.assertTrue(file_entry.IsDirectory())
    self.assertFalse(file_entry.IsFile())
    self.assertFalse(file_entry.IsLink())
    self.assertFalse(file_entry.IsPipe())
    self.assertFalse(file_entry.IsSocket())

  def testSubFileEntries(self):
    """Tests the number_of_sub_file_entries and sub_file_entries properties."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS, location='/',
        parent=self._apfs_container_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)

    self.assertEqual(file_entry.number_of_sub_file_entries, 4)

    expected_sub_file_entry_names = [
        '.fseventsd', 'a_directory', 'a_link', 'passwords.txt']

    sub_file_entry_names = []
    for sub_file_entry in file_entry.sub_file_entries:
      sub_file_entry_names.append(sub_file_entry.name)

    self.assertEqual(
        len(sub_file_entry_names), len(expected_sub_file_entry_names))
    # Enumeration order is not guaranteed; compare sorted.
    self.assertEqual(
        sorted(sub_file_entry_names), sorted(expected_sub_file_entry_names))

    # Test a path specification without a location.
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS,
        identifier=self._IDENTIFIER_A_DIRECTORY,
        parent=self._apfs_container_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)

    self.assertEqual(file_entry.number_of_sub_file_entries, 3)

  def testDataStreams(self):
    """Tests the data streams functionality."""
    # A regular file has exactly one, unnamed, data stream.
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS,
        identifier=self._IDENTIFIER_ANOTHER_FILE,
        location='/a_directory/another_file',
        parent=self._apfs_container_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)

    self.assertEqual(file_entry.number_of_data_streams, 1)

    data_stream_names = []
    for data_stream in file_entry.data_streams:
      data_stream_names.append(data_stream.name)

    self.assertEqual(data_stream_names, [''])

    # A directory has no data streams.
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS,
        identifier=self._IDENTIFIER_A_DIRECTORY, location='/a_directory',
        parent=self._apfs_container_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)

    self.assertEqual(file_entry.number_of_data_streams, 0)

    data_stream_names = []
    for data_stream in file_entry.data_streams:
      data_stream_names.append(data_stream.name)

    self.assertEqual(data_stream_names, [])

  def testGetDataStream(self):
    """Tests the GetDataStream function."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS,
        identifier=self._IDENTIFIER_ANOTHER_FILE,
        location='/a_directory/another_file',
        parent=self._apfs_container_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)

    # The default (unnamed) data stream must be retrievable.
    data_stream_name = ''
    data_stream = file_entry.GetDataStream(data_stream_name)
    self.assertIsNotNone(data_stream)


class APFSFileEntryTestEncrypted(shared_test_lib.BaseTestCase):
  """Tests the APFS file entry on an encrypted file system."""

  _APFS_PASSWORD = 'apfs-TEST'

  # Known identifiers inside the encrypted test image.
  _IDENTIFIER_A_DIRECTORY = 18
  _IDENTIFIER_A_LINK = 22
  _IDENTIFIER_ANOTHER_FILE = 21

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._resolver_context = context.Context()
    test_path = self._GetTestFilePath(['apfs_encrypted.dmg'])
    self._SkipIfPathNotExists(test_path)

    # Path spec chain: OS file -> RAW image -> GPT partition -> APFS
    # container -> APFS volume.
    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_gpt_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_GPT, location='/p1',
        parent=test_raw_path_spec)
    self._apfs_container_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS_CONTAINER, location='/apfs1',
        parent=test_gpt_path_spec)
    self._apfs_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS, location='/',
        parent=self._apfs_container_path_spec)

    # Register the volume password before opening the file system.
    resolver.Resolver.key_chain.SetCredential(
        self._apfs_container_path_spec, 'password', self._APFS_PASSWORD)

    self._file_system = apfs_file_system.APFSFileSystem(
        self._resolver_context, self._apfs_path_spec)
    self._file_system.Open()

  def tearDown(self):
    """Cleans up the needed objects used throughout the test."""
    self._resolver_context.Empty()

  def testInitialize(self):
    """Tests the __init__ function."""
    file_entry = apfs_file_entry.APFSFileEntry(
        self._resolver_context, self._file_system, self._apfs_path_spec)

    self.assertIsNotNone(file_entry)

  # TODO: add tests for _GetDirectory
  # TODO: add tests for _GetLink
  # TODO: add tests for _GetStat
  # TODO: add tests for _GetSubFileEntries

  def testAccessTime(self):
    """Test the access_time property."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS,
        identifier=self._IDENTIFIER_ANOTHER_FILE,
        location='/a_directory/another_file',
        parent=self._apfs_container_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)

    self.assertIsNotNone(file_entry.access_time)

  def testChangeTime(self):
    """Test the change_time property."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        de
s:
                add_qname(tag.text)
        elif isinstance(tag, basestring):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        # Attribute keys and QName attribute values also need prefixes.
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces

def _serialize_xml(write, elem, encoding, qnames, namespaces):
    # Recursively write *elem* as XML via the *write* callable, using the
    # qname map produced by _namespaces.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _encode(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _encode(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            # A None tag serializes only its text and children (no element).
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_xml(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                if namespaces:
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]):  # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
                for k, v in sorted(items):  # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib(v, encoding)
                    write(" %s=\"%s\"" % (qnames[k], v))
            if text or len(elem):
                write(">")
                if text:
                    write(_escape_cdata(text, encoding))
                for e in elem:
                    _serialize_xml(write, e, encoding, qnames, None)
                write("</" + tag + ">")
            else:
                # Empty element: self-closing form.
                write(" />")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))

# HTML void elements: serialized without a closing tag.
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
              "img", "input", "isindex", "link", "meta", "param")

try:
    HTML_EMPTY = set(HTML_EMPTY)
except NameError:
    pass

def _serialize_html(write, elem, encoding, qnames, namespaces):
    # HTML variant of _serialize_xml: void elements get no end tag, and
    # script/style content is not escaped.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                if namespaces:
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]):  # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
                for k, v in sorted(items):  # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib_html(v, encoding)
                    # FIXME: handle boolean attributes
                    write(" %s=\"%s\"" % (qnames[k], v))
            write(">")
            ltag = tag.lower()
            if text:
                if ltag == "script" or ltag == "style":
                    # Raw content models: no entity escaping.
                    write(_encode(text, encoding))
                else:
                    write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
            if ltag not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))

def _serialize_text(write, elem, encoding):
    # Text-only serialization: concatenated text content plus tail.
    for part in elem.itertext():
        write(part.encode(encoding))
    if elem.tail:
        write(elem.tail.encode(encoding))

_serialize = {
    "xml": _serialize_xml,
    "html": _serialize_html,
    "text": _serialize_text,
# this optional method is imported at the end of the module
#   "c14n": _serialize_c14n,
}

##
# Registers a namespace prefix.  The registry is global, and any
# existing mapping for either the given prefix or the namespace URI
# will be removed.
#
# @param prefix Namespace prefix.
# @param uri Namespace uri.  Tags and attributes in this namespace
#     will be serialized with the given prefix, if at all possible.
# @exception ValueError If the prefix is reserved, or is otherwise
#     invalid.

def register_namespace(prefix, uri):
    # "ns<digits>" prefixes are auto-generated by the serializer.
    if re.match("ns\d+$", prefix):
        raise ValueError("Prefix format reserved for internal use")
    for k, v in _namespace_map.items():
        if k == uri or v == prefix:
            del _namespace_map[k]
    _namespace_map[uri] = prefix

_namespace_map = {
    # "well-known" namespace prefixes
    "http://www.w3.org/XML/1998/namespace": "xml",
    "http://www.w3.org/1999/xhtml": "html",
    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
    # xml schema
    "http://www.w3.org/2001/XMLSchema": "xs",
    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
    # dublin core
    "http://purl.org/dc/elements/1.1/": "dc",
}

def _raise_serialization_error(text):
    raise TypeError(
        "cannot serialize %r (type %s)" % (text, type(text).__name__)
        )

def _encode(text, encoding):
    try:
        return text.encode(encoding, "xmlcharrefreplace")
    except (TypeError, AttributeError):
        _raise_serialization_error(text)

def _escape_cdata(text, encoding):
    # escape character data
    try:
        # it's worth avoiding do-nothing calls for strings that are
        # shorter than 500 character, or so.  assume that's, by far,
        # the most common case in most applications.
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        return text.encode(encoding, "xmlcharrefreplace")
    except (TypeError, AttributeError):
        _raise_serialization_error(text)

def _escape_attrib(text, encoding):
    # escape attribute value
    try:
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        if "\"" in text:
            text = text.replace("\"", "&quot;")
        if "\n" in text:
            # Preserve literal newlines in attribute values.
            text = text.replace("\n", "&#10;")
        return text.encode(encoding, "xmlcharrefreplace")
    except (TypeError, AttributeError):
        _raise_serialization_error(text)

def _escape_attrib_html(text, encoding):
    # escape attribute value (HTML rules: "<" is allowed)
    try:
        if "&" in text:
            text = text.replace("&", "&amp;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        if "\"" in text:
            text = text.replace("\"", "&quot;")
        return text.encode(encoding, "xmlcharrefreplace")
    except (TypeError, AttributeError):
        _raise_serialization_error(text)

# --------------------------------------------------------------------

##
# Ge
# -*- coding: utf-8 -*-

'''
    Flixnet Add-on
    Copyright (C) 2017 homik

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

# Python 2 module (urllib.quote / urlparse).
import urllib, urlparse, re

from resources.lib.modules import cleantitle
from resources.lib.modules import client


# Scraper provider for filiser.tv (Polish streaming index).
class source:
    def __init__(self):
        self.priority = 1
        self.language = ['pl']
        self.domains = ['filiser.tv']
        self.base_link = 'http://filiser.tv/'
        # %s is the site-internal id handed to resolve().
        self.url_transl = 'embed?salt=%s'
        self.search_link = 'szukaj?q=%s'
        self.episode_link = '-Season-%01d-Episode-%01d'

    def do_search(self, title, year, is_movie_search):
        # Search the site and return the href of the first result matching
        # title, type (Film/Serial) and year; None on no match or any error.
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % urllib.quote(title)
            result = client.request(url)
            result = result.decode('utf-8')
            result = client.parseDOM(result, 'ul', attrs={'id': 'resultList2'})
            result = client.parseDOM(result[0], 'li')
            # (href, display title, original title or None, info line)
            result = [(client.parseDOM(i, 'a', ret='href')[0],
                       client.parseDOM(i, 'div', attrs={'class': 'title'})[0],
                       (client.parseDOM(i, 'div', attrs={'class': 'title_org'}) + [None])[0],
                       client.parseDOM(i, 'div', attrs={'class': 'info'})[0],
                       ) for i in result]

            search_type = 'Film' if is_movie_search else 'Serial'
            cleaned_title = cleantitle.get(title)
            # filter by name -- prefer the original title when present
            result = [x for x in result if cleaned_title == cleantitle.get(self.get_first_not_none([x[2], x[1]]))]
            # filter by type
            result = [x for x in result if x[3].startswith(search_type)]
            # filter by year
            result = [x for x in result if x[3].endswith(str(year))]

            if len(result) > 0:
                return result[0][0]
            else:
                return
        except :
            return

    def get_first_not_none(self, collection):
        # First non-None item; raises StopIteration if all are None.
        return next(item for item in collection if item is not None)

    def movie(self, imdb, title, localtitle, year):
        return self.do_search(title, year, True)

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
        return self.do_search(tvshowtitle, year, False)

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Find the href of the requested episode inside the show page.
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)
            result = client.request(url)
            result = client.parseDOM(result, 'ul', attrs={'data-season-num': season})[0]
            result = client.parseDOM(result, 'li')
            for i in result:
                # Episode number is embedded in the link text, e.g. "Episode8:".
                s = client.parseDOM(i, 'a', attrs={'class': 'episodeNum'})[0]
                e = int(s[7:-1])
                if e == int(episode):
                    return client.parseDOM(i, 'a', attrs={'class': 'episodeNum'}, ret='href')[0]
        except :
            return

    def sources(self, url, hostDict, hostprDict):
        # Collect hoster links grouped by translation type (lektor/napisy/...).
        try:
            sources = []
            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            result = client.request(url)
            result = client.parseDOM(result, 'div', attrs={'id': 'links'})
            attr = client.parseDOM(result, 'ul', ret='data-type')
            result = client.parseDOM(result, 'ul')
            for x in range(0, len(result)):
                transl_type = attr[x]
                links = result[x]
                sources += self.extract_sources(transl_type, links)
            return sources
        except:
            return sources

    def get_lang_by_type(self, lang_type):
        # Map the site's translation tag to (language code, info label).
        if lang_type == 'DUBBING': return 'pl', 'Dubbing'
        elif lang_type == 'NAPISY_PL': return 'pl', 'Napisy'
        if lang_type == 'LEKTOR_PL': return 'pl', 'Lektor'
        elif lang_type == 'POLSKI': return 'pl', None
        return 'en', None

    def extract_sources(self, transl_type, links):
        # Build one source dict per hoster entry in *links*.
        sources = []
        data_refs = client.parseDOM(links, 'li', ret='data-ref')
        result = client.parseDOM(links, 'li')
        lang, info = self.get_lang_by_type(transl_type)
        for i in range(0, len(result)):
            el = result[i];
            host = client.parseDOM(el, 'span', attrs={'class': 'host'})[0]
            quality = client.parseDOM(el, 'span', attrs={'class': 'quality'})[0]
            q = 'SD'
            if quality.endswith('720p'): q = 'HD'
            elif quality.endswith('1080p'): q = '1080p'
            sources.append({'source': host, 'quality': q, 'language': lang,
                            'url': data_refs[i], 'info': info, 'direct': False, 'debridonly': False})
        return sources

    def resolve(self, url):
        # Turn a data-ref id into the final player URL via the embed page.
        try:
            url_to_exec = urlparse.urljoin(self.base_link, self.url_transl) % url
            result = client.request(url_to_exec)
            # Pull the url assigned in the page's inline javascript.
            m = re.search("(?<=var url = ')(.*\n?)(?=')", result)
            result_url = m.group(0)
            # The template embeds player dimensions; any value works here.
            result_url = result_url.replace('#WIDTH', '100')
            result_url = result_url.replace('#HEIGHT', '100')
            return result_url
        except:
            return
from gh_frespo_integration.utils import github_adapter
from gh_frespo_integration.models import *
from django.conf import settings
import logging
from datetime import timedelta

__author__ = 'tony'

logger = logging.getLogger(__name__)


def get_repos_and_configs(user):
    """Return the user's GitHub repos, annotated with stored per-repo config.

    Fetches the repo list from GitHub for user.github_username() (empty list
    when the user has no GitHub username) and, for each repo already known
    locally, copies the stored 'add_links' / 'new_only' flags into the dict.
    """
    repos = []
    github_username = user.github_username()
    if github_username:
        repos = github_adapter.fetch_repos(github_username)
    for repo_dict in repos:
        gh_id = repo_dict['id']
        repodb = get_repodb_by_githubid(gh_id)
        if repodb:
            user_repo_config = get_repo_config_by_repo_and_user(repodb, user)
            if user_repo_config:
                repo_dict['add_links'] = user_repo_config.add_links
                repo_dict['new_only'] = user_repo_config.new_only
    return repos


def get_repodb_by_githubid(gh_id):
    """Return the Repo row with the given GitHub id, or None.

    On duplicates the inconsistency is logged; note the >1 branch falls
    through and implicitly returns None as well.
    """
    repos = Repo.objects.filter(gh_id = gh_id)
    if repos.count() > 1:
        logger.error('Database inconsistency: multiple repos found with gh_id:%s'%gh_id)
    elif repos.count() == 1:
        return repos[0]
    else:
        return None


def get_repo_config_by_repo_and_user(repo, user):
    """Return the UserRepoConfig for (repo, user), or None.

    Duplicates are logged as a database inconsistency (and implicitly
    return None).
    """
    configs = UserRepoConfig.objects.filter(repo__id = repo.id, user__id = user.id)
    if configs.count() > 1:
        logger.error('Database inconsistency: multiple configs found for repo:%s / user:%s'%(repo.id, user.id))
    elif configs.count() == 1:
        return configs[0]
    else:
        return None


def update_user_configs(user, dict):
    """Sync the user's repo configs from a submitted form dict.

    Creates missing Repo / UserRepoConfig rows for every repo the user has
    on GitHub, sets 'add_links' from the 'check_addlink_<gh_id>' checkbox,
    and deletes configs for repos the user no longer has.
    NOTE(review): the parameter shadows the builtin `dict`; `has_key` is
    Python 2 only.
    """
    github_username = user.github_username()
    if github_username:
        repos = github_adapter.fetch_repos(github_username)
        my_repo_ids = []
        for repo_dict in repos:
            gh_id = repo_dict['id']
            repodb = get_repodb_by_githubid(gh_id)
            if not repodb:
                owner = repo_dict['owner']['login']
                owner_type = repo_dict['owner']['type']
                name = repo_dict['name']
                repodb = Repo.newRepo(owner, owner_type, name, gh_id, user)
                repodb.save()
            config = get_repo_config_by_repo_and_user(repodb, user)
            if not config:
                config = UserRepoConfig.newConfig(user, repodb)
            config.add_links = dict.has_key('check_addlink_%s' % gh_id)
            # config.new_only = dict.has_key('check_newonly_%s' % gh_id)
            # 'new_only' is currently forced on instead of being read from
            # the form (the original checkbox read is kept commented above).
            config.new_only = True
            config.save()
            my_repo_ids.append(gh_id)
        # drop configs for repos that no longer exist on the GitHub side
        UserRepoConfig.objects.filter(user__id = user.id).exclude(repo__gh_id__in = my_repo_ids).delete()


def add_sponsorthis_comments():
    """Post 'sponsor this' comments on issues of all opted-in repos.

    For each config with add_links enabled, fetches issues (all of them, or
    only those updated since the last run minus one hour -- presumably a
    clock-skew buffer, confirm) and comments on each one not already
    commented on.  Errors are logged per-repo and do not abort the loop.
    """
    configs = UserRepoConfig.objects.filter(add_links = True)
    logger.debug('starting sponsor_this routine...')
    for config in configs:
        repo_owner = config.repo.owner
        repo_name = config.repo.name
        last_ran = None
        logger.debug('processing repo_config %s (%s/%s)' % (config.id, config.repo.owner, config.repo.name))
        if config.new_only or config.already_did_old:
            last_ran = config.last_ran - timedelta(hours=1)
            logger.debug('will list issues after %s' % last_ran)
        else:
            logger.debug('will list all issues')
        # stamp the run time before fetching so issues created mid-run are
        # picked up next time
        config.update_last_ran()
        try:
            issues = github_adapter.fetch_issues(repo_owner, repo_name, last_ran)
            logger.debug('issues are fetched')
            for issue in issues:
                _add_comment_if_not_already(config, int(issue['number']), repo_owner, repo_name)
            if not config.new_only:
                config.set_already_did_old()
        except BaseException as e:
            logger.error("Error adding comments repository %s/%s: %s" % (repo_owner, repo_name, e))
    logger.debug('sponsor_this ended successfully')


def _add_comment_if_not_already(repo_config, issue_number, repo_owner, repo_name):
    """Comment on one issue unless it was already commented on.

    Records an IssueAlreadyCommented row after a successful comment so the
    bot never posts twice on the same issue.
    """
    issue_already_commented = get_issue_already_commented(repo_config.repo, issue_number)
    if not issue_already_commented:
        body = u"""Do you care about this issue? To get it fixed quickly, [offer a cash incentive to developers on FreedomSponsors.org](%s/core/issue/sponsor?trackerURL=https://github.com/%s/%s/issues/%s).
If you can only give US$5, offering just that will invite other people to do the same.
Sharing the cost will soon add up!""" % (settings.SITE_HOME, repo_owner, repo_name, issue_number)
        github_adapter.bot_comment(repo_owner, repo_name, issue_number, body)
        issue_already_commented = IssueAlreadyCommented.newIssueAlreadyCommented(repo_config.repo, issue_number)
        issue_already_commented.save()
        logger.info('commented on issue %s of %s/%s' % (issue_number, repo_owner, repo_name))
    else:
        logger.debug('NOT commenting on issue %s of %s/%s because it was already commented on' % (issue_number, repo_owner, repo_name))


def get_issue_already_commented(repo, number):
    """Return the IssueAlreadyCommented row for (repo, number), or None.

    Duplicates are logged as a database inconsistency (and implicitly
    return None).
    """
    iacs = IssueAlreadyCommented.objects.filter(repo__id = repo.id, number = number)
    if iacs.count() > 1:
        logger.error('Database inconsistency: multiple issue_already_commented found for repo:%s / number:%s'%(repo.id, number))
    elif iacs.count() == 1:
        return iacs[0]
    else:
        return None
if i != numOfCols - 2: retTxt += ' & ' else: retTxt += ' \\\\\n' elif horizontal.name == 'hline': retTxt += '\hline\n' retTxt += '\\end{tabular}\\end{center}\\caption{' + props['caption'] + '}\\label{' + props['id'] + '}\\end{table}' return retTxt def main(): f = open("fyp.stmplt", "r") sty = f.read() f.close() f = open("fyp.ttmplt", "r") tex = f.read() f.close() f = open("report.xml", "r") xmlStr = f.read() f.close() root = etree.fromstring(xmlStr) projectTitle = root.find('projectDetails').find('projectTitle').text guide = root.find('projectDetails').find('guide').text principal = root.find('projectDetails').find('principal').text HOD = root.find('projectDetails').find('HOD').text durationLong = root.find('projectDetails').find('duration').text collLogoPath = root.find('projectDetails').find('collLogoPath').text defaultFontFamily = root.find('font').find('defaultFontFamily').text fontLevelOne = root.find('font').find('levelOne').text fontLevelTwo = root.find('font').find('levelTwo').text fontLevelThree = root.find('font').find('levelThree').text fontLevelFour = root.find('font').find('levelFour').text numberStrings = ["One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten"] students = [ (student.find('name').text, student.find('usn').text) for student in root.find('students').getchildren() if student.tag == 'student'] students = [ (numberStrings[i], students[i][0], students[i][1]) for i in range(len(students))] headerLogoScale = root.find('header').find('logoScale').text headerTitleSize = root.find('header').find('titleSize').text headerLineWidth = root.find('header').find('lineWidth').text dept = root.find('footer').find('dept').text durationShort = root.find('footer').find('duration').text footerLineWidth = root.find('footer').find('lineWidth').text chapterFontFamily = root.find('chapterControls').find('fontFamily').text coverFontFamily = root.find('cover').find('fontFamily').text univName = root.find('cover').find('univName').text 
univLogoPath = root.find('cover').find('univLogoPath').text univLogoScale = root.find('cover').find('univLogoScale').text course = root.find('cover').find('course').text stream = root.find('cover').find('stream').text deptName = root.find('cover').find('deptName').text collName = root.find('cover').find('collName').text affiliation = root.find('cover').find('affiliation').text address = root.find('cover').find('address').text collCoverLogoScale = root.find('cover').find('collCoverLogoScale').text vspaceInterblock = root.find('cover').find('vspaceInterblock').text vspaceIntrablock = root.find('cover').find('vspaceIntrablock').text certificateLog
oScale = root.find('certificate').find('logoScale').text certificateCourse = root.find('certificate').find('course').text certificateStream = root.find('certificate').find('stream').text certificateUnivName = root.find('certificate').find('univName').text abstractFontFamily = root.find('abstractControls').find('fontFamily').text ''' modifying the tex file '''
tex = fillContent(tex, 'newcommand{\projectTitle}{', projectTitle) tex = fillContent(tex, 'newcommand{\guide}{', guide) tex = fillContent(tex, 'newcommand{\principal}{', principal) tex = fillContent(tex, 'newcommand{\HOD}{', HOD) tex = fillContent(tex, 'newcommand{\durationLong}{', durationLong) tex = fillContent(tex, 'newcommand{\headerLineWidth}{', headerLineWidth) tex = fillContent(tex, 'newcommand{\\footerLineWidth}{', footerLineWidth) tex = fillContent(tex, 'newcommand{\collLogoPath}{', collLogoPath) tex = fillContent(tex, 'newcommand{\defaultFontFamily}{', defaultFontFamily) tex = fillContent(tex, 'newcommand{\\fontLevelOne}{', fontLevelOne) tex = fillContent(tex, 'newcommand{\\fontLevelTwo}{', fontLevelTwo) tex = fillContent(tex, 'newcommand{\\fontLevelThree}{', fontLevelThree) tex = fillContent(tex, 'newcommand{\\fontLevelFour}{', fontLevelFour) insIndex = tex.index('@studentsList') insStr = '' for student in students: insStr += '\\newcommand{\\student' + student[0] + '}{' + student[1] + '}\n' insStr += '\\newcommand{\\usn' + student[0] + '}{' + student[2] + '}\n' tex = tex[:insIndex] + insStr + tex[insIndex + len('@studentsList'):] tex = fillContent(tex, 'newcommand{\headerLogoScale}{', headerLogoScale) tex = fillContent(tex, 'newcommand{\headerTitleSize}{', headerTitleSize) tex = fillContent(tex, 'newcommand{\dept}{', dept) tex = fillContent(tex, 'newcommand{\durationShort}{', durationShort) tex = fillContent(tex, 'newcommand{\chapterFontFamily}{', chapterFontFamily) tex = fillContent(tex, 'newcommand{\coverFontFamily}{', coverFontFamily) tex = fillContent(tex, 'newcommand{\univName}{', univName) tex = fillContent(tex, 'newcommand{\univLogoPath}{', univLogoPath) tex = fillContent(tex, 'newcommand{\univLogoScale}{', univLogoScale) tex = fillContent(tex, 'newcommand{\course}{', course) tex = fillContent(tex, 'newcommand{\stream}{', stream) tex = fillContent(tex, 'newcommand{\deptName}{', deptName) tex = fillContent(tex, 'newcommand{\collName}{', collName) 
tex = fillContent(tex, 'newcommand{\\affiliation}{', affiliation) tex = fillContent(tex, 'newcommand{\\address}{', address) tex = fillContent(tex, 'newcommand{\collCoverLogoScale}{', collCoverLogoScale) tex = fillContent(tex, 'newcommand{\\vspaceInterblock}{', vspaceInterblock) tex = fillContent(tex, 'newcommand{\\vspaceIntrablock}{', vspaceIntrablock) tex = fillContent(tex, 'newcommand{\certificateLogoScale}{', certificateLogoScale) tex = fillContent(tex, 'newcommand{\certificateCourse}{', certificateCourse) tex = fillContent(tex, 'newcommand{\certificateStream}{', certificateStream) tex = fillContent(tex, 'newcommand{\certificateUnivName}{', certificateUnivName) tex = fillContent(tex, 'newcommand{\\abstractFontFamily}{', abstractFontFamily) insIndex = tex.index('@acknowledgement') insStr = etree.tostring(root.find('acknowledgement')) insStr = convertToTex(insStr) tex = tex[:insIndex] + insStr + tex[insIndex + len('@acknowledgement'):] insIndex = tex.index('@abstract') insStr = etree.tostring(root.find('abstract')) insStr = convertToTex(insStr) tex = tex[:insIndex] + insStr + tex[insIndex + len('@abstract'):] insIndex = tex.index('@chapters') insStr = '' chapters = root.findall('chapter') for chapter in chapters: insStrTemp = etree.tostring(chapter) insStrTemp = convertToTex('<content>' + insStrTemp + '</content>') insStr += insStrTemp + '\n' tex = tex[:insIndex] + insStr + tex[insIndex + len('@chapters'):] f = open("sample.tex", "w") f.write(tex) f.close() ''' modifying the style file ''' #modifying the cover page coverIndex = sty.index("@studentsListCover") insStrCover = '' for i in range(len(students)): if i == 0: insStrCover += '\\vspace{\\vspaceInterblock}\n\\textbf{\\student' + students[i][0] + ' - \usn' + students[i][0] + '}\n\n' else: insStrCover += '\\vspace{\\vspaceIntrablock}\n\\textbf{\\student' + students[i][0] + ' - \usn' + students[i][0] + '}\n\n' sty = sty[:coverIndex] + insStrCover + sty[coverIndex + len('@studentsListCover'):] #modifying the 
certificate certIndex = sty.index("@studentsListCertificate") insStrCertificate = '' for i in range(len(students)): if i == 0: insStrCertificate += '\\vspace{\\vspaceInterblock}\n\\textbf{\student' + students[i][0] + ', \usn' + students[i][0] + '}\n\n' else: insStrCertificate += '\\vspace{\\vspaceIntrablock}\n\
from django.contrib import admin
from widgy.admin import WidgyAdmin
from widgy.contrib.page_builder.models import Callout

# Expose page-builder Callout objects in the Django admin via Widgy's
# admin integration class.
admin.site.register(Callout, WidgyAdmin)
from django.db import models
from gitireadme.utils import getUploadToPath
import datetime


class Article(models.Model):
    """A stored article (README-style document)."""
    # Display name of the article; optional.
    name = models.CharField(max_length=255,blank=True,null=True)
    # Storage path of the article's content; optional.
    path = models.CharField(max_length=255,blank=True,null=True)


class ArticleAlias(models.Model):
    """Maps a repository identifier to an Article."""
    # Repository the alias points from; optional.
    repo = models.CharField(max_length=255,blank=True,null=True)
    article = models.ForeignKey(Article)
al("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:\n%s" "\n" "\nArgs:" "\n arg0: Arg 0." "\n arg1: Arg 1." "\n" "\nReturns:" "\n Sum of args." % (date, instructions), _fn.__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_with_one_line_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated(date, instructions) def _fn(arg0, arg1): """fn doc.""" return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_no_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.d
eprecated(date, instructions) def _fn(arg0, arg1): return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "DEPRECATED FUNCTION" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:" "\n%s" % (date, instructions), _fn.__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _fn(1, 2)
) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_instance_fn_with_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @deprecation.deprecated(date, instructions) def _fn(self, arg0, arg1): """fn doc. Args: arg0: Arg 0. arg1: Arg 1. Returns: Sum of args. """ return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:\n%s" "\n" "\nArgs:" "\n arg0: Arg 0." "\n arg1: Arg 1." "\n" "\nReturns:" "\n Sum of args." % (date, instructions), getattr(_Object, "_fn").__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _Object()._fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_instance_fn_with_one_line_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @deprecation.deprecated(date, instructions) def _fn(self, arg0, arg1): """fn doc.""" return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:\n%s" % (date, instructions), getattr(_Object, "_fn").__doc__) # Assert calling new fn issues log warning. 
self.assertEqual(3, _Object()._fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_instance_fn_no_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @deprecation.deprecated(date, instructions) def _fn(self, arg0, arg1): return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual( "DEPRECATED FUNCTION" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:" "\n%s" % (date, instructions), getattr(_Object, "_fn").__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _Object()._fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) def test_prop_wrong_order(self): with self.assertRaisesRegexp( ValueError, "make sure @property appears before @deprecated in your source code"): # pylint: disable=unused-variable class _Object(object): def __init(self): pass @deprecation.deprecated("2016-07-04", "Instructions.") @property def _prop(self): return "prop_wrong_order" @test.mock.patch.object(logging, "warning", autospec=True) def test_prop_with_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @property @deprecation.deprecated(date, instructions) def _prop(self): """prop doc. Returns: String. """ return "prop_with_doc" # Assert function docs are properly updated. self.assertEqual( "prop doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." 
"\nInstructions for updating:" "\n%s" "\n" "\nReturns:" "\n String." % (date, instructions), getattr(_Object, "_prop").__doc__) # Assert calling new fn issues log warning. self.assertEqual("prop_with_doc", _Object()._prop) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_prop_no_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @property @deprecation.deprecated(date, instructions) def _prop(self): return "prop_no_doc" # Assert function docs are properly updated. self.assertEqual( "DEPRECATED FUNCTION" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:" "\n%s" % (date, instructions), getattr(_Object, "_prop").__doc__) # Assert calling new fn issues log warning. self.assertEqual("prop_no_doc", _Object()._prop) self.ass
# coding=UTF-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Tornado web stack
from tornado import ioloop , web , httpserver , websocket , options
# request handlers live in the local handler module
import handler
import os

# Application settings: static/template dirs are resolved relative to this
# file; autoreload + debug are on (development configuration).
server_settings = {
    "static_path": os.path.join(os.path.dirname(__file__), "static"),
    "xsrf_cookies": True,
    "autoreload": True,
    #"login_url": "/accounts/login",
    "debug":True,
    "template_path":os.path.join(os.path.dirname(__file__),"templates"),
}

# URL routing table
handlers=[
    (r"/?",handler.MainHandler),
    (r"/upload",handler.WavFileHandler)
]

# --port=N command-line option (default 8080)
options.define("port", default=8080, help="the application will be run on the given port", type=int)

if __name__ == "__main__":
    options.parse_command_line()
    app_server = httpserver.HTTPServer(web.Application(handlers,**server_settings))
    app_server.listen(options.options.port)
    # blocks serving requests until interrupted
    ioloop.IOLoop.current().start()
""" Copyright 2013 Steven Diamond This file is part of CVXPY. CVXPY is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. CVXPY is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with CVXPY. If not, see <http://www.gnu.org/licenses/>. """ from cvxpy.lin_ops.lin_utils import * from cvxpy.lin_ops.lin_op import * from cvxpy.expressions.constants import Parameter import cvxpy.interface as intf import numpy as np import scipy.sparse as sp import unittest from cvxpy.tests.base_test import BaseTest import sys PY2 = sys.version_info < (3, 0) class test_lin_ops(BaseTest): """ Unit tests for the lin_ops module. """ def test_variables(self): """Test creating a variable. """ var = create_var((5, 4), var_id=1) self.assertEqual(var.size, (5, 4)) self.assertEqual(var.data, 1) self.assertEqual(len(var.args), 0) self.assertEqual(var.type, VARIABLE) def test_param(self): """Test creating a parameter. """ A = Parameter(5, 4) var = create_param(A, (5, 4)) self.assertEqual(var.size, (5, 4)) self.assertEqual(len(var.args), 0) self.assertEqual(var.type, PARAM) def test_constant(self): """Test creating a constant. """ # Scalar constant. size = (1, 1) mat = create_const(1.0, size) self.assertEqual(mat.size, size) self.assertEqual(len(mat.args), 0) self.assertEqual(mat.type, SCALAR_CONST) assert mat.data == 1.0 # Dense matrix constant. size = (5, 4) mat = create_const(np.ones(size), size) self.assertEqual(mat.size, size) self.assertEqual(len(mat.args), 0) self.assertEqual(mat.type, DENSE_CONST) assert (mat.data == np.ones(size)).all() # Sparse matrix constant. 
size = (5, 5) mat = create_const(sp.eye(5), size, sparse=True) self.assertEqual(mat.size, size) self.assertEqual(len(mat.args), 0) self.assertEqual(mat.type, SPARSE_CONST) assert (mat.data.todense() == sp.eye(5).todense()).all() def test_add_expr(self): """Test adding lin expr. """ size = (5, 4) x = create_var(size) y = create_var(size) # Expanding dict. add_expr = sum_expr([x, y]) self.assertEqual(add_expr.size, size) assert len(add_expr.args) == 2 def test_get_vars(self): """Test getting vars from an expression. """ size = (5, 4) x = create_var(size) y = create_var(size) A = create_const(np.ones(size), size) # Expanding dict. add_expr = sum_expr([x, y, A]) vars_ = get_expr_vars(add_expr) ref = [(x.data, size), (y.data, size)] if PY2: self.assertItemsEqual(vars_, ref) else: self.assertCountEqual(vars_, ref) def test_neg_expr(self): """Test negating an expression. """ size = (5, 4) var = create_var(size) expr = neg_expr(var) assert len(expr.args) == 1 self.assertEqual(expr.size, size) self.assertEqual(expr.type, NEG) def test_eq_constr(self): """Test creating an equality constraint. """ size = (5, 5) x = create_var(size) y = create_var(size) lh_expr = sum_expr([x, y]) value = np.ones(size) rh_expr = create_const(value, size) constr = create_eq(lh_expr, rh_expr) self.assertEqual(constr.size, size) vars_ = get_expr_vars(constr.expr) ref = [(x.data, size), (y.data, size)] if PY2: self.assertItemsEqual(vars_, ref) else: self.assertCountEqual(vars_, ref) def test_leq_constr(self): """Test creating a less than or equal constraint. """ size = (5, 5) x = create_var(size) y = create_var(size) lh_expr = sum_expr([x, y]) value = np.ones(size) rh_expr = create_const(value, size) constr = cr
eate_leq(lh_expr, rh_expr) self.assertEqual(constr.size, size) vars_ = get_expr_vars(constr.expr) ref = [(x.data, size), (y.data, size)] if PY2: self.assertItemsEqual(vars_,
ref) else: self.assertCountEqual(vars_, ref) def test_sum_entries(self): """Test sum entries op. """ size = (5, 5) x = create_var(size) expr = sum_entries(x) self.assertEqual(expr.size, (1, 1)) self.assertEqual(len(expr.args), 1) self.assertEqual(expr.type, lo.SUM_ENTRIES)
########################################
# Automatically generated, do not edit.
########################################

from pyvisdk.thirdparty import Enum


# Enumeration of virtual disk adapter type names -- presumably mirrors the
# vSphere API's VirtualDiskAdapterType enum; confirm against the generator.
VirtualDiskAdapterType = Enum(
    'busLogic',
    'ide',
    'lsiLogic',
)
self.d0 = 0.0 self.d1 = 0.0 self.d2 = 0.0 def __getitem__(self, i): """ Return the value at the i'th index of the vector. @param i the vector index @return the value at the i'th index of the vector. """ if i == 0: return self.d0 elif i == 1: return self.d1 else: return self.d2 def __setitem__(self, i, v): """ Set the value of the i'th index of the vector. @param i the vector index @param v the value to store """ if i == 0: self.d0 = v elif i == 1: self.d1 = v else: self.d2 = v def __iadd__(self, u): """ Add two vectors and the result is placed in self vector. @param u the other operand of the addition """ self.d0 += u.d0 self.d1 += u.d1 self.d2 += u.d2 return self def __isub__(self, u): """ Subtract two vectors and the result is placed in self vector. This vector contain the first operand. @param u the other operand of the subtraction. """ self.d0 -= u.d0 self.d1 -= u.d1 self.d2 -= u.d2 return self def __imul__(self, s): """ Multiply the vector times a scalar. @param s the scalar value """ self.d0 *= s self.d1 *= s self.d2 *= s return self def __idiv__(self, s): """ Divide each element of the vector by a scalar value. @param s the scalar value. """ self.d0 /= s self.d1 /= s self.d2 /= s return self def add_scalar(self, u, s): self.d0 = u.d0 + s self.d1 = u.d1 + s self.d2 = u.d2 + s def subtraction2(self, u, v): """ Subtract two vectors and the result is placed in self vector. @param u the first operand of the subtraction. @param v the second opernd of the subtraction """ self.d0 = u.d0 - v.d0 self.d1 = u.d1 - v.d1 self.d2 = u.d2 - v.d2 def mult_scalar2(self, u, s): """ Multiply the vector times a scalar and place the result in self vector. @param u the vector @param s the scalar value """ self.d0 = u.d0 * s self.d1 = u.d1 * s self.d2 = u.d2 * s def dot(self): """ Return the dot product of a vector. @return the dot product of a vector. 
""" return self.d0 * self.d0 + self.d1 * self.d1 + self.d2 * self.d2 def __repr__(self): return "%.17f %.17f %.17f " % (self.d0, self.d1, self.d2) class HG(object): """ A sub class which is used to compute and save information during the gravity computation phase. """ __slots__ = ["pskip", "pos0", "phi0", "acc0"] def __init__(self, b, p): """ Create a object. @param b the body object @param p a vector that represents the body """ # Body to skip in force evaluation self.pskip = b # Poat which to evaluate field self.pos0 = copy(p) # Computed potential at pos0 self.phi0 = 0.0 # computed acceleration at pos0 self.acc0 = Vec3() class Node(object): """A class that represents the common fields of a cell or body data structure.""" # highest bit of coord IMAX = 1073741824 # potential softening parameter EPS = 0.05 def __init__(self): """Construct an empty node""" self.mass = 0.0 # mass of the node self.pos = Vec3() # Position of the node def load_tree(self, p, xpic, l, root): raise NotImplementedError() def hack_cofm(self): raise NotImplementedError() def walk_sub_tree(self, dsq, hg): raise NotImplementedError() @staticmethod def old_sub_index(ic, l): i = 0 for k in xrange(Vec3.NDIM): if (int(ic[k]) & l) != 0: i += Cell.NSUB >> (k + 1) return i def __repr__(self): return "%f : %f" % (self.mass, self.pos) def grav_sub(self, hg): """Compute a single body-body or body-cell interaction""" dr = Vec3() dr.subtraction2(self.pos, hg.pos0) drsq = dr.dot() +
(Node.EPS * Node.EPS) drabs = sqrt(drsq) phii = self.mass / drabs hg.phi0 -= phii mor3 = phii / drsq dr *= mor3 hg.acc0 += dr return hg class Body(Node): """A class used to representing particles in the N-body simulation.""" def __init__(self):
"""Create an empty body.""" Node.__init__(self) self.vel = Vec3() self.acc = Vec3() self.new_acc = Vec3() self.phi = 0.0 def expand_box(self, tree, nsteps): """ Enlarge cubical "box", salvaging existing tree structure. @param tree the root of the tree. @param nsteps the current time step """ rmid = Vec3() inbox = self.ic_test(tree) while not inbox: rsize = tree.rsize rmid.add_scalar(tree.rmin, 0.5 * rsize) for k in xrange(Vec3.NDIM): if self.pos[k] < rmid[k]: rmin = tree.rmin[k] tree.rmin[k] = rmin - rsize tree.rsize = 2.0 * rsize if tree.root is not None: ic = tree.int_coord(rmid) if ic is None: raise Exception("Value is out of bounds") k = Node.old_sub_index(ic, Node.IMAX >> 1) newt = Cell() newt.subp[k] = tree.root tree.root = newt inbox = self.ic_test(tree) def ic_test(self, tree): """Check the bounds of the body and return True if it isn't in the correct bounds.""" pos0 = self.pos[0] pos1 = self.pos[1] pos2 = self.pos[2] # by default, it is in bounds result = True xsc = (pos0 - tree.rmin[0]) / tree.rsize if not (0.0 < xsc and xsc < 1.0): result = False xsc = (pos1 - tree.rmin[1]) / tree.rsize if not (0.0 < xsc and xsc < 1.0): result = False xsc = (pos2 - tree.rmin[2]) / tree.rsize if not (0.0 < xsc and xsc < 1.0): result = False return result def load_tree(self, p, xpic, l, tree): """ Descend and insert particle. We're at a body so we need to create a cell and attach self body to the cell. 
@param p the body to insert @param xpic @param l @param tree the root of the data structure @return the subtree with the body inserted """ # create a Cell retval = Cell() si = self.sub_index(tree, l) # attach self node to the cell retval.subp[si] = self # move down one level si = Node.old_sub_index(xpic, l) rt = retval.subp[si] if rt is not None: retval.subp[si] = rt.load_tree(p, xpic, l >> 1, tree) else: retval.subp[si] = p return retval def hack_cofm(self): """ Descend tree finding center of mass coordinates @return the mass of self node """ return self.mass def sub_index(self, tree, l): """ Determine which subcell to select. Combination of int_coord and old_sub_index. @param t the root of the tree """ xp = Vec3() xsc = (self.pos[0] - tree.rmin[0]) / tree.rsize xp[0] = floor(Node.IMAX * xsc) xsc = (self.pos[1] - tree.rmin[1]) / tree.rsize xp[1] = floor(Node.IMAX * xsc) xsc = (self.pos[2] -
"""Sample songs from the Million Song dataset and collect their play counts.

Writes songs.csv / playCount.csv / users.csv under
config/data/oneMillionSongs/sets/<name>/. Progress messages are printed in
Portuguese (left untouched: they are runtime output).
"""
from random import sample
import os

# Module-level state shared between getSongs/getPlayCount via `global`.
songList = []        # ids of the sampled songs, filled by getSongs()
songDict = None      # set(songList), built in start() for O(1) membership
userPlayList = []    # ids of users kept so far, filled by getPlayCount()
directory = ''       # output directory for the current set, set in start()


def getSongs(name, limit):
    """Randomly sample `limit` songs from the source CSV and write songs.csv.

    Appends each sampled song id to the global songList.
    """
    global songList
    global directory
    print('*'*30)
    print('* Minerando ', str(limit), ' músicas *')
    print('*'*30)
    status = 0
    if not os.path.exists(directory):
        os.makedirs(directory)
    toSaveFile = open( 'config/data/oneMillionSongs/sets/' + str(name) + '/songs.csv', 'w+' )
    toSaveFile.write('id,title\n')
    # Sample `limit` distinct lines from the source file.
    # NOTE(review): the header line of the source CSV can end up in the
    # sample, since all lines are treated alike here — confirm intended.
    songSet = sample( set( open( 'config/data/oneMillionSongs/originalCleanEntry/songs.csv', 'r+' ) ), limit )
    for line in songSet:
        if (status % 1000 == 0):
            print ("-> [", status, "]")
        lineSplit = line.split(',')
        songList.append(lineSplit[0])
        toSaveFile.write(lineSplit[0] + ',' + lineSplit[1] + '\n')
        if (status > limit):
            break
        status += 1
    print ('- Total de Musicas: ', len(songList))
    toSaveFile.close()
    print ('- Finalizando o script!')


def getPlayCount(name, limit, userLimit):
    """Copy play-count rows whose song is in songDict; also write users.csv.

    Keeps at most `userLimit` distinct users (rows from additional users are
    skipped once the cap is reached).
    """
    global songDict
    global userPlayList
    global directory
    print ('*'*30)
    print ('* Pegando Lista de pessoas que ouviram as musicas *')
    print ('*'*30)
    status = 0
    if not os.path.exists(directory):
        os.makedirs(directory)
    toSaveFile = open( 'config/data/oneMillionSongs/sets/' + str(name) + '/playCount.csv', 'w+' )
    toSaveFile.write('user_id,song_id,play_count\n')
    for line in open( 'config/data/oneMillionSongs/originalCleanEntry/playCount.csv', 'r+' ):
        status += 1
        # Skip the header row of the source file.
        if status == 1:
            continue
        if (status % 1000 == 0):
            print ("-> [", status, "]")
        lineSplit = line.split(',')
        # Only keep rows for songs that were sampled by getSongs().
        if (lineSplit[1] not in songDict):
            continue
        # NOTE(review): userLimit defaults to None (see start()); comparing
        # len(...) >= None raises TypeError on Python 3 and is always True on
        # Python 2 — confirm the intended behaviour when no limit is given.
        if (len(userPlayList) >= userLimit and lineSplit[0] not in userPlayList):
            continue
        if lineSplit[0] not in userPlayList:
            userPlayList.append(lineSplit[0])
        toSaveFile.write(line)
    userDict = set(userPlayList)
    print ('- Total de usuarios: ', len(userDict))
    usersToSaveFile = open( 'config/data/oneMillionSongs/sets/' + str(name) + '/users.csv', 'w+' )
    usersToSaveFile.write('id\n')
    for user in userDict:
        usersToSaveFile.write(user + "\n")
    toSaveFile.close()
    usersToSaveFile.close()
    print ('- Finalizando o script!')


def start(name, limit, userLimit=None):
    """Build one data set: sample songs, then collect their play counts."""
    global directory
    global songDict
    directory = 'config/data/oneMillionSongs/sets/' + str(name)
    getSongs(name, limit)
    songDict = set(songList)
    getPlayCount(name, limit, userLimit)


##########

def main():
    # Build data sets of increasing size.
    start(name="thousand", limit=1000)
    start(name="two_thousand", limit=2000)
    start(name="three_thousand", limit=3000)
    start(name="ten_thousand", limit=10000)
# -*- coding: utf-8 -*-
"""Registry of migration managers loaded from ``settings.MIGRATION_MANAGERS``."""
from django.core import exceptions
from django.utils.importlib import import_module

__author__ = 'jb'

# Mapping of manager name -> manager object, populated by initialize().
MIGRATION_MANAGERS = {}
# The manager listed first in settings.MIGRATION_MANAGERS (None if unset).
DEFAULT_MANAGER = None


def load_manager(path):
    """
    Import the object named by the dotted ``path`` (e.g. ``pkg.mod.Manager``)
    and return it, raising ``ImproperlyConfigured`` on any failure.

    Code taken from django.

    Copyright (c) Django Software Foundation and individual contributors.
    All rights reserved.

    Redistribution and use in source and binary forms, with or without modification,
    are permitted provided that the following conditions are met:

        1. Redistributions of source code must retain the above copyright notice,
           this list of conditions and the following disclaimer.

        2. Redistributions in binary form must reproduce the above copyright
           notice, this list of conditions and the following disclaimer in the
           documentation and/or other materials provided with the distribution.

        3. Neither the name of Django nor the names of its contributors may be used
           to endorse or promote products derived from this software without
           specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
    ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
    ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
    LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
    ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    """
    # Split "pkg.mod.Name" into module path and attribute name.
    try:
        mw_module, mw_classname = path.rsplit('.', 1)
    except ValueError:
        raise exceptions.ImproperlyConfigured('%s isn\'t a manager module' % path)
    try:
        mod = import_module(mw_module)
    # BUGFIX: was the Python-2-only form "except ImportError, e", which is a
    # SyntaxError on Python 3; "as e" works on Python 2.6+ and 3.x alike.
    except ImportError as e:
        raise exceptions.ImproperlyConfigured('Error importing manager %s: "%s"' % (mw_module, e))
    try:
        manager_instance = getattr(mod, mw_classname)
    except AttributeError:
        raise exceptions.ImproperlyConfigured('Manager module "%s" does not define a "%s" object' % (mw_module, mw_classname))
    return manager_instance


def initialize():
    """Populate MIGRATION_MANAGERS / DEFAULT_MANAGER from Django settings.

    ``settings.MIGRATION_MANAGERS`` is expected to be a sequence of
    ``(name, dotted_path)`` pairs; the first entry becomes the default.
    """
    from django.conf import settings
    global MIGRATION_MANAGERS, DEFAULT_MANAGER
    MANAGERS = getattr(settings, "MIGRATION_MANAGERS", None)
    if MANAGERS is not None:
        for k, v in MANAGERS:
            MIGRATION_MANAGERS[k] = load_manager(v)
        DEFAULT_MANAGER = MIGRATION_MANAGERS[MANAGERS[0][0]]

# Registry is built once at import time.
initialize()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A script to manage development tasks
"""
from __future__ import (
    absolute_import, division, print_function, with_statement,
    unicode_literals)

from os import path as p
from manager import Manager
from subprocess import call

manager = Manager()
_basedir = p.dirname(__file__)


@manager.command
def clean():
    """Remove Python file and build artifacts"""
    call(p.join(_basedir, 'helpers', 'clean'), shell=True)


@manager.command
def check():
    """Check staged changes for lint errors"""
    call(p.join(_basedir, 'helpers', 'check-stage'), shell=True)


@manager.arg('where', 'w', help='Modules to check')
@manager.command
def lint(where=None):
    """Check style with flake8"""
    call('flake8 %s' % (where if where else ''), shell=True)


@manager.command
def pipme():
    """Install requirements.txt"""
    call('pip install -r requirements.txt', shell=True)


@manager.command
def require():
    """Create requirements.txt"""
    cmd = 'pip freeze -l | grep -vxFf dev-requirements.txt > requirements.txt'
    call(cmd, shell=True)


@manager.arg('where', 'w', help='test path', default=None)
@manager.arg(
    'stop', 'x', help='Stop after first error', type=bool, default=False)
@manager.command
def test(where=None, stop=False):
    """Run nose and script tests"""
    # BUGFIX: the original built a single string such as "-vw /path" and
    # passed it as ONE argv element, so the test helper received a malformed
    # option token containing a space. Pass each option as its own element.
    args = [p.join(_basedir, 'helpers', 'test'), '-xv' if stop else '-v']
    if where:
        args.extend(['-w', where])
    call(args)


@manager.command
def register():
    """Register package with PyPI"""
    call('python %s register' % p.join(_basedir, 'setup.py'), shell=True)


@manager.command
def release():
    """Package and upload a release"""
    sdist()
    wheel()
    upload()


@manager.command
def build():
    """Create a source distribution and wheel package"""
    sdist()
    wheel()


@manager.command
def upload():
    """Upload distribution files"""
    call('twine upload %s' % p.join(_basedir, 'dist', '*'), shell=True)


@manager.command
def sdist():
    """Create a source distribution package"""
    call(p.join(_basedir, 'helpers', 'srcdist'), shell=True)


@manager.command
def wheel():
    """Create a wheel package"""
    call(p.join(_basedir, 'helpers', 'wheel'), shell=True)


if __name__ == '__main__':
    manager.main()
""" Unique place for all runscripts to set the module path. (i.e. if you have a copy of TEMareels in some place on your hard disk but not in PYTHONPATH). NOTE: it is reco
mmended to keep a copy of all TEMareels module files with your data/analysis, as future versions will be not necessarily backwards compatible. Copyright (c) 2013, rhambach. This file is part of the TEMareels package and released under th
e MIT-Licence. See LICENCE file for details. """ # location of the TEMareels package on the hard disk # (if not specified in PYTHONPATH) pkgdir = '../..'; import sys from os.path import abspath; sys.path.insert(0,abspath(pkgdir));
import _plotly_utils.basevalidators


class SourceattributionValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``layout.mapbox.layer.sourceattribution``."""

    def __init__(
        self, plotly_name="sourceattribution", parent_name="layout.mapbox.layer", **kwargs
    ):
        # Pull the validator options out of kwargs first, falling back to
        # this property's defaults, then delegate to the base validator.
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "info")
        super(SourceattributionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool by AusAid - **Clipper test suite.**

Contact : ole.moller.nielsen@gmail.com

.. note:: This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.
"""
import unittest
import sys
from tempfile import mktemp

from qgis.core import QgsVectorLayer, QgsRasterLayer
from PyQt4.QtCore import QFileInfo
from osgeo import gdal

from safe.test.utilities import (
    get_qgis_app, load_test_vector_layer, standard_data_path)
# A QGIS application must exist before safe.datastore imports below.
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()

from safe.datastore.geopackage import GeoPackage


# Decorator for expecting fails in windows but not other OS's
# Probably we should move this somewhere in utils for easy re-use...TS
def expect_failure_in_windows(exception):
    """Marks test to expect a fail in windows - call assertRaises internally.

    ..versionadded:: 4.0.0
    """
    def test_decorator(fn):
        def test_decorated(self, *args, **kwargs):
            # NOTE(review): on non-Windows platforms the wrapped test body
            # is never invoked, so the decorated test passes vacuously —
            # confirm this is intended (fn is only exercised on win32).
            if sys.platform.startswith('win'):
                self.assertRaises(exception, fn, self, *args, **kwargs)
        return test_decorated
    return test_decorator


class TestGeoPackage(unittest.TestCase):
    """Test the GeoPackage datastore."""

    def setUp(self):
        # No shared fixtures; each test builds its own datastore.
        pass

    def tearDown(self):
        pass

    @unittest.skipIf(
        int(gdal.VersionInfo('VERSION_NUM')) < 2000000,
        'GDAL 2.0 is required for geopackage.')
    def test_create_geopackage(self):
        """Test if we can store geopackage."""
        # Create a geopackage from an empty file.
        path = QFileInfo(mktemp() + '.gpkg')
        self.assertFalse(path.exists())
        data_store = GeoPackage(path)
        # QFileInfo caches stat results; refresh before re-checking existence.
        path.refresh()
        self.assertTrue(path.exists())

        # Let's add a vector layer.
        layer_name = 'flood_test'
        layer = standard_data_path('hazard', 'flood_multipart_polygons.shp')
        vector_layer = QgsVectorLayer(layer, 'Flood', 'ogr')
        result = data_store.add_layer(vector_layer, layer_name)
        self.assertTrue(result[0])

        # We should have one layer.
        layers = data_store.layers()
        self.assertEqual(len(layers), 1)
        self.assertIn(layer_name, layers)

        # Add the same layer with another name.
        layer_name = 'another_vector_flood'
        result = data_store.add_layer(vector_layer, layer_name)
        self.assertTrue(result[0])

        # We should have two layers.
        layers = data_store.layers()
        self.assertEqual(len(layers), 2)
        self.assertIn(layer_name, layers)

        # Test the URI of the new layer.
        expected = path.absoluteFilePath() + '|layername=' + layer_name
        self.assertEqual(data_store.layer_uri(layer_name), expected)

        # Test a fake layer.
        self.assertIsNone(data_store.layer_uri('fake_layer'))

        # Test to add a raster
        layer_name = 'raster_flood'
        layer = standard_data_path('hazard', 'classified_hazard.tif')
        raster_layer = QgsRasterLayer(layer, layer_name)
        result = data_store.add_layer(raster_layer, layer_name)
        self.assertTrue(result[0])

        # We should have 3 layers inside.
        layers = data_store.layers()
        self.assertEqual(len(layers), 3)

        # Check the URI for the raster layer.
        expected = 'GPKG:' + path.absoluteFilePath() + ':' + layer_name
        self.assertEqual(data_store.layer_uri(layer_name), expected)

        # Add a second raster.
        layer_name = 'big raster flood'
        self.assertTrue(data_store.add_layer(raster_layer, layer_name))
        self.assertEqual(len(data_store.layers()), 4)

        # Test layer without geometry
        layer = load_test_vector_layer(
            'gisv4', 'impacts', 'exposure_summary_table.csv')
        tabular_layer_name = 'breakdown'
        result = data_store.add_layer(layer, tabular_layer_name)
        self.assertTrue(result[0])

    @unittest.skipIf(
        int(gdal.VersionInfo('VERSION_NUM')) < 2000000,
        'GDAL 2.0 is required for geopackage.')
    @expect_failure_in_windows(AssertionError)
    def test_read_existing_geopackage(self):
        """Test we can read an existing geopackage."""
        path = standard_data_path('other', 'jakarta.gpkg')
        import os
        # Normalize case/separators so the path matches layer URIs on all OSs.
        path = os.path.normpath(os.path.normcase(os.path.abspath(path)))
        geopackage = QFileInfo(path)
        data_store = GeoPackage(geopackage)

        # We should have 3 layers in this geopackage.
        self.assertEqual(len(data_store.layers()), 3)

        # Test we can load a vector layer.
        roads = QgsVectorLayer(
            data_store.layer_uri('roads'),
            'Test',
            'ogr'
        )
        self.assertTrue(roads.isValid())

        # Test we can load a raster layers.
        # This currently fails on windows...
        # So we have decorated it with expected fail on windows
        # Should pass on other platforms.
        path = data_store.layer_uri('flood')
        flood = QgsRasterLayer(path, 'flood')
        self.assertTrue(flood.isValid())


if __name__ == '__main__':
    unittest.main()
lf._g_upper = None
# NOTE: this fragment starts mid-statement ("se" of "self" lies outside this
# chunk) inside the __init__ of a truncated plasma-property class; np, ne,
# pd, const, u, macro_atom and ProcessingPlasmaProperty come from elsewhere
# in the module.
self._g_lower = None
try:
    self.nlte_species = self.plasma_parent.nlte_species
# NOTE(review): bare "except:" also swallows KeyboardInterrupt/SystemExit —
# presumably meant to catch a missing attribute only; confirm.
except:
    self.nlte_species = nlte_species

def get_g_lower(self, g, lines_lower_level_index):
    # Cache the lower-level statistical weights as a column vector.
    if self._g_lower is None:
        g_lower = np.array(g.ix[lines_lower_level_index],
                           dtype=np.float64)
        self._g_lower = g_lower[np.newaxis].T
    return self._g_lower

def get_g_upper(self, g, lines_upper_level_index):
    # Cache the upper-level statistical weights as a column vector.
    if self._g_upper is None:
        g_upper = np.array(g.ix[lines_upper_level_index],
                           dtype=np.float64)
        self._g_upper = g_upper[np.newaxis].T
    return self._g_upper

def get_metastable_upper(self, metastability, lines_upper_level_index):
    # Cache a boolean column vector flagging metastable upper levels.
    if getattr(self, '_meta_stable_upper', None) is None:
        self._meta_stable_upper = metastability.values[
            lines_upper_level_index][np.newaxis].T
    return self._meta_stable_upper

def calculate(self, g, level_number_density, lines_lower_level_index,
              lines_upper_level_index, metastability, lines):
    """Compute the stimulated emission factor 1 - (g_l n_u)/(g_u n_l)
    per line and zone, zeroing out unphysical (negative/-inf) entries."""
    n_lower = level_number_density.values.take(lines_lower_level_index,
                                               axis=0, mode='raise')
    n_upper = level_number_density.values.take(lines_upper_level_index,
                                               axis=0, mode='raise')
    g_lower = self.get_g_lower(g, lines_lower_level_index)
    g_upper = self.get_g_upper(g, lines_upper_level_index)
    meta_stable_upper = self.get_metastable_upper(metastability,
                                                  lines_upper_level_index)
    # numexpr evaluates the whole expression in one vectorized pass.
    stimulated_emission_factor = ne.evaluate('1 - ((g_lower * n_upper) / '
                                             '(g_upper * n_lower))')
    # Lines with an empty lower level contribute nothing.
    stimulated_emission_factor[n_lower == 0.0] = 0.0
    stimulated_emission_factor[np.isneginf(stimulated_emission_factor)]\
        = 0.0
    # Population inversions into metastable levels are clipped to zero.
    stimulated_emission_factor[meta_stable_upper &
                               (stimulated_emission_factor < 0)] = 0.0
    if self.nlte_species:
        # Also clip negative factors for all lines of the NLTE species.
        nlte_lines_mask = \
            np.zeros(stimulated_emission_factor.shape[0]).astype(bool)
        for species in self.nlte_species:
            nlte_lines_mask |= (lines.atomic_number == species[0]) & \
                               (lines.ion_number == species[1])
        stimulated_emission_factor[(stimulated_emission_factor < 0) &
            nlte_lines_mask[np.newaxis].T] = 0.0
    return stimulated_emission_factor


class TauSobolev(ProcessingPlasmaProperty):
    """
    Attributes
    ----------
    tau_sobolev : Pandas DataFrame, dtype float
        Sobolev optical depth for each line. Indexed by line.
        Columns as zones.
    """
    outputs = ('tau_sobolevs',)
    latex_name = ('\\tau_{\\textrm{sobolev}}',)
    latex_formula = ('\\dfrac{\\pi e^{2}}{m_{e} c}f_{lu}\\lambda t_{exp}\
n_{lower} \\Big(1-\\dfrac{g_{lower}n_{upper}}{g_{upper}n_{lower}}\\Big)',)

    def __init__(self, plasma_parent):
        super(TauSobolev, self).__init__(plasma_parent)
        # Dimensionless prefactor (pi e^2)/(m_e c) in cgs units.
        self.sobolev_coefficient = (((np.pi * const.e.gauss ** 2) /
                                     (const.m_e.cgs * const.c.cgs)) *
                                    u.cm * u.s / u.cm**3).to(1).value

    def calculate(self, lines, level_number_density, lines_lower_level_index,
                  time_explosion, stimulated_emission_factor, j_blues,
                  f_lu, wavelength_cm):
        """Return the Sobolev optical depth per line (rows) and zone (cols)."""
        f_lu = f_lu.values[np.newaxis].T
        wavelength = wavelength_cm.values[np.newaxis].T
        n_lower = level_number_density.values.take(lines_lower_level_index,
                                                   axis=0, mode='raise')
        tau_sobolevs = (self.sobolev_coefficient * f_lu * wavelength *
                        time_explosion * n_lower * stimulated_emission_factor)
        # Fail loudly on non-finite values instead of propagating them.
        if (np.any(np.isnan(tau_sobolevs)) or
                np.any(np.isinf(np.abs(tau_sobolevs)))):
            raise ValueError(
                'Some tau_sobolevs are nan, inf, -inf in tau_sobolevs.'
                ' Something went wrong!')
        return pd.DataFrame(tau_sobolevs, index=lines.index,
                            columns=np.array(level_number_density.columns))


class BetaSobolev(ProcessingPlasmaProperty):
    """
    Attributes
    ----------
    beta_sobolev : Numpy Array, dtype float
    """
    outputs = ('beta_sobolev',)
    latex_name = ('\\beta_{\\textrm{sobolev}}',)

    def calculate(self, tau_sobolevs):
        # Reuse the previous output buffer when available; the C helper
        # fills it in place.
        if getattr(self, 'beta_sobolev', None) is None:
            beta_sobolev = np.zeros_like(tau_sobolevs.values)
        else:
            beta_sobolev = self.beta_sobolev
        macro_atom.calculate_beta_sobolev(
            tau_sobolevs.values.ravel(), beta_sobolev.ravel())
        return beta_sobolev


class TransitionProbabilities(ProcessingPlasmaProperty):
    """
    Attributes
    ----------
    transition_probabilities : Pandas DataFrame, dtype float
    """
    outputs = ('transition_probabilities',)

    def __init__(self, plasma_parent):
        super(TransitionProbabilities, self).__init__(plasma_parent)
        # One-time filter/coefficient setup happens on the first calculate().
        self.initialize = True

    def calculate(self, atomic_data, beta_sobolev, j_blues,
                  stimulated_emission_factor, tau_sobolevs):
        #I wonder why?
        # Not sure who wrote this but the answer is that when the plasma is
        # first initialised (before the first iteration, without temperature
        # values etc.) there are no j_blues values so this just prevents
        # an error. Aoife.
        if len(j_blues) == 0:
            return None
        macro_atom_data = self._get_macro_atom_data(atomic_data)
        if self.initialize:
            self.initialize_macro_atom_transition_type_filters(atomic_data,
                                                               macro_atom_data)
            self.transition_probability_coef = (
                self._get_transition_probability_coefs(macro_atom_data))
            self.initialize = False
        transition_probabilities = self._calculate_transition_probability(macro_atom_data,
            beta_sobolev, j_blues, stimulated_emission_factor)
        transition_probabilities = pd.DataFrame(transition_probabilities,
            index=macro_atom_data.transition_line_id,
            columns=tau_sobolevs.columns)
        return transition_probabilities

    def _calculate_transition_probability(self, macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor):
        # Output buffer: one row per macro-atom transition, one col per zone.
        transition_probabilities = np.empty((self.transition_probability_coef.shape[0],
                                             beta_sobolev.shape[1]))
        #trans_old = self.calculate_transition_probabilities(macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor)
        transition_type = macro_atom_data.transition_type.values
        lines_idx = macro_atom_data.lines_idx.values
        tpos = macro_atom_data.transition_probability.values
        #optimized_calculate_transition_probabilities(tpos, beta_sobolev, j_blues, stimulated_emission_factor, transition_type, lines_idx, self.block_references, transition_probabilities)
        # The compiled macro_atom routine fills transition_probabilities
        # in place.
        macro_atom.calculate_transition_probabilities(tpos, beta_sobolev, j_blues, stimulated_emission_factor, transition_type, lines_idx, self.block_references, transition_probabilities)
        return transition_probabilities

    def calculate_transition_probabilities(self, macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor):
        # Thin wrapper kept for compatibility; delegates to
        # prepare_transition_probabilities (defined elsewhere in the class).
        transition_probabilities = self.prepare_transition_probabilities(macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor)
        return transition_probabilities

    def initialize_macro_atom_transition_type_filters(self, atomic_data,
                                                      macro_atom_data):
        # Transition type 1 == radiative excitation ("up") per the mask below;
        # NOTE(review): truncated at the end of this chunk.
        self.transition_up_filter = (macro_atom_data.transition_type.values == 1)
        self.transition_up_line_filter = macro_atom_data.lines_idx.values[
            self.transition_up_filter]
        self.block_references = np.hstack((
            atomic_data.macro_atom_references.block_references,
#!/usr/bin/env python
"""
Get an action by name (name is not a supported selector for action)
"""
# NOTE: this is a Python 2 example script (print statements); it is expected
# to end with a HandlerError, demonstrating that 'name' is not a valid
# selector for the 'action' object type.

# import the basic python packages we need
import os
import sys
import tempfile
import pprint
import traceback

# disable python from generating a .pyc file
sys.dont_write_bytecode = True

# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')

# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)

# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')

# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
[sys.path.append(aa) for aa in path_adds if aa not in sys.path]

# import pytan
import pytan

# create a dictionary of arguments for the pytan handler
handler_args = {}

# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443"

# optional
handler_args['trusted_certs'] = "certs"

# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1

# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False

# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True

# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)

# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)

# setup the arguments for the handler() class
kwargs = {}
kwargs["objtype"] = u'action'
kwargs["name"] = u'Distribute Tanium Standard Utilities'

print "...CALLING: handler.get() with args: {}".format(kwargs)
try:
    handler.get(**kwargs)
except Exception as e:
    print "...EXCEPTION: {}".format(e)
    # this should throw an exception of type: pytan.exceptions.HandlerError
    # uncomment to see full exception
    # traceback.print_exc(file=sys.stdout)

'''STDOUT from running this:
...CALLING: pytan.handler() with args: {'username': 'Administrator', 'record_all_requests': True, 'loglevel': 1, 'debugformat': False, 'host': '10.0.1.240', 'password': 'Tanium2015!', 'port': '443'}
...OUTPUT: handler string: PyTan v2.1.4 Handler for Session to 10.0.1.240:443, Authenticated: True, Platform Version: 6.5.314.4301
...CALLING: handler.get() with args: {'objtype': u'action', 'name': u'Distribute Tanium Standard Utilities'}
...EXCEPTION: Getting a action requires at least one filter: ['id']

'''

'''STDERR from running this:

'''
"""Grow preprocessor that compiles Sass/SCSS sources into CSS."""
from . import base
from grow.common import utils
from protorpc import messages
import logging
import os
import re

if utils.is_appengine():
    sass = None  # Unavailable on Google App Engine.
else:
    import sass

# File extensions recognized as Sass sources.
SUFFIXES = frozenset(['sass', 'scss'])
SUFFIX_PATTERN = re.compile('[.](' + '|'.join(map(re.escape, SUFFIXES)) + ')$')


class Config(messages.Message):
    """Preprocessor configuration as declared in the pod's config."""
    sass_dir = messages.StringField(1)       # pod-relative source directory
    out_dir = messages.StringField(2)        # pod-relative output directory
    suffix = messages.StringField(3, default='.min.css')
    output_style = messages.StringField(4, default='compressed')
    source_comments = messages.BooleanField(5)
    image_path = messages.StringField(6)


class SassPreprocessor(base.BasePreprocessor):
    KIND = 'sass'
    Config = Config

    def run(self, build=True):
        """Compile the configured sass_dir tree into out_dir."""
        sass_dir = os.path.abspath(
            os.path.join(self.root, self.config.sass_dir.lstrip('/')))
        out_dir = os.path.abspath(
            os.path.join(self.root, self.config.out_dir.lstrip('/')))
        self.build_directory(sass_dir, out_dir)

    def build_directory(self, sass_path, css_path, _root_sass=None, _root_css=None):
        """Recursively compile every non-partial Sass file under sass_path.

        Returns a dict mapping each compiled source path to its output path.
        Stops and returns early on the first compile error (logged).
        """
        if sass is None:
            raise utils.UnavailableError('The Sass compiler is not available in this environment.')
        if self.config.image_path:
            image_path = os.path.abspath(
                os.path.join(self.root, self.config.image_path.lstrip('/')))
        else:
            image_path = None
        # Remember the tree roots across recursive calls (include path and
        # path-shortening for log messages).
        _root_sass = sass_path if _root_sass is None else _root_sass
        _root_css = css_path if _root_css is None else _root_css
        result = {}
        if not os.path.isdir(css_path):
            os.makedirs(css_path)
        for name in os.listdir(sass_path):
            # Skip non-Sass files and partials (leading underscore).
            if not SUFFIX_PATTERN.search(name) or name.startswith('_'):
                continue
            sass_fullname = os.path.join(sass_path, name)
            if os.path.isfile(sass_fullname):
                basename = os.path.splitext(name)[0]
                css_fullname = os.path.join(css_path, basename) + self.config.suffix
                try:
                    kwargs = {
                        'filename': sass_fullname,
                        'include_paths': [_root_sass],
                        'output_style': self.config.output_style,
                    }
                    # BUGFIX: the original assigned 'output_style' a second
                    # time (redundant) and duplicated the image_path guard:
                    # "if self.config.image_path is not None" could overwrite
                    # kwargs['image_path'] with None when config.image_path
                    # was an empty string. A single None-check suffices.
                    if image_path is not None:
                        kwargs['image_path'] = image_path
                    css = sass.compile(**kwargs)
                except sass.CompileError as e:
                    logging.error(str(e))
                    return result
                with open(css_fullname, 'w') as css_file:
                    if isinstance(css, unicode):
                        css = css.encode('utf-8')
                    css_file.write(css)
                result[sass_fullname] = css_fullname
            elif os.path.isdir(sass_fullname):
                css_fullname = os.path.join(css_path, name)
                subresult = self.build_directory(sass_fullname, css_fullname,
                                                 _root_sass, _root_css)
                result.update(subresult)
        for sass_path, out_path in result.iteritems():
            self.logger.info(
                'Compiled: {} -> {}'.format(sass_path.replace(self.root, ''),
                                            out_path.replace(self.root, '')))
        return result

    def list_watched_dirs(self):
        """Directories the dev server should watch for changes."""
        return [self.config.sass_dir]
sCLI' copyright = u'2014' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # import hdfs # The short X.Y version. version = hdfs.__version__.rsplit('.', 1)[0] # The full version, including alpha/beta/rc tags. release = hdfs.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # Autodoc autoclass_content = 'both' # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] html_static_path = [] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. 
#html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'hdfsdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'hdfs.tex', u'hdfs Documentation', u'Author', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ ('index', 'hdfs', u'hdfs documentation', [u'Author'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'hdfs', u'hdfs documentation', u'Author', 'hdfs', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'hdfs' epub_author = u'Author' epub_publisher = u'Author' epub_copyright = u'2014, Author' # The basename f
or the epub file. It defaults to the project name. #epub_basename = u'hdfs' # The HTML theme for the epub output. Since the default themes are not opt
imized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The
"""Filesystem, RAR-archive, hashing, and logging helper utilities."""
from __future__ import print_function, unicode_literals, division, absolute_import

import os
import sys
import string
import random
import hashlib
import logging
import subprocess
from io import StringIO

import config


def valid_path(path):
    """
    Returns an expanded, absolute path, or None if the path does not exist.
    """
    # Expand '~' first so the existence check runs on the real location.
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return None
    return os.path.abspath(path)


def get_paths(args):
    """
    Returns expanded, absolute paths for all valid paths in a list of arguments.
    """
    assert isinstance(args, list)
    valid_paths = []
    for path in args:
        abs_path = valid_path(path)
        # Nonexistent paths are silently dropped (valid_path returns None).
        if abs_path is not None:
            valid_paths.append(abs_path)
    return valid_paths


def split_path(path):
    """
    Returns a normalized list of the path's components.
    """
    path = os.path.normpath(path)
    # Drop empty components produced by a leading or doubled separator.
    return [x for x in path.split(os.path.sep) if x]


def generate_id(size=10, chars=string.ascii_uppercase + string.digits):
    """
    Generate a string of random alphanumeric characters.

    NOTE(review): uses the `random` module, so the result is not suitable
    for security-sensitive tokens (use `secrets` for those).
    """
    return ''.join(random.choice(chars) for i in range(size))


def list_contents(rar_file_path):
    """
    Returns a list of the archive's contents.

    Runs `unrar v` (verbose listing) and scrapes the file paths out of its
    textual output.  Raises FileUtilsError if the unrar process fails.
    """
    assert os.path.isfile(rar_file_path) and rar_file_path.endswith('.rar')
    contents = []
    count = 0
    # NOTE(review): the command is built by string interpolation and run with
    # shell=True -- a path containing a double quote would break quoting.
    # Confirm inputs are trusted, or switch to an argument list.
    command = '"{unrar}" v -- "{file}"'
    command = command.format(unrar=config.UNRAR_PATH, file=rar_file_path)
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        output = e.output.decode(encoding='utf-8')
        msg = 'Error while listing archive contents: "{error_string}"'
        raise FileUtilsError(msg.format(error_string=output.strip()))
    else:
        output = StringIO(output.decode(encoding='utf-8'))
        parse = False
        for line in output.readlines():
            line_list = line.strip().split()
            # If the line is not empty...
            if line_list:
                # This marks the start and end of the section we want to parse
                if line_list[0] == '-------------------------------------------------------------------------------':
                    parse = not parse
                    count = 0
                # If we're in the section of the output we want to parse...
                elif parse:
                    # Parse every other line (only the file paths)
                    if count % 2 == 0:
                        contents.append(line_list[0])
                    count += 1
    return contents


def unrar(rar_file_path, destination_dir=None):
    """
    Get a list of the archive's contents, then extract the archive and
    return the list.

    Nested .rar files found inside the archive are extracted recursively;
    the returned list contains the final (non-archive) extracted paths.
    Raises FileUtilsError if extraction fails.
    """
    assert os.path.isfile(rar_file_path) and rar_file_path.endswith('.rar')
    # Default to extracting next to the archive itself.
    if not destination_dir:
        destination_dir = os.path.split(rar_file_path)[0]
    # Get a list of the archive's contents
    contents = list_contents(rar_file_path)
    extracted_files = []
    # Extract the archive.  `x` keeps directory structure, `-o+` overwrites
    # existing files without prompting.
    command = '"{unrar}" x -o+ -- "{file}" "{destination}"'
    command = command.format(unrar=config.UNRAR_PATH, file=rar_file_path, destination=destination_dir)
    logging.debug(command)
    try:
        subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        output = e.output.decode(encoding='utf-8')
        msg = 'Error while extracting!\n{error_string}'
        raise FileUtilsError(msg.format(error_string=output.strip()))
    for relative_path in contents:
        path = os.path.join(destination_dir, relative_path)
        # Recursively extract until there are no RAR files left.
        # NOTE(review): the recursive call omits destination_dir, so nested
        # archives extract next to themselves -- presumably intended; confirm.
        if path.endswith('.rar'):
            extracted_files += unrar(path)
        else:
            extracted_files.append(path)
    # Return the list of paths
    return extracted_files


def sha1(data):
    """
    Return the SHA-1 hash (raw 20-byte digest, not hex) of the given data.
    """
    assert isinstance(data, (bytes, bytearray))
    sha1_hash = hashlib.sha1()
    sha1_hash.update(data)
    return sha1_hash.digest()


def set_log_file_name(file_name):
    """
    Set the file name for log output.

    Reconfigures the root logger from scratch: INFO+ to stdout always, and
    DEBUG+ to a file under config.LOG_DIR when `file_name` is truthy.
    """
    # Remove all logging handlers from the root logger
    logger = logging.getLogger('')
    # Iterate over a copy since removeHandler mutates logger.handlers.
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
        handler.flush()
        handler.close()
    # Configure console logging
    console_log_format = logging.Formatter('%(module)-15s: %(levelname)-8s %(message)s')
    console_log_handler = logging.StreamHandler(sys.stdout)
    console_log_handler.setFormatter(console_log_format)
    console_log_handler.setLevel(logging.INFO)
    logger.addHandler(console_log_handler)
    # Configure disk logging
    if file_name:
        log_path = os.path.join(config.LOG_DIR, file_name)
        disk_log_format = logging.Formatter('%(asctime)s %(module)-15s: %(levelname)-8s %(message)s')
        # mode='w' truncates any previous log with the same name.
        disk_log_handler = logging.FileHandler(filename=log_path, mode='w', encoding='utf-8')
        disk_log_handler.setFormatter(disk_log_format)
        disk_log_handler.setLevel(logging.DEBUG)
        logger.addHandler(disk_log_handler)
    # Root logger passes everything; the handlers do the filtering.
    logger.setLevel(logging.DEBUG)
    # Set logging level for the requests lib to warning+
    requests_log = logging.getLogger('requests')
    requests_log.setLevel(logging.WARNING)
    # Log system info and Python version for debugging purposes
    logging.debug('Python {version}'.format(version=sys.version))
    logging.debug('System platform: {platform}'.format(platform=sys.platform))


class FileUtilsError(Exception):
    # Raised by list_contents/unrar when the unrar subprocess fails.
    pass
"""Config flow for TP-Link.""" from __future__ import annotations from typing import Any from kasa import SmartDevice, SmartDeviceException from kasa.discover import Discover import voluptuous as vol from homeassistant import config_entries from homeassistant.components import dhcp from homeassistant.const import CONF_DEVICE, CONF_HOST, CONF_MAC from homeassistant.core import callback from homeassistant.data_entry_flow import FlowResult from homeassistant.helpers import device_registry as dr from homeassistant.helpers.typing import DiscoveryInfoType from . import async_discover_devices from .const import DOMAIN class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for tplink.""" VERSION = 1 def __init__(self) -> None: """Initialize the config flow.""" self._discovered_devices: dict[str, SmartDevice] = {} self._discovered_device: SmartDevice | None = None async def async_step_dhcp(self, discovery_info: dhcp.DhcpServiceInfo) -> FlowResult: """Handle discovery via dhcp.""" return await self._async_handle_discovery( discovery_info.ip, discovery_info.macaddress ) async def async_step_discovery( self, discovery_info: DiscoveryInfoType ) -> FlowResult: """Handle discovery.""" return await self._async_handle_discovery( discovery_info[CONF_HOST], discovery_info[CONF_MAC] ) async def _async_handle_discovery(self, host: str, mac: str) -> FlowResult: """Handle any discovery.""" await self.async_set_unique_id(dr.format_mac(mac)) self._abort_if_unique_id_configured(updates={CONF_HOST: host}) self._async_abort_entries_match({CONF_HOST: host}) self.context[CONF_HOST] = host for progress in self._async_in_progress(): if progress.get("context", {}).get(CONF_HOST) == host: return self.async_abort(reason="already_in_progress") try: self._discovered_device = await self._async_try_connect( host, raise_on_progress=True ) except SmartDeviceException: return self.async_abort(reason="cannot_connect") return await self.async_step_
discovery_confirm() async def async_step_discovery_confirm( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Confirm discovery.""" assert self._discovered_device is not None if user_input is not None: return self._async_crea
te_entry_from_device(self._discovered_device) self._set_confirm_only() placeholders = { "name": self._discovered_device.alias, "model": self._discovered_device.model, "host": self._discovered_device.host, } self.context["title_placeholders"] = placeholders return self.async_show_form( step_id="discovery_confirm", description_placeholders=placeholders ) async def async_step_user( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle the initial step.""" errors = {} if user_input is not None: if not (host := user_input[CONF_HOST]): return await self.async_step_pick_device() try: device = await self._async_try_connect(host, raise_on_progress=False) except SmartDeviceException: errors["base"] = "cannot_connect" else: return self._async_create_entry_from_device(device) return self.async_show_form( step_id="user", data_schema=vol.Schema({vol.Optional(CONF_HOST, default=""): str}), errors=errors, ) async def async_step_pick_device( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle the step to pick discovered device.""" if user_input is not None: mac = user_input[CONF_DEVICE] await self.async_set_unique_id(mac, raise_on_progress=False) return self._async_create_entry_from_device(self._discovered_devices[mac]) configured_devices = { entry.unique_id for entry in self._async_current_entries() } self._discovered_devices = await async_discover_devices(self.hass) devices_name = { formatted_mac: f"{device.alias} {device.model} ({device.host}) {formatted_mac}" for formatted_mac, device in self._discovered_devices.items() if formatted_mac not in configured_devices } # Check if there is at least one device if not devices_name: return self.async_abort(reason="no_devices_found") return self.async_show_form( step_id="pick_device", data_schema=vol.Schema({vol.Required(CONF_DEVICE): vol.In(devices_name)}), ) @callback def _async_create_entry_from_device(self, device: SmartDevice) -> FlowResult: """Create a config entry from a smart device.""" 
self._abort_if_unique_id_configured(updates={CONF_HOST: device.host}) return self.async_create_entry( title=f"{device.alias} {device.model}", data={ CONF_HOST: device.host, }, ) async def _async_try_connect( self, host: str, raise_on_progress: bool = True ) -> SmartDevice: """Try to connect.""" self._async_abort_entries_match({CONF_HOST: host}) device: SmartDevice = await Discover.discover_single(host) await self.async_set_unique_id( dr.format_mac(device.mac), raise_on_progress=raise_on_progress ) return device
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# Problem 0001: As an independent Apple Store App developer running a
# limited-time promotion, generate 200 activation codes (or coupons)
# for your application using Python.

import random
import string


def activation_code(chars=string.ascii_uppercase + string.digits, length=16):
    """Return one random activation code.

    chars:  alphabet to draw characters from (default: A-Z plus digits).
    length: number of characters in the code (default: 16).
    """
    return ''.join(random.choice(chars) for _ in range(length))


if __name__ == '__main__':
    # Collect codes in a set until we have exactly 200 *unique* ones.
    # The original fixed `for i in range(200)` loop skipped duplicates with
    # `continue`, so it could silently end with fewer than 200 codes.
    code_collection = set()
    while len(code_collection) < 200:
        code_collection.add(activation_code())
""" Simple http server to create streams for asyncio tests """ import asyncio import aiohttp from aiohttp import web import codecs from utils.streamdecoder import DecodingStreamReader async def get_data(host, port): url = 'http://{}:{}/'.format(host, port) decoder = codecs.getincrementaldecoder('utf-8')(errors='strict') async with aiohttp.get(url) as r: stream = DecodingStreamReader(r.content) while not stream.at_eof(): data = await stream.read(7) print(data, end='') class UTF8Server: def __init__(self): self.app = web.Application() self.app.router.add_route('GET', '/', self.hello) self.handler = self.app.make_handler() async def hello(self, request): return web.Response(body=b'M\xc3\xa4dchen mit Bi\xc3\x9f\n') # return web.Response(body=b'\xc3\x84\xc3\xa4\xc3\xa4\xc3\xa4\xc3\xa4\xc3\xa4\xc3\xa4\xc3\xa4\xc3\xa4\xc3\xa4h!\n') def start_server(self, loop, host, port): setup = loop.create_server(self.handler, host, port) self.srv = loop.run_until_complete(setup) return self.srv def stop_server(self, loop): self.srv.close() loop.run_until_complete(self.srv.wait_closed()) loop.run_until_complete(self.handler.finish_connections(1.0)) loop.run_until_complete(self.app.finish()) if __name__ == '__main__': HOST = '127.0.0.1' P
ORT = '56789' loop = asyncio.get_event_loop() server = UTF8Server() server.start_server(loop, HOST, PORT) print("serving on", server.srv.sockets[0].getsockname()) try: task = asyncio.ensure_future(get_data(HOST, PORT))
loop.run_until_complete(task) finally: server.stop_server(loop) loop.close()
../../../../../../share/pyshared/twi
sted/pers
isted/journal/base.py
__author__ = 'mFoxRU'

import scipy.signal as sps

from abstractwavelet import AbstractWavelet


class Ricker(AbstractWavelet):
    """Ricker ("Mexican hat") wavelet wrapper."""

    # Display name for this wavelet.
    name = 'Ricker(MHAT)'
    # No extra parameters for this wavelet.
    params = {}

    def fn(self):
        """Return the wavelet-generating callable (scipy.signal.ricker).

        NOTE(review): scipy.signal.ricker was deprecated in SciPy 1.11 and
        removed in 1.15 -- confirm the project's pinned SciPy version.
        """
        return sps.ricker
.option.global_options import GlobalOptions from pants.option.optionable import Optionable from pants.option.options import Options from pants.option.scope import GLOBAL_SCOPE, ScopeInfo from pants.util.dirutil import read_file from pants.util.memo import memoized_method, memoized_property from pants.util.ordered_set import FrozenOrderedSet from pants.util.strutil import ensure_text # This is a temporary hack that allows us to note the fact that we're in v2-exclusive mode # in a static location, as soon as we know it. This way code that cannot access options # can still use this information to customize behavior. Again, this is a temporary hack # to provide a better v2 experience to users who are not (and possibly never have been) # running v1, and should go away ASAP. class IsV2Exclusive: def __init__(self): self._value = False d
ef set(self): self._value = True def __bool__(self): return self._value is_v2_exclusive = IsV2Exclusive() @dataclass(frozen=True) class OptionsBootstrapper: """Holds the result of the first stage of options parsing, and assists with parsing full options.""" env_tuples: Tuple[Tuple[str, str], ...] bootstrap_args:
Tuple[str, ...] args: Tuple[str, ...] config: Config @staticmethod def get_config_file_paths(env, args) -> List[str]: """Get the location of the config files. The locations are specified by the --pants-config-files option. However we need to load the config in order to process the options. This method special-cases --pants-config-files in order to solve this chicken-and-egg problem. Note that, obviously, it's not possible to set the location of config files in a config file. Doing so will have no effect. """ # This exactly mirrors the logic applied in Option to all regular options. Note that we'll # also parse --pants-config as a regular option later, but there's no harm in that. In fact, # it's preferable, so that any code that happens to want to know where we read config from # can inspect the option. flag = "--pants-config-files=" evars = [ "PANTS_GLOBAL_PANTS_CONFIG_FILES", "PANTS_PANTS_CONFIG_FILES", "PANTS_CONFIG_FILES", ] path_list_values = [] default = get_default_pants_config_file() if Path(default).is_file(): path_list_values.append(ListValueComponent.create(default)) for var in evars: if var in env: path_list_values.append(ListValueComponent.create(env[var])) break for arg in args: # Technically this is very slightly incorrect, as we don't check scope. But it's # very unlikely that any task or subsystem will have an option named --pants-config-files. # TODO: Enforce a ban on options with a --pants- prefix outside our global options? if arg.startswith(flag): path_list_values.append(ListValueComponent.create(arg[len(flag) :])) return ListValueComponent.merge(path_list_values).val @staticmethod def parse_bootstrap_options( env: Mapping[str, str], args: Sequence[str], config: Config ) -> Options: bootstrap_options = Options.create( env=env, config=config, known_scope_infos=[GlobalOptions.get_scope_info()], args=args, ) def register_global(*args, **kwargs): ## Only use of Options.register? 
bootstrap_options.register(GLOBAL_SCOPE, *args, **kwargs) GlobalOptions.register_bootstrap_options(register_global) opts = bootstrap_options.for_global_scope() if opts.v2 and not opts.v1 and opts.backend_packages == []: is_v2_exclusive.set() return bootstrap_options @classmethod def create( cls, env: Optional[Mapping[str, str]] = None, args: Optional[Sequence[str]] = None, ) -> "OptionsBootstrapper": """Parses the minimum amount of configuration necessary to create an OptionsBootstrapper. :param env: An environment dictionary, or None to use `os.environ`. :param args: An args array, or None to use `sys.argv`. """ env = { k: v for k, v in (os.environ if env is None else env).items() if k.startswith("PANTS_") } args = tuple(sys.argv if args is None else args) flags = set() short_flags = set() # We can't use pants.engine.fs.FileContent here because it would cause a circular dep. @dataclass(frozen=True) class FileContent: path: str content: bytes def filecontent_for(path: str) -> FileContent: return FileContent(ensure_text(path), read_file(path, binary_mode=True),) def capture_the_flags(*args: str, **kwargs) -> None: for arg in args: flags.add(arg) if len(arg) == 2: short_flags.add(arg) elif kwargs.get("type") == bool: flags.add(f"--no-{arg[2:]}") GlobalOptions.register_bootstrap_options(capture_the_flags) def is_bootstrap_option(arg: str) -> bool: components = arg.split("=", 1) if components[0] in flags: return True for flag in short_flags: if arg.startswith(flag): return True return False # Take just the bootstrap args, so we don't choke on other global-scope args on the cmd line. # Stop before '--' since args after that are pass-through and may have duplicate names to our # bootstrap options. 
bargs = tuple( filter(is_bootstrap_option, itertools.takewhile(lambda arg: arg != "--", args)) ) config_file_paths = cls.get_config_file_paths(env=env, args=args) config_files_products = [filecontent_for(p) for p in config_file_paths] pre_bootstrap_config = Config.load_file_contents(config_files_products) initial_bootstrap_options = cls.parse_bootstrap_options(env, bargs, pre_bootstrap_config) bootstrap_option_values = initial_bootstrap_options.for_global_scope() # Now re-read the config, post-bootstrapping. Note the order: First whatever we bootstrapped # from (typically pants.toml), then config override, then rcfiles. full_config_paths = pre_bootstrap_config.sources() if bootstrap_option_values.pantsrc: rcfiles = [ os.path.expanduser(str(rcfile)) for rcfile in bootstrap_option_values.pantsrc_files ] existing_rcfiles = list(filter(os.path.exists, rcfiles)) full_config_paths.extend(existing_rcfiles) full_config_files_products = [filecontent_for(p) for p in full_config_paths] post_bootstrap_config = Config.load_file_contents( full_config_files_products, seed_values=bootstrap_option_values.as_dict(), ) env_tuples = tuple(sorted(env.items(), key=lambda x: x[0])) return cls( env_tuples=env_tuples, bootstrap_args=bargs, args=args, config=post_bootstrap_config ) @memoized_property def env(self) -> Dict[str, str]: return dict(self.env_tuples) @memoized_property def bootstrap_options(self) -> Options: """The post-bootstrap options, computed from the env, args, and fully discovered Config. Re-computing options after Config has been fully expanded allows us to pick up bootstrap values (such as backends) from a config override file, for example. Because this can be computed from the in-memory representation of these values, it is not part of the object's identity. 
""" return self.parse_bootstrap_options(self.env, self.bootstrap_args, self.config) def get_bootstrap_options(self) -> Options: """Returns an Options instance that only knows about the bootstrap options.""" return self.bootstrap_options @memoized_method def _full_options(sel
import redis


class RedisAdapter:
    """Thin adapter owning a StrictRedis client for a given host/port.

    Note: the class-level attributes are only fallback defaults; the
    ``if not self.storage`` guard reads the class attribute (None) and then
    binds an instance attribute, so every instance creates its own client.
    """

    storage = None
    host = None
    port = None

    def __init__(self, host, port):
        # Remember the connection parameters (the original declared
        # host/port class attributes but never assigned them).
        self.host = host
        self.port = int(port)
        if not self.storage:
            # db=0: the default Redis logical database.
            self.storage = redis.StrictRedis(host=self.host, port=self.port, db=0)

    def get_storage(self):
        """Return the underlying StrictRedis client."""
        return self.storage
#
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class DictUtils(object):
    '''
    Provides dict services
    '''

    @staticmethod
    def exclude(dct, keys=()):
        """
        Removes given items from the dict, in place.

        @param dct: the dict to update; a falsy value (None, {}) is
                    returned unchanged
        @param keys: the keys of items to remove; missing keys are ignored
        @return: the updated dict
        """
        if dct:
            for key in keys:
                # dict.has_key() was removed in Python 3; pop() with a
                # default removes a possibly-missing key portably.
                dct.pop(key, None)
        # The original docstring promised the updated dict but returned
        # None; honor the documented contract.
        return dct
import os

import numpy
from numpy.distutils.misc_util import Configuration


def configuration(parent_package="", top_path=None):
    """Assemble the numpy.distutils configuration for the ``tree`` subpackage."""
    config = Configuration("tree", parent_package, top_path)

    # Link the C extension against libm on POSIX platforms.
    extension_libraries = ['m'] if os.name == 'posix' else []

    config.add_extension(
        "_tree",
        sources=["_tree.c"],
        include_dirs=[numpy.get_include()],
        libraries=extension_libraries,
        extra_compile_args=["-O3"],
    )
    config.add_subpackage("tests")
    return config


if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(**configuration().todict())
sts", dest="showTests", help="Show all discovered tests", action="store_true", default=False) group.add_option("", "--use-processes", dest="useProcesses", help="Run tests in parallel with processes (not threads)", action="store_true", default=useProcessesIsDefault) group.add_option("", "--use-threads", dest="useProcesses", help="Run tests in parallel with threads (not processes)", action="store_false", default=useProcessesIsDefault) parser.add_option_group(group) (opts, args) = parser.parse_args() if opts.show_version: print("lit %s" % (lit.__version__,)) return
if not args: parser.error('No inputs specified') if opts.numThreads is None: # Python <2.5 has a race condition causing lit to always fail with numThreads>1 # http://bugs.python.org/issue1731717 # I haven't seen this bug occur with 2.5.2 and later, so only enable multiple # threads by defau
lt there. if sys.hexversion >= 0x2050200: opts.numThreads = lit.util.detectCPUs() else: opts.numThreads = 1 inputs = args # Create the user defined parameters. userParams = dict(builtinParameters) for entry in opts.userParameters: if '=' not in entry: name,opc = entry,'' else: name,opc = entry.split('=', 1) userParams[name] = opc # Decide what the requested maximum indvidual test time should be if opts.maxIndividualTestTime != None: maxIndividualTestTime = opts.maxIndividualTestTime else: # Default is zero maxIndividualTestTime = 0 # Create the global config object. litConfig = lit.LitConfig.LitConfig( progname = os.path.basename(sys.argv[0]), path = opts.path, quiet = opts.quiet, useValgrind = opts.useValgrind, valgrindLeakCheck = opts.valgrindLeakCheck, valgrindArgs = opts.valgrindArgs, noExecute = opts.noExecute, debug = opts.debug, isWindows = isWindows, params = userParams, config_prefix = opts.configPrefix, maxIndividualTestTime = maxIndividualTestTime) # Perform test discovery. run = lit.run.Run(litConfig, lit.discovery.find_tests_for_inputs(litConfig, inputs)) # After test discovery the configuration might have changed # the maxIndividualTestTime. If we explicitly set this on the # command line then override what was set in the test configuration if opts.maxIndividualTestTime != None: if opts.maxIndividualTestTime != litConfig.maxIndividualTestTime: litConfig.note(('The test suite configuration requested an individual' ' test timeout of {0} seconds but a timeout of {1} seconds was' ' requested on the command line. Forcing timeout to be {1}' ' seconds') .format(litConfig.maxIndividualTestTime, opts.maxIndividualTestTime)) litConfig.maxIndividualTestTime = opts.maxIndividualTestTime if opts.showSuites or opts.showTests: # Aggregate the tests by suite. 
suitesAndTests = {} for result_test in run.tests: if result_test.suite not in suitesAndTests: suitesAndTests[result_test.suite] = [] suitesAndTests[result_test.suite].append(result_test) suitesAndTests = list(suitesAndTests.items()) suitesAndTests.sort(key = lambda item: item[0].name) # Show the suites, if requested. if opts.showSuites: print('-- Test Suites --') for ts,ts_tests in suitesAndTests: print(' %s - %d tests' %(ts.name, len(ts_tests))) print(' Source Root: %s' % ts.source_root) print(' Exec Root : %s' % ts.exec_root) if ts.config.available_features: print(' Available Features : %s' % ' '.join( sorted(ts.config.available_features))) # Show the tests, if requested. if opts.showTests: print('-- Available Tests --') for ts,ts_tests in suitesAndTests: ts_tests.sort(key = lambda test: test.path_in_suite) for test in ts_tests: print(' %s' % (test.getFullName(),)) # Exit. sys.exit(0) # Select and order the tests. numTotalTests = len(run.tests) # First, select based on the filter expression if given. if opts.filter: try: rex = re.compile(opts.filter) except: parser.error("invalid regular expression for --filter: %r" % ( opts.filter)) run.tests = [result_test for result_test in run.tests if rex.search(result_test.getFullName())] # Then select the order. if opts.shuffle: random.shuffle(run.tests) elif opts.incremental: sort_by_incremental_cache(run) else: run.tests.sort(key = lambda t: (not t.isEarlyTest(), t.getFullName())) # Finally limit the number of tests, if desired. if opts.maxTests is not None: run.tests = run.tests[:opts.maxTests] # Don't create more threads than tests. opts.numThreads = min(len(run.tests), opts.numThreads) # Because some tests use threads internally, and at least on Linux each # of these threads counts toward the current process limit, try to # raise the (soft) process limit so that tests don't fail due to # resource exhaustion. 
try: cpus = lit.util.detectCPUs() desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor # Import the resource module here inside this try block because it # will likely fail on Windows. import resource max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC) desired_limit = min(desired_limit, max_procs_hard) if max_procs_soft < desired_limit: resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard)) litConfig.note('raised the process limit from %d to %d' % \ (max_procs_soft, desired_limit)) except: pass extra = '' if len(run.tests) != numTotalTests: extra = ' of %d' % numTotalTests header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra, opts.numThreads) progressBar = None if not opts.quiet: if opts.succinct and opts.useProgressBar: try: tc = lit.ProgressBar.TerminalController() progressBar = lit.ProgressBar.ProgressBar(tc, header) except ValueError: print(header) progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ') else: print(header) startTime = time.time() display = TestingProgressDisplay(opts, len(run.tests), progressBar) try: run.execute_tests(display, opts.numThreads, opts.maxTime, opts.useProcesses) except KeyboardInterrupt: sys.exit(2) display.finish() testing_time = time.time() - startTime if not opts.quiet: print('Testing Time: %.2fs' % (testing_time,)) # Write out the test data, if requested. if opts.output_path is not None: write_test_results(run, litConfig, testing_time, opts.output_path) # List test results organized by kind. hasFailures = False byCode = {} for test in run.tests: if test.result.code not in byCode: byCode[test.result.code] = [] byCode[test.result.code].append(test) if test.result.code.isFailure: hasFailures = True # Print each test in any of the failing groups. for title,code in (('Unexpected Passing Tests', lit.Test.XPASS), ('Failing Tests', lit.Test.FAIL), ('Unresolved Tests', lit.Test.UNRESOLVED