Dataset schema (as shown in the viewer):

- `prompt`: list, length 1 to 1
- `compression_prompt`: list, length 1 to 1
- `target`: string, length 1.03k to 828k
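Each row pairs a plain copy instruction (`prompt`), the same instruction wrapped in a `<|memory_start|>` marker (`compression_prompt`), and the code string to reproduce (`target`). Below is a minimal sketch of loading and inspecting one row, assuming the rows are stored as JSON lines; the file name `rows.jsonl` is a hypothetical placeholder, not a path confirmed by this dump.

```python
# Minimal sketch: inspect rows with the schema above, assuming JSON-lines
# storage. "rows.jsonl" is a hypothetical file name, not confirmed here.
from datasets import load_dataset

ds = load_dataset("json", data_files="rows.jsonl", split="train")
row = ds[0]

# `prompt` and `compression_prompt` are single-element lists of messages.
print(row["prompt"][0]["content"][:120])
print(row["compression_prompt"][0]["content"][:120])  # includes <|memory_start|>

# `target` is the verbatim code string (1.03k to 828k characters per row).
print(len(row["target"]))
```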
[ { "content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom base64 import standard_b64decode\nfrom functools import wraps\nfrom urllib.parse import unquote\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography....
[ { "content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom base64 import standard_b64decode\nfrom functools import wraps\nfrom urllib.parse import unquote\n\nfrom cryptography.hazmat.backends import default_backend\nfr...
```python
# -*- coding: utf-8 -*-

import os
from base64 import standard_b64decode
from functools import wraps
from urllib.parse import unquote

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

import flask
from flask.json import jsonify

from pyload.core.api import Destination
from pyload.core.utils.convert import to_str
from pyload.core.utils.misc import eval_js

#: url_prefix here is intentional since it should not be affected by path prefix
bp = flask.Blueprint("flash", __name__, url_prefix="/")


#: decorator
def local_check(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        remote_addr = flask.request.environ.get("REMOTE_ADDR", "0")
        http_host = flask.request.environ.get("HTTP_HOST", "0")

        if remote_addr in ("127.0.0.1", "::ffff:127.0.0.1", "::1", "localhost") or http_host in (
            "127.0.0.1:9666",
            "[::1]:9666",
        ):
            return func(*args, **kwargs)
        else:
            return "Forbidden", 403

    return wrapper


@bp.after_request
def add_cors(response):
    response.headers.update({
        'Access-Control-Max-Age': 1800,
        'Access-Control-Allow-Origin': "*",
        'Access-Control-Allow-Methods': "OPTIONS, GET, POST"
    })
    return response


@bp.route("/flash/", methods=["GET", "POST"], endpoint="index")
@bp.route("/flash/<id>", methods=["GET", "POST"], endpoint="index")
@local_check
def index(id="0"):
    return "JDownloader\r\n"


@bp.route("/flash/add", methods=["POST"], endpoint="add")
@local_check
def add():
    package = flask.request.form.get(
        "package", flask.request.form.get("source", flask.request.form.get("referer"))
    )
    urls = [url for url in flask.request.form["urls"].replace(' ', '\n').split("\n") if url.strip()]
    if not urls:
        return jsonify(False)

    api = flask.current_app.config["PYLOAD_API"]
    try:
        if package:
            api.add_package(package, urls, Destination.COLLECTOR)
        else:
            api.generate_and_add_packages(urls, Destination.COLLECTOR)
    except Exception as e:
        return "failed " + e.args[0] + "\r\n"

    return "success\r\n"


@bp.route("/flash/addcrypted", methods=["POST"], endpoint="addcrypted")
@local_check
def addcrypted():
    api = flask.current_app.config["PYLOAD_API"]

    package = flask.request.form.get(
        "package", flask.request.form.get("source", flask.request.form.get("referer"))
    )
    dl_path = api.get_config_value("general", "storage_folder")
    dlc_path = os.path.join(
        dl_path, package.replace("/", "").replace("\\", "").replace(":", "") + ".dlc"
    )
    dlc = flask.request.form["crypted"].replace(" ", "+")

    with open(dlc_path, mode="wb") as fp:
        fp.write(dlc)

    try:
        api.add_package(package, [dlc_path], Destination.COLLECTOR)
    except Exception:
        flask.abort(500)
    else:
        return "success\r\n"


@bp.route("/flash/addcrypted2", methods=["POST"], endpoint="addcrypted2")
@local_check
def addcrypted2():
    package = flask.request.form.get(
        "package", flask.request.form.get("source", flask.request.form.get("referer"))
    )
    crypted = flask.request.form["crypted"]
    jk = flask.request.form["jk"]

    crypted = standard_b64decode(unquote(crypted.replace(" ", "+")))
    jk = eval_js(f"{jk} f()")

    try:
        IV = key = bytes.fromhex(jk)
    except Exception:
        return "Could not decrypt key", 500

    cipher = Cipher(
        algorithms.AES(key), modes.CBC(IV), backend=default_backend()
    )
    decryptor = cipher.decryptor()
    decrypted = decryptor.update(crypted) + decryptor.finalize()

    urls = to_str(decrypted).replace("\x00", "").replace("\r", "").split("\n")
    urls = [url for url in urls if url.strip()]

    api = flask.current_app.config["PYLOAD_API"]
    try:
        if package:
            api.add_package(package, urls, Destination.COLLECTOR)
        else:
            api.generate_and_add_packages(urls, Destination.COLLECTOR)
    except Exception:
        return "failed can't add", 500
    else:
        return "success\r\n"


@bp.route("/flashgot", methods=["POST"], endpoint="flashgot")
@bp.route("/flashgot_pyload", methods=["POST"], endpoint="flashgot")
@local_check
def flashgot():
    if flask.request.referrer not in (
        "http://localhost:9666/flashgot",
        "http://127.0.0.1:9666/flashgot",
    ):
        flask.abort(500)

    package = flask.request.form.get("package")
    urls = [url for url in flask.request.form["urls"].split("\n") if url.strip()]
    # folder = flask.request.form.get('dir', None)
    autostart = int(flask.request.form.get("autostart", 0))

    api = flask.current_app.config["PYLOAD_API"]
    if package:
        api.add_package(package, urls, Destination.QUEUE if autostart else Destination.COLLECTOR)
    else:
        api.generate_and_add_packages(urls, Destination.QUEUE if autostart else Destination.COLLECTOR)


@bp.route("/crossdomain.xml", endpoint="crossdomain")
@local_check
def crossdomain():
    rep = '<?xml version="1.0"?>\n'
    rep += '<!DOCTYPE cross-domain-policy SYSTEM "http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">\n'
    rep += "<cross-domain-policy>\n"
    rep += '<allow-access-from domain="*" />\n'
    rep += "</cross-domain-policy>"
    return rep


@bp.route("/flash/checkSupportForUrl", methods=["POST"], endpoint="checksupport")
@local_check
def checksupport():
    api = flask.current_app.config["PYLOAD_API"]

    url = flask.request.form["url"]
    res = api.check_urls([url])
    supported = not res[0][1] is None

    return str(supported).lower()


@bp.route("/jdcheck.js", endpoint="jdcheck")
@local_check
def jdcheck():
    rep = "jdownloader=true;\r\n"
    rep += "var version='42707';\r\n"
    return rep
```
[ { "content": "Repeat the following code:\n```python\nclass WordBranch(object):\r\n \"\"\"WordBranch represents a single branch in the tree of all the valid word combinations.\r\n\r\n Attributes:\r\n letter_branch (LetterBranch) The reference to the LetterBranch that represents the word.\r\n ...
[ { "content": "Repeat the following code:\n<|memory_start|>```python\nclass WordBranch(object):\r\n \"\"\"WordBranch represents a single branch in the tree of all the valid word combinations.\r\n\r\n Attributes:\r\n letter_branch (LetterBranch) The reference to the LetterBranch that represents the...
```python
class WordBranch(object):
    """WordBranch represents a single branch in the tree of all the valid word combinations.

    Attributes:
        letter_branch  (LetterBranch)  The reference to the LetterBranch that represents the word.
        origin         (WordBranch)    The reference to the parent WordBranch.
        remain_char    (int)           Number of characters remaining in the remain_dict.
        valid_children ([WordBranch])  Array of WordBranches leading to valid anagrams.
    """
    def __init__(self, letter_branch, origin, remain_char, valid_children):
        self.letter_branch = letter_branch
        self.origin = origin
        self.remain_char = remain_char
        self.valid_children = valid_children

    def __str__(self):
        '''Trace words from leaf branch to root.

        Args
            self (WordBranch) The leaf branch to trace for word.

        Returns
            (string) The full string represented by the leaf.
        '''
        output_str = ''
        words = []
        pointer = self
        while pointer.origin != None:
            words.append(pointer)
            pointer = pointer.origin

        words.reverse()  # Put words in the right order
        for word in words:
            output_str += str(word.letter_branch) + ' '

        # Remove last char --> ' '
        return output_str[:-1]


hash_to_branch = {}

def get_word_tree_root(phrase_len, phrase_dict, words):
    '''Construct the root object of the WordBranch tree.

    Args
        phrase_len  (int)            Count of valid characters in phrase.
        phrase_dict ({char => int})  The remaining letters of the phrase.
        words       ([LetterBranch]) Array of all the available words as LetterBranch.

    Returns
        (WordBranch) The root of WordBranch tree.
    '''
    global hash_to_branch
    hash_to_branch = {}  # Reset hash to branch on new tree root

    root_children = []
    root = WordBranch(None, None, phrase_len, None)
    for word in words:
        root_children.append(WordBranch(word, root, phrase_len - len(str(word)), None))

    return root, root_children

def get_hash_to_branch():
    global hash_to_branch
    return hash_to_branch
```
[ { "content": "```python\n#!/usr/bin/env python\n# Copyright (c) 2012 Cloudera, Inc. All rights reserved.\n# Validates all aggregate functions across all datatypes\n#\nimport logging\nimport pytest\nfrom tests.common.test_vector import *\nfrom tests.common.impala_test_suite import ImpalaTestSuite\nfrom tests.com...
[ { "content": "<|memory_start|>```python\n#!/usr/bin/env python\n# Copyright (c) 2012 Cloudera, Inc. All rights reserved.\n# Validates all aggregate functions across all datatypes\n#\nimport logging\nimport pytest\nfrom tests.common.test_vector import *\nfrom tests.common.impala_test_suite import ImpalaTestSuite...
```python
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Validates all aggregate functions across all datatypes
#
import logging
import pytest
from tests.common.test_vector import *
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_dimensions import create_exec_option_dimension
from tests.common.test_dimensions import create_uncompressed_text_dimension
from tests.util.test_file_parser import QueryTestSectionReader

agg_functions = ['sum', 'count', 'min', 'max', 'avg']

data_types = ['int', 'bool', 'double', 'bigint', 'tinyint',
              'smallint', 'float', 'timestamp']

result_lut = {
  # TODO: Add verification for other types
  'sum-tinyint': 45000, 'avg-tinyint': 5, 'count-tinyint': 9000,
  'min-tinyint': 1, 'max-tinyint': 9,
  'sum-smallint': 495000, 'avg-smallint': 50, 'count-smallint': 9900,
  'min-smallint': 1, 'max-smallint': 99,
  'sum-int': 4995000, 'avg-int': 500, 'count-int': 9990,
  'min-int': 1, 'max-int': 999,
  'sum-bigint': 49950000, 'avg-bigint': 5000, 'count-bigint': 9990,
  'min-bigint': 10, 'max-bigint': 9990,
}

class TestAggregation(ImpalaTestSuite):
  @classmethod
  def get_workload(self):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestAggregation, cls).add_test_dimensions()
    # Add two more dimensions
    cls.TestMatrix.add_dimension(TestDimension('agg_func', *agg_functions))
    cls.TestMatrix.add_dimension(TestDimension('data_type', *data_types))
    cls.TestMatrix.add_constraint(lambda v: cls.is_valid_vector(v))

  @classmethod
  def is_valid_vector(cls, vector):
    data_type, agg_func = vector.get_value('data_type'), vector.get_value('agg_func')
    file_format = vector.get_value('table_format').file_format
    if file_format not in ['parquet']:
      return False

    if cls.exploration_strategy() == 'core':
      # Reduce execution time when exploration strategy is 'core'
      if vector.get_value('exec_option')['batch_size'] != 0:
        return False

    # Avro doesn't have timestamp type
    if file_format == 'avro' and data_type == 'timestamp':
      return False
    elif agg_func not in ['min', 'max', 'count'] and data_type == 'bool':
      return False
    elif agg_func == 'sum' and data_type == 'timestamp':
      return False
    return True

  def test_aggregation(self, vector):
    data_type, agg_func = (vector.get_value('data_type'),
                           vector.get_value('agg_func'))
    query = 'select %s(%s_col) from alltypesagg where day is not null' % (
        agg_func, data_type)
    result = self.execute_scalar(query, vector.get_value('exec_option'),
                                 table_format=vector.get_value('table_format'))
    if 'int' in data_type:
      assert result_lut['%s-%s' % (agg_func, data_type)] == int(result)

    # AVG
    if vector.get_value('data_type') == 'timestamp' and\
       vector.get_value('agg_func') == 'avg':
      return
    query = 'select %s(DISTINCT(%s_col)) from alltypesagg where day is not null' % (
        agg_func, data_type)
    result = self.execute_scalar(query, vector.get_value('exec_option'))

class TestAggregationQueries(ImpalaTestSuite):
  """Run the aggregation test suite, with codegen enabled and disabled, to
  exercise our non-codegen code"""
  @classmethod
  def get_workload(self):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestAggregationQueries, cls).add_test_dimensions()

    cls.TestMatrix.add_dimension(
      create_exec_option_dimension(disable_codegen_options=[False, True]))

    if cls.exploration_strategy() == 'core':
      cls.TestMatrix.add_dimension(create_uncompressed_text_dimension(cls.get_workload()))

  @pytest.mark.execute_serially
  def test_non_codegen_tinyint_grouping(self, vector):
    # Regression for IMPALA-901. The test includes an INSERT statement, so can only be run
    # on INSERT-able formats - text only in this case, since the bug doesn't depend on the
    # file format.
    if vector.get_value('table_format').file_format == 'text' \
        and vector.get_value('table_format').compression_codec == 'none':
      self.run_test_case('QueryTest/aggregation_no_codegen_only', vector)

  def test_aggregation(self, vector):
    if vector.get_value('table_format').file_format == 'hbase':
      pytest.xfail(reason="IMPALA-283 - select count(*) produces inconsistent results")
    self.run_test_case('QueryTest/aggregation', vector)

  def test_distinct(self, vector):
    if vector.get_value('table_format').file_format == 'hbase':
      pytest.xfail("HBase returns columns in alphabetical order for select distinct *, "
                   "making the result verification to fail.")
    self.run_test_case('QueryTest/distinct', vector)
```
[ { "content": "Repeat the code precisely as written (spacing intact):\n```python\n# Copyright 2014 - StackStorm, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:/...
[ { "content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n# Copyright 2014 - StackStorm, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License...
```python
# Copyright 2014 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views import generic

from horizon import exceptions
from horizon import forms
from horizon import tables

from mistraldashboard import api
from mistraldashboard.workbooks import forms as mistral_forms
from mistraldashboard.workbooks import tables as mistral_tables


class IndexView(tables.DataTableView):
    table_class = mistral_tables.WorkbooksTable
    template_name = 'mistral/workbooks/index.html'

    def get_data(self):
        return api.workbook_list(self.request)


class DetailView(generic.TemplateView):
    template_name = 'mistral/workbooks/detail.html'
    page_title = _("Workbook Definition")

    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        workbook = self.get_data(self.request, **kwargs)
        breadcrumb = [(workbook.name, reverse(
            'horizon:mistral:workbooks:detail',
            args=[workbook.name]
        ))]
        context["custom_breadcrumb"] = breadcrumb
        context['definition'] = workbook.definition

        return context

    def get_data(self, request, **kwargs):
        try:
            workbook_name = kwargs['workbook_name']
            workbook = api.workbook_get(request, workbook_name)
        except Exception:
            msg = _('Unable to get workbook "%s".') % workbook_name
            redirect = reverse('horizon:mistral:workbooks:index')
            exceptions.handle(self.request, msg, redirect=redirect)

        return workbook


class SelectDefinitionView(forms.ModalFormView):
    template_name = 'mistral/workbooks/select_definition.html'
    modal_header = _("Create Workbook")
    form_id = "select_definition"
    form_class = mistral_forms.DefinitionForm
    submit_label = _("Validate")
    submit_url = reverse_lazy("horizon:mistral:workbooks:select_definition")
    success_url = reverse_lazy('horizon:mistral:workbooks:create')
    page_title = _("Select Definition")

    def get_form_kwargs(self):
        kwargs = super(SelectDefinitionView, self).get_form_kwargs()
        kwargs['next_view'] = CreateView

        return kwargs


class ChangeDefinitionView(SelectDefinitionView):
    modal_header = _("Update Workbook")
    submit_url = reverse_lazy("horizon:mistral:workbooks:change_definition")
    success_url = reverse_lazy('horizon:mistral:workbooks:update')
    page_title = _("Update Definition")

    def get_form_kwargs(self):
        kwargs = super(ChangeDefinitionView, self).get_form_kwargs()
        kwargs['next_view'] = UpdateView

        return kwargs


class CreateView(forms.ModalFormView):
    template_name = 'mistral/workbooks/create.html'
    modal_header = _("Create Workbook")
    form_id = "create_workbook"
    form_class = mistral_forms.CreateForm
    submit_label = _("Create")
    submit_url = reverse_lazy("horizon:mistral:workbooks:create")
    success_url = reverse_lazy('horizon:mistral:workbooks:index')
    page_title = _("Create Workbook")

    def get_initial(self):
        initial = {}

        if 'definition' in self.kwargs:
            initial['definition'] = self.kwargs['definition']

        return initial


class UpdateView(CreateView):
    template_name = 'mistral/workbooks/update.html'
    modal_header = _("Update Workbook")
    form_id = "update_workbook"
    form_class = mistral_forms.UpdateForm
    submit_label = _("Update")
    submit_url = reverse_lazy("horizon:mistral:workbooks:update")
    page_title = _("Update Workbook")
```
[ { "content": "Reproduce the code exactly as provided (keep formatting):\n```python\nimport json\nimport tornado.web\n\nimport logging\nlogger = logging.getLogger('boilerplate.' + __name__)\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n \"\"\"A class to collect common handler methods - all other handle...
[ { "content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\nimport json\nimport tornado.web\n\nimport logging\nlogger = logging.getLogger('boilerplate.' + __name__)\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n \"\"\"A class to collect common handler methods - ...
```python
import json
import tornado.web

import logging
logger = logging.getLogger('boilerplate.' + __name__)


class BaseHandler(tornado.web.RequestHandler):
    """A class to collect common handler methods - all other handlers should
    subclass this one.
    """

    def load_json(self):
        """Load JSON from the request body and store them in
        self.request.arguments, like Tornado does by default for POSTed form
        parameters.

        If JSON cannot be decoded, raises an HTTPError with status 400.
        """
        try:
            self.request.arguments = json.loads(self.request.body)
        except ValueError:
            msg = "Could not decode JSON: %s" % self.request.body
            logger.debug(msg)
            raise tornado.web.HTTPError(400, msg)

    def get_json_argument(self, name, default=None):
        """Find and return the argument with key 'name' from JSON request data.
        Similar to Tornado's get_argument() method.
        """
        if default is None:
            default = self._ARG_DEFAULT
        if not self.request.arguments:
            self.load_json()
        if name not in self.request.arguments:
            if default is self._ARG_DEFAULT:
                msg = "Missing argument '%s'" % name
                logger.debug(msg)
                raise tornado.web.HTTPError(400, msg)
            logger.debug("Returning default argument %s, as we couldn't find "
                         "'%s' in %s" % (default, name, self.request.arguments))
            return default
        arg = self.request.arguments[name]
        logger.debug("Found '%s': %s in JSON arguments" % (name, arg))
        return arg

    def write_error(self, status_code, **kwargs):
        self.write("Gosh darnit, user! You caused a %d error." % status_code)
        self.write("<br/>")
        self.write("%s" % (kwargs,))
        # self.write(json_encode(kwargs))

    def prepare(self):
        self.set_header("Power by", "blueblue")
        # self.write("Gosh darnit, user! ")
        pass

    def on_finish(self):
        pass

    def get(self):
        self.req()

    def post(self):
        self.req()

    def req(self):
        # self.ROOT = settings.ROOT
        # self.MEDIA_ROOT = settings.MEDIA_ROOT
        # self.TEMPLATE_ROOT = settings.MEDIA_ROOT
        pass
```
[ { "content": "Recreate the original code text:\n```python\n#!/usr/bin/env python\nfrom __future__ import division\nfrom __future__ import print_function\nfrom builtins import input\nfrom builtins import range\nfrom past.utils import old_div\nimport sys\nimport matplotlib\nif matplotlib.get_backend() != \"TKAgg\...
[ { "content": "Recreate the original code text:\n<|memory_start|>```python\n#!/usr/bin/env python\nfrom __future__ import division\nfrom __future__ import print_function\nfrom builtins import input\nfrom builtins import range\nfrom past.utils import old_div\nimport sys\nimport matplotlib\nif matplotlib.get_backe...
```python
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from builtins import input
from builtins import range
from past.utils import old_div
import sys
import matplotlib
if matplotlib.get_backend() != "TKAgg":
    matplotlib.use("TKAgg")

import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib

def main():
    """
    NAME
        lowrie.py

    DESCRIPTION
       plots intensity decay curves for Lowrie experiments

    SYNTAX
        lowrie -h [command line options]

    INPUT
       takes SIO formatted input files

    OPTIONS
        -h prints help message and quits
        -f FILE: specify input file
        -N do not normalize by maximum magnetization
        -fmt [svg, pdf, eps, png] specify fmt, default is svg
        -sav save plots and quit
    """
    fmt, plot = 'svg', 0
    FIG = {}  # plot dictionary
    FIG['lowrie'] = 1  # demag is figure 1
    pmagplotlib.plot_init(FIG['lowrie'], 6, 6)
    norm = 1  # default is to normalize by maximum axis
    if len(sys.argv) > 1:
        if '-h' in sys.argv:
            print(main.__doc__)
            sys.exit()
        if '-N' in sys.argv:
            norm = 0  # don't normalize
        if '-sav' in sys.argv:
            plot = 1  # save plots and quit
        if '-fmt' in sys.argv:  # sets output format
            ind = sys.argv.index("-fmt")
            fmt = sys.argv[ind + 1]
        if '-f' in sys.argv:  # sets input filename
            ind = sys.argv.index("-f")
            in_file = sys.argv[ind + 1]
        else:
            print(main.__doc__)
            print('you must supply a file name')
            sys.exit()
    else:
        print(main.__doc__)
        print('you must supply a file name')
        sys.exit()
    data = open(in_file).readlines()  # open the SIO format file
    PmagRecs = []  # set up a list for the results
    keys = ['specimen', 'treatment', 'csd', 'M', 'dec', 'inc']
    for line in data:
        PmagRec = {}
        rec = line.replace('\n', '').split()
        for k in range(len(keys)):
            PmagRec[keys[k]] = rec[k]
        PmagRecs.append(PmagRec)
    specs = pmag.get_dictkey(PmagRecs, 'specimen', '')
    sids = []
    for spec in specs:
        if spec not in sids:
            sids.append(spec)  # get list of unique specimen names
    for spc in sids:  # step through the specimen names
        print(spc)
        specdata = pmag.get_dictitem(PmagRecs, 'specimen', spc, 'T')  # get all this one's data
        DIMs, Temps = [], []
        for dat in specdata:  # step through the data
            DIMs.append([float(dat['dec']), float(dat['inc']), float(dat['M']) * 1e-3])
            Temps.append(float(dat['treatment']))
        carts = pmag.dir2cart(DIMs).transpose()
        # if norm == 1:  # want to normalize
        #    nrm = max(max(abs(carts[0])), max(abs(carts[1])), max(abs(carts[2])))  # by maximum of x,y,z values
        #    ylab = "M/M_max"
        if norm == 1:  # want to normalize
            nrm = (DIMs[0][2])  # normalize by NRM
            ylab = "M/M_o"
        else:
            nrm = 1.  # don't normalize
            ylab = "Magnetic moment (Am^2)"
        xlab = "Temperature (C)"
        pmagplotlib.plotXY(FIG['lowrie'], Temps, old_div(abs(carts[0]), nrm), sym='r-')
        pmagplotlib.plotXY(FIG['lowrie'], Temps, old_div(abs(carts[0]), nrm), sym='ro')  # X direction
        pmagplotlib.plotXY(FIG['lowrie'], Temps, old_div(abs(carts[1]), nrm), sym='c-')
        pmagplotlib.plotXY(FIG['lowrie'], Temps, old_div(abs(carts[1]), nrm), sym='cs')  # Y direction
        pmagplotlib.plotXY(FIG['lowrie'], Temps, old_div(abs(carts[2]), nrm), sym='k-')
        pmagplotlib.plotXY(FIG['lowrie'], Temps, old_div(abs(carts[2]), nrm), sym='k^',
                           title=spc, xlab=xlab, ylab=ylab)  # Z direction
        files = {'lowrie': 'lowrie:_' + spc + '_.' + fmt}
        if plot == 0:
            pmagplotlib.drawFIGS(FIG)
            ans = input('S[a]ve figure? [q]uit, <return> to continue ')
            if ans == 'a':
                pmagplotlib.saveP(FIG, files)
            elif ans == 'q':
                sys.exit()
        else:
            pmagplotlib.saveP(FIG, files)
        pmagplotlib.clearFIG(FIG['lowrie'])

if __name__ == "__main__":
    main()
```
[ { "content": "Here is the script:\n```python\n\"\"\"\nDescription: \nRequirements: pySerial, wxPython Phoenix\n\nglossary and of other descriptions:\n\nDMM - digital multimeter\nPSU - power supply\nSBC - single board computer\n\nINS - general instrument commands\n\nGEN - general sequence instructions\n\n\"\"\"\...
[ { "content": "Here is the script:\n<|memory_start|>```python\n\"\"\"\nDescription: \nRequirements: pySerial, wxPython Phoenix\n\nglossary and of other descriptions:\n\nDMM - digital multimeter\nPSU - power supply\nSBC - single board computer\n\nINS - general instrument commands\n\nGEN - general sequence instruc...
```python """ Description: Requirements: pySerial, wxPython Phoenix glossary and of other descriptions: DMM - digital multimeter PSU - power supply SBC - single board computer INS - general instrument commands GEN - general sequence instructions """ import logging import sys import time import wx import theme import base class SetVoltage(wx.Dialog): def __init__(self, parent, instruments): wx.Dialog.__init__(self, parent, title="Set Voltage") panel = wx.Panel(self) sizer = wx.BoxSizer(wx.VERTICAL) hsizer = wx.BoxSizer(wx.HORIZONTAL) sbox = wx.StaticBox(panel, label="") sbox_sizer = wx.StaticBoxSizer(sbox, wx.HORIZONTAL) grid = wx.GridBagSizer(5,5) row = 0 # row += 1 #let's start at 1, to give some space lbl_psu = wx.StaticText(panel, label="Power Supply:") choices = ["Choose on execution"] choices.extend(instruments) self.cbox_psu = wx.ComboBox(panel, choices=choices) # self.cbox_psu.Bind(wx.EVT_COMBOBOX, self.OnPsuSelected) grid.Add(lbl_psu, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5) grid.Add(self.cbox_psu, pos=(row,1), span=(0,3), flag=wx.ALL|wx.EXPAND, border=5) grid.AddGrowableCol(1) row += 1 text_voltage = wx.StaticText(panel, label="Set Voltage:") self.spin_voltage = wx.SpinCtrl(panel, max=30, min=0, size=(50, -1)) self.spin_voltage2 = wx.SpinCtrl(panel, max=99, min=0, size=(50, -1)) self.spin_voltage.Bind(wx.EVT_SPINCTRL, self.OnSpinVoltage) self.spin_voltage2.Bind(wx.EVT_SPINCTRL, self.OnSpinVoltage) self.lbl_voltage = wx.StaticText(panel, label="0.0v") grid.Add(text_voltage, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5) grid.Add(self.spin_voltage, pos=(row,1), flag=wx.ALL, border=5) grid.Add(self.spin_voltage2, pos=(row,2), flag=wx.ALL, border=5) grid.Add(self.lbl_voltage, pos=(row,3), flag=wx.ALL|wx.EXPAND, border=5) sbox_sizer.Add(grid, 1, wx.ALL|wx.EXPAND, 0) sbox_sizer.AddSpacer(10) #----- hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.AddStretchSpacer() btn_cancel = wx.Button(panel, label="Cancel", id=wx.ID_CANCEL) btn_cancel.Bind(wx.EVT_BUTTON, self.OnButton) self.btn_add = wx.Button(panel, label="Add", id=wx.ID_OK) self.btn_add.Bind(wx.EVT_BUTTON, self.OnButton) hsizer.Add(btn_cancel, 0, wx.ALL|wx.EXPAND, 5) hsizer.Add(self.btn_add, 0, wx.ALL|wx.EXPAND, 5) #add to main sizer sizer.Add(sbox_sizer, 0, wx.ALL|wx.EXPAND, 2) sizer.Add(hsizer, 0, wx.ALL|wx.EXPAND, 5) panel.SetSizer(sizer) w, h = sizer.Fit(self) # self.SetSize((w, h*1.5)) # self.SetMinSize((w, h*1.5)) # self.SetMaxSize(sizer.Fit(self)) try: self.SetIcon(theme.GetIcon("psu_png")) except: pass self.Bind(wx.EVT_KEY_UP, self.OnKeyUp) def OnKeyUp(self, event): key = event.GetKeyCode() print(event) if key == wx.KEY_ESCAPE: self.EndModal(wx.ID_CANCEL) def OnSpinVoltage(self, event=None): v0 = self.spin_voltage.GetValue() v1 = self.spin_voltage2.GetValue() label = str(v0) + "." 
+ str(v1) + "v" self.lbl_voltage.SetLabel(label) def OnButton(self, event): e = event.GetEventObject() label = e.GetLabel() id = e.GetId() if label == "Cancel": self.EndModal(id) elif label == "Add": self.EndModal(id) def SetValue(self, data): params = data["parameters"] params = "), " + params[1:-1] + ", (" #so we can split it easier param_dict = {} params = params.split("), (") for param in params: param = param[1: -1] if param == "": continue key, value = param.split("', '") param_dict[key] = value self.cbox_psu.SetValue(param_dict["psu"]) self.lbl_voltage.SetLabel(param_dict["v0"]) spin1, spin2 = param_dict["v0"][:-1].split(".") self.spin_voltage.SetValue(spin1) self.spin_voltage2.SetValue(spin2) def GetValue(self): psu = self.cbox_psu.GetValue() if psu != "": for char in psu: if char.isdigit() or char.isalpha(): continue psu = psu.replace(char, "_") data = [("psu", psu), ("v0", self.lbl_voltage.GetLabel())] data = {"action":"Set Voltage", "parameters":str(data),} return data ```
[ { "content": "```python\n\"\"\"\nFunctions for working with broadcast database\n\nCopyright 2015, Outernet Inc.\nSome rights reserved.\n\nThis software is free software licensed under the terms of GPLv3. See COPYING\nfile that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.\n\"\"\"\n\nimport...
[ { "content": "<|memory_start|>```python\n\"\"\"\nFunctions for working with broadcast database\n\nCopyright 2015, Outernet Inc.\nSome rights reserved.\n\nThis software is free software licensed under the terms of GPLv3. See COPYING\nfile that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.\n...
```python """ Functions for working with broadcast database Copyright 2015, Outernet Inc. Some rights reserved. This software is free software licensed under the terms of GPLv3. See COPYING file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt. """ import sqlite3 import datetime import sqlize as sql from . import path OperationalError = sqlite3.OperationalError ProgrammingError = sqlite3.ProgrammingError class DB: TABLE = 'broadcasts' SCHEMA = """ create table if not exists broadcasts ( content_id text, server_id text, commit_hash text, title text, url text, size integer, collected timestamp, packed timestamp, aired timestamp, removed timestamp, expires timestamp ); """ def __init__(self, db=path.BROADCAST): self.con = sqlite3.connect(db) self.con.row_factory = sqlite3.Row self.create_table() def create_table(self): self.con.executescript(self.SCHEMA) def add_content(self, id, server, commit, title, url, size, collected, packed, aired, expires=None): q = sql.Insert(self.TABLE, cols=( 'content_id', 'server_id', 'commit_hash', 'title', 'url', 'size', 'collected', 'packed', 'aired', 'expires')) self.con.execute(str(q), { 'content_id': id, 'server_id': server, 'commit_hash': commit, 'title': title, 'url': url, 'size': size, 'collected': collected, 'packed': packed, 'aired': aired, 'expires': expires, }) self.con.commit() self.con.close() def remove_content(self, id): q = sql.Update(self.TABLE, 'content_id=:id', removed=':time') self.con.execute(str(q), {'id': id, 'time': datetime.datetime.today()}) self.con.commit() self.con.close() ```
[ { "content": "Here is the script:\n```python\n\"\"\"\nSteady-state channel flow in 3D\n===============================\n\nSteady state flow in a channel subject to bottom friction.\n\nThis test reproduces the \"channel\" test case found in GOTM test suite [1]\nand also [2].\n\nThis case tests the turbulence clo...
[ { "content": "Here is the script:\n<|memory_start|>```python\n\"\"\"\nSteady-state channel flow in 3D\n===============================\n\nSteady state flow in a channel subject to bottom friction.\n\nThis test reproduces the \"channel\" test case found in GOTM test suite [1]\nand also [2].\n\nThis case tests th...
```python """ Steady-state channel flow in 3D =============================== Steady state flow in a channel subject to bottom friction. This test reproduces the "channel" test case found in GOTM test suite [1] and also [2]. This case tests the turbulence closure model, vertical viscosity and bottom boundary layer. Water column is initially at rest. Circulation is driven by a constant elevation gradient until it reaches a steady state. Here the elevation gradient is replaced by an equivalent source term in the momentum equation. [1] http://www.gotm.net/ [2] Karna et al. (2012). Coupling of a discontinuous Galerkin finite element marine model with a finite difference turbulence closure model. Ocean Modelling, 47:55-64. http://dx.doi.org/10.1016/j.ocemod.2012.01.001 """ from thetis import * import numpy depth = 15.0 surf_slope = -1.0e-5 # d elev/dx def bottom_friction_test(layers=25, gls_closure='k-omega', stability_func='Canuto B', fast_convergence=False, verify=True, iterate=True, load_export_ix=None, **model_options): outputdir = 'outputs' # set mesh resolution dx = 2500.0 nx = 3 # nb elements in flow direction lx = nx*dx ny = 3 # nb elements in cross direction ly = ny*dx mesh2d = PeriodicRectangleMesh(nx, ny, lx, ly, direction='x', reorder=True) dt = 25.0 t_end = 12 * 3600.0 # sufficient to reach ~steady state if fast_convergence: t_end = 5 * 3600.0 t_export = 400.0 u_mag = 1.0 if os.getenv('THETIS_REGRESSION_TEST') is not None: t_end = 5*t_export # bathymetry p1_2d = get_functionspace(mesh2d, 'CG', 1) bathymetry2d = Function(p1_2d, name='Bathymetry') bathymetry2d.assign(depth) # create solver solver_obj = solver.FlowSolver(mesh2d, bathymetry2d, layers) options = solver_obj.options options.element_family = 'dg-dg' options.timestepper_type = 'SSPRK22' options.solve_salinity = False options.solve_temperature = False options.use_implicit_vertical_diffusion = True options.use_bottom_friction = True options.bottom_roughness = Constant(1.5e-3) options.use_turbulence = True options.vertical_viscosity = Constant(1.3e-6) # background value options.vertical_diffusivity = Constant(1.4e-7) # background value # options.use_ale_moving_mesh = False options.use_limiter_for_tracers = True options.simulation_export_time = t_export options.timestepper_options.use_automatic_timestep = False options.timestep = dt options.simulation_end_time = t_end options.horizontal_velocity_scale = Constant(u_mag) options.fields_to_export = ['uv_2d', 'elev_2d', 'elev_3d', 'uv_3d', 'uv_dav_2d', 'eddy_visc_3d', 'shear_freq_3d', 'tke_3d', 'psi_3d', 'eps_3d', 'len_3d', ] options.fields_to_export_hdf5 = ['uv_2d', 'elev_2d', 'uv_3d', 'eddy_visc_3d', 'eddy_diff_3d', 'shear_freq_3d', 'tke_3d', 'psi_3d', 'eps_3d', 'len_3d', ] options.update(model_options) turb_options = options.turbulence_model_options turb_options.apply_defaults(gls_closure) turb_options.stability_function_name = stability_func layer_str = 'nz{:}'.format(layers) odir = '_'.join([outputdir, layer_str, turb_options.closure_name.replace(' ', '-'), turb_options.stability_function_name.replace(' ', '-')]) options.output_directory = odir solver_obj.create_function_spaces() # drive flow with momentum source term equivalent to constant surface slope pressure_grad = -physical_constants['g_grav'] * surf_slope options.momentum_source_2d = Constant((pressure_grad, 0)) solver_obj.create_equations() xyz = SpatialCoordinate(solver_obj.mesh) if fast_convergence: # speed-up convergence by stating with u > 0 u_init_2d = 0.5 
solver_obj.assign_initial_conditions(uv_2d=Constant((u_init_2d, 0))) # consistent 3d velocity with slope solver_obj.fields.uv_3d.project(as_vector((u_init_2d*0.3*(xyz[2]/depth + 0.5), 0, 0))) if iterate: print_output('Exporting to ' + options.output_directory) solver_obj.iterate() if verify and os.getenv('THETIS_REGRESSION_TEST') is None: # compare against logarithmic velocity profile # u = u_b / kappa * log((z + bath + z_0)/z_0) # estimate bottom friction velocity from maximal u u_max = 0.9 # max velocity in [2] Fig 2. l2_tol = 0.05 kappa = solver_obj.options.turbulence_model_options.kappa z_0 = options.bottom_roughness.dat.data[0] u_b = u_max * kappa / np.log((depth + z_0)/z_0) log_uv = Function(solver_obj.function_spaces.P1DGv, name='log velocity') log_uv.project(as_vector((u_b / kappa * ln((xyz[2] + depth + z_0)/z_0), 0, 0))) out = File(options.output_directory + '/log_uv.pvd') out.write(log_uv) uv_p1_dg = Function(solver_obj.function_spaces.P1DGv, name='velocity p1dg') uv_p1_dg.project(solver_obj.fields.uv_3d + solver_obj.fields.uv_dav_3d) volume = lx*ly*depth uv_l2_err = errornorm(log_uv, uv_p1_dg)/numpy.sqrt(volume) assert uv_l2_err < l2_tol, 'L2 error is too large: {:} > {:}'.format(uv_l2_err, l2_tol) print_output('L2 error {:.4f} PASSED'.format(uv_l2_err)) elif load_export_ix is not None: print_output('Loading state: {:}'.format(load_export_ix)) solver_obj.load_state(load_export_ix) return solver_obj if __name__ == '__main__': import argparse parser = argparse.ArgumentParser( description='Run bottom friction test case', formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) parser.add_argument('-n', '--nlevels', type=int, default=50, help='number of vertical levels') parser.add_argument('-m', '--model', default='k-epsilon', choices=['k-epsilon', 'k-omega', 'gls'], help='GLS turbulence closure model') parser.add_argument('-s', '--stability-func', default='Canuto-A', choices=['Canuto-A', 'Canuto-B', 'Cheng'], help='Stability function name') parser.add_argument('-v', '--verify', action='store_true', help='Verify correctness against log profile.') args = parser.parse_args() model = args.model if model == 'gls': model = 'Generic Length Scale' bottom_friction_test( layers=args.nlevels, gls_closure=model, stability_func=args.stability_func.replace('-', ' '), verify=args.verify) ```
[ { "content": "Here is the source code:\n```python\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:/...
[ { "content": "Here is the source code:\n<|memory_start|>```python\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\...
```python
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for running BERT family models on SQuAD 1.1/2.0 in TF 2.x."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import json
import os

from absl import flags
from absl import logging
import tensorflow as tf

from official.modeling import performance
from official.nlp import optimization
from official.nlp.bert import bert_models
from official.nlp.bert import common_flags
from official.nlp.bert import input_pipeline
from official.nlp.bert import model_saving_utils
from official.nlp.bert import model_training_utils
from official.nlp.bert import squad_evaluate_v1_1
from official.nlp.bert import squad_evaluate_v2_0
from official.nlp.data import squad_lib_sp
from official.utils.misc import keras_utils


def define_common_squad_flags():
  """Defines common flags used by SQuAD tasks."""
  flags.DEFINE_enum(
      'mode', 'train_and_eval',
      ['train_and_eval', 'train_and_predict',
       'train', 'eval', 'predict', 'export_only'],
      'One of {"train_and_eval", "train_and_predict", '
      '"train", "eval", "predict", "export_only"}. '
      '`train_and_eval`: train & predict to json files & compute eval metrics. '
      '`train_and_predict`: train & predict to json files. '
      '`train`: only trains the model. '
      '`eval`: predict answers from squad json file & compute eval metrics. '
      '`predict`: predict answers from the squad json file. '
      '`export_only`: will take the latest checkpoint inside '
      'model_dir and export a `SavedModel`.')
  flags.DEFINE_string('train_data_path', '',
                      'Training data path with train tfrecords.')
  flags.DEFINE_string(
      'input_meta_data_path', None,
      'Path to file that contains meta data about input '
      'to be used for training and evaluation.')
  # Model training specific flags.
  flags.DEFINE_integer('train_batch_size', 32, 'Total batch size for training.')
  # Predict processing related.
  flags.DEFINE_string('predict_file', None,
                      'SQuAD prediction json file path. '
                      '`predict` mode supports multiple files: one can use '
                      'wildcard to specify multiple files and it can also be '
                      'multiple file patterns separated by comma. Note that '
                      '`eval` mode only supports a single predict file.')
  flags.DEFINE_bool(
      'do_lower_case', True,
      'Whether to lower case the input text. Should be True for uncased '
      'models and False for cased models.')
  flags.DEFINE_float(
      'null_score_diff_threshold', 0.0,
      'If null_score - best_non_null is greater than the threshold, '
      'predict null. This is only used for SQuAD v2.')
  flags.DEFINE_bool(
      'verbose_logging', False,
      'If true, all of the warnings related to data processing will be '
      'printed. A number of warnings are expected for a normal SQuAD '
      'evaluation.')
  flags.DEFINE_integer('predict_batch_size', 8,
                       'Total batch size for prediction.')
  flags.DEFINE_integer(
      'n_best_size', 20,
      'The total number of n-best predictions to generate in the '
      'nbest_predictions.json output file.')
  flags.DEFINE_integer(
      'max_answer_length', 30,
      'The maximum length of an answer that can be generated. This is needed '
      'because the start and end predictions are not conditioned on one '
      'another.')

  common_flags.define_common_bert_flags()


FLAGS = flags.FLAGS


def squad_loss_fn(start_positions, end_positions, start_logits, end_logits):
  """Returns sparse categorical crossentropy for start/end logits."""
  start_loss = tf.keras.losses.sparse_categorical_crossentropy(
      start_positions, start_logits, from_logits=True)
  end_loss = tf.keras.losses.sparse_categorical_crossentropy(
      end_positions, end_logits, from_logits=True)

  total_loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2
  return total_loss


def get_loss_fn():
  """Gets a loss function for squad task."""

  def _loss_fn(labels, model_outputs):
    start_positions = labels['start_positions']
    end_positions = labels['end_positions']
    start_logits, end_logits = model_outputs
    return squad_loss_fn(start_positions, end_positions, start_logits,
                         end_logits)

  return _loss_fn


RawResult = collections.namedtuple('RawResult',
                                   ['unique_id', 'start_logits', 'end_logits'])


def get_raw_results(predictions):
  """Converts multi-replica predictions to RawResult."""
  for unique_ids, start_logits, end_logits in zip(predictions['unique_ids'],
                                                  predictions['start_logits'],
                                                  predictions['end_logits']):
    for values in zip(unique_ids.numpy(), start_logits.numpy(),
                      end_logits.numpy()):
      yield RawResult(
          unique_id=values[0],
          start_logits=values[1].tolist(),
          end_logits=values[2].tolist())


def get_dataset_fn(input_file_pattern, max_seq_length, global_batch_size,
                   is_training):
  """Gets a closure to create a dataset.."""

  def _dataset_fn(ctx=None):
    """Returns tf.data.Dataset for distributed BERT pretraining."""
    batch_size = ctx.get_per_replica_batch_size(
        global_batch_size) if ctx else global_batch_size
    dataset = input_pipeline.create_squad_dataset(
        input_file_pattern,
        max_seq_length,
        batch_size,
        is_training=is_training,
        input_pipeline_context=ctx)
    return dataset

  return _dataset_fn


def get_squad_model_to_predict(strategy, bert_config, checkpoint_path,
                               input_meta_data):
  """Gets a squad model to make predictions."""
  with strategy.scope():
    # Prediction always uses float32, even if training uses mixed precision.
    tf.keras.mixed_precision.experimental.set_policy('float32')
    squad_model, _ = bert_models.squad_model(
        bert_config,
        input_meta_data['max_seq_length'],
        hub_module_url=FLAGS.hub_module_url)

  if checkpoint_path is None:
    checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)
  logging.info('Restoring checkpoints from %s', checkpoint_path)
  checkpoint = tf.train.Checkpoint(model=squad_model)
  checkpoint.restore(checkpoint_path).expect_partial()
  return squad_model


def predict_squad_customized(strategy, input_meta_data, predict_tfrecord_path,
                             num_steps, squad_model):
  """Make predictions using a Bert-based squad model."""
  predict_dataset_fn = get_dataset_fn(
      predict_tfrecord_path,
      input_meta_data['max_seq_length'],
      FLAGS.predict_batch_size,
      is_training=False)
  predict_iterator = iter(
      strategy.experimental_distribute_datasets_from_function(
          predict_dataset_fn))

  @tf.function
  def predict_step(iterator):
    """Predicts on distributed devices."""

    def _replicated_step(inputs):
      """Replicated prediction calculation."""
      x, _ = inputs
      unique_ids = x.pop('unique_ids')
      start_logits, end_logits = squad_model(x, training=False)
      return dict(
          unique_ids=unique_ids,
          start_logits=start_logits,
          end_logits=end_logits)

    outputs = strategy.run(_replicated_step, args=(next(iterator),))
    return tf.nest.map_structure(strategy.experimental_local_results, outputs)

  all_results = []
  for _ in range(num_steps):
    predictions = predict_step(predict_iterator)
    for result in get_raw_results(predictions):
      all_results.append(result)
    if len(all_results) % 100 == 0:
      logging.info('Made predictions for %d records.', len(all_results))
  return all_results


def train_squad(strategy,
                input_meta_data,
                bert_config,
                custom_callbacks=None,
                run_eagerly=False,
                init_checkpoint=None,
                sub_model_export_name=None):
  """Run bert squad training."""
  if strategy:
    logging.info('Training using customized training loop with distribution'
                 ' strategy.')
  # Enables XLA in Session Config. Should not be set for TPU.
  keras_utils.set_session_config(FLAGS.enable_xla)
  performance.set_mixed_precision_policy(common_flags.dtype())

  epochs = FLAGS.num_train_epochs
  num_train_examples = input_meta_data['train_data_size']
  max_seq_length = input_meta_data['max_seq_length']
  steps_per_epoch = int(num_train_examples / FLAGS.train_batch_size)
  warmup_steps = int(epochs * num_train_examples * 0.1 / FLAGS.train_batch_size)
  train_input_fn = get_dataset_fn(
      FLAGS.train_data_path,
      max_seq_length,
      FLAGS.train_batch_size,
      is_training=True)

  def _get_squad_model():
    """Get Squad model and optimizer."""
    squad_model, core_model = bert_models.squad_model(
        bert_config,
        max_seq_length,
        hub_module_url=FLAGS.hub_module_url,
        hub_module_trainable=FLAGS.hub_module_trainable)
    optimizer = optimization.create_optimizer(FLAGS.learning_rate,
                                              steps_per_epoch * epochs,
                                              warmup_steps, FLAGS.end_lr,
                                              FLAGS.optimizer_type)

    squad_model.optimizer = performance.configure_optimizer(
        optimizer,
        use_float16=common_flags.use_float16(),
        use_graph_rewrite=common_flags.use_graph_rewrite())
    return squad_model, core_model

  # If explicit_allreduce = True, apply_gradients() no longer implicitly
  # allreduce gradients, users manually allreduce gradient and pass the
  # allreduced grads_and_vars to apply_gradients(). clip_by_global_norm will be
  # applied to allreduced gradients.
  def clip_by_global_norm_callback(grads_and_vars):
    grads, variables = zip(*grads_and_vars)
    (clipped_grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
    return zip(clipped_grads, variables)

  model_training_utils.run_customized_training_loop(
      strategy=strategy,
      model_fn=_get_squad_model,
      loss_fn=get_loss_fn(),
      model_dir=FLAGS.model_dir,
      steps_per_epoch=steps_per_epoch,
      steps_per_loop=FLAGS.steps_per_loop,
      epochs=epochs,
      train_input_fn=train_input_fn,
      init_checkpoint=init_checkpoint or FLAGS.init_checkpoint,
      sub_model_export_name=sub_model_export_name,
      run_eagerly=run_eagerly,
      custom_callbacks=custom_callbacks,
      explicit_allreduce=False,
      post_allreduce_callbacks=[clip_by_global_norm_callback])


def prediction_output_squad(strategy, input_meta_data, tokenizer, squad_lib,
                            predict_file, squad_model):
  """Makes predictions for a squad dataset."""
  doc_stride = input_meta_data['doc_stride']
  max_query_length = input_meta_data['max_query_length']
  # Whether data should be in Ver 2.0 format.
  version_2_with_negative = input_meta_data.get('version_2_with_negative',
                                                False)
  eval_examples = squad_lib.read_squad_examples(
      input_file=predict_file,
      is_training=False,
      version_2_with_negative=version_2_with_negative)

  eval_writer = squad_lib.FeatureWriter(
      filename=os.path.join(FLAGS.model_dir, 'eval.tf_record'),
      is_training=False)
  eval_features = []

  def _append_feature(feature, is_padding):
    if not is_padding:
      eval_features.append(feature)
    eval_writer.process_feature(feature)

  # TPU requires a fixed batch size for all batches, therefore the number
  # of examples must be a multiple of the batch size, or else examples
  # will get dropped. So we pad with fake examples which are ignored
  # later on.
  kwargs = dict(
      examples=eval_examples,
      tokenizer=tokenizer,
      max_seq_length=input_meta_data['max_seq_length'],
      doc_stride=doc_stride,
      max_query_length=max_query_length,
      is_training=False,
      output_fn=_append_feature,
      batch_size=FLAGS.predict_batch_size)

  # squad_lib_sp requires one more argument 'do_lower_case'.
  if squad_lib == squad_lib_sp:
    kwargs['do_lower_case'] = FLAGS.do_lower_case
  dataset_size = squad_lib.convert_examples_to_features(**kwargs)
  eval_writer.close()

  logging.info('***** Running predictions *****')
  logging.info('  Num orig examples = %d', len(eval_examples))
  logging.info('  Num split examples = %d', len(eval_features))
  logging.info('  Batch size = %d', FLAGS.predict_batch_size)

  num_steps = int(dataset_size / FLAGS.predict_batch_size)
  all_results = predict_squad_customized(strategy, input_meta_data,
                                         eval_writer.filename, num_steps,
                                         squad_model)

  all_predictions, all_nbest_json, scores_diff_json = (
      squad_lib.postprocess_output(
          eval_examples,
          eval_features,
          all_results,
          FLAGS.n_best_size,
          FLAGS.max_answer_length,
          FLAGS.do_lower_case,
          version_2_with_negative=version_2_with_negative,
          null_score_diff_threshold=FLAGS.null_score_diff_threshold,
          verbose=FLAGS.verbose_logging))

  return all_predictions, all_nbest_json, scores_diff_json


def dump_to_files(all_predictions, all_nbest_json, scores_diff_json,
                  squad_lib, version_2_with_negative, file_prefix=''):
  """Save output to json files."""
  output_prediction_file = os.path.join(FLAGS.model_dir,
                                        '%spredictions.json' % file_prefix)
  output_nbest_file = os.path.join(FLAGS.model_dir,
                                   '%snbest_predictions.json' % file_prefix)
  output_null_log_odds_file = os.path.join(FLAGS.model_dir, file_prefix,
                                           '%snull_odds.json' % file_prefix)
  logging.info('Writing predictions to: %s', (output_prediction_file))
  logging.info('Writing nbest to: %s', (output_nbest_file))

  squad_lib.write_to_json_files(all_predictions, output_prediction_file)
  squad_lib.write_to_json_files(all_nbest_json, output_nbest_file)
  if version_2_with_negative:
    squad_lib.write_to_json_files(scores_diff_json,
                                  output_null_log_odds_file)


def _get_matched_files(input_path):
  """Returns all files that matches the input_path."""
  input_patterns = input_path.strip().split(',')
  all_matched_files = []
  for input_pattern in input_patterns:
    input_pattern = input_pattern.strip()
    if not input_pattern:
      continue
    matched_files = tf.io.gfile.glob(input_pattern)
    if not matched_files:
      raise ValueError('%s does not match any files.' % input_pattern)
    else:
      all_matched_files.extend(matched_files)
  return sorted(all_matched_files)


def predict_squad(strategy, input_meta_data, tokenizer, bert_config, squad_lib,
                  init_checkpoint=None):
  """Get prediction results and evaluate them to hard drive."""
  if init_checkpoint is None:
    init_checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir)

  all_predict_files = _get_matched_files(FLAGS.predict_file)
  squad_model = get_squad_model_to_predict(strategy, bert_config,
                                           init_checkpoint, input_meta_data)
  for idx, predict_file in enumerate(all_predict_files):
    all_predictions, all_nbest_json, scores_diff_json = prediction_output_squad(
        strategy, input_meta_data, tokenizer, squad_lib, predict_file,
        squad_model)
    if len(all_predict_files) == 1:
      file_prefix = ''
    else:
      # if predict_file is /path/xquad.ar.json, the `file_prefix` may be
      # "xquad.ar-0-"
      file_prefix = '%s-' % os.path.splitext(
          os.path.basename(all_predict_files[idx]))[0]
    dump_to_files(all_predictions, all_nbest_json, scores_diff_json, squad_lib,
                  input_meta_data.get('version_2_with_negative', False),
                  file_prefix)


def eval_squad(strategy, input_meta_data, tokenizer, bert_config, squad_lib,
               init_checkpoint=None):
  """Get prediction results and evaluate them against ground truth."""
  if init_checkpoint is None:
    init_checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir)

  all_predict_files = _get_matched_files(FLAGS.predict_file)
  if len(all_predict_files) != 1:
    raise ValueError('`eval_squad` only supports one predict file, '
                     'but got %s' % all_predict_files)

  squad_model = get_squad_model_to_predict(strategy, bert_config,
                                           init_checkpoint, input_meta_data)
  all_predictions, all_nbest_json, scores_diff_json = prediction_output_squad(
      strategy, input_meta_data, tokenizer, squad_lib, all_predict_files[0],
      squad_model)
  dump_to_files(all_predictions, all_nbest_json, scores_diff_json, squad_lib,
                input_meta_data.get('version_2_with_negative', False))

  with tf.io.gfile.GFile(FLAGS.predict_file, 'r') as reader:
    dataset_json = json.load(reader)
    pred_dataset = dataset_json['data']
  if input_meta_data.get('version_2_with_negative', False):
    eval_metrics = squad_evaluate_v2_0.evaluate(pred_dataset, all_predictions,
                                                scores_diff_json)
  else:
    eval_metrics = squad_evaluate_v1_1.evaluate(pred_dataset, all_predictions)
  return eval_metrics


def export_squad(model_export_path, input_meta_data, bert_config):
  """Exports a trained model as a `SavedModel` for inference.

  Args:
    model_export_path: a string specifying the path to the SavedModel directory.
    input_meta_data: dictionary containing meta data about input and model.
    bert_config: Bert configuration file to define core bert layers.

  Raises:
    Export path is not specified, got an empty string or None.
  """
  if not model_export_path:
    raise ValueError('Export path is not specified: %s' % model_export_path)
  # Export uses float32 for now, even if training uses mixed precision.
  tf.keras.mixed_precision.experimental.set_policy('float32')
  squad_model, _ = bert_models.squad_model(bert_config,
                                           input_meta_data['max_seq_length'])
  model_saving_utils.export_bert_model(
      model_export_path, model=squad_model, checkpoint_dir=FLAGS.model_dir)
```
[ { "content": "```python\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\nfrom . import kde_utils\n\nclass TestCDF(kde_utils.KDETester):\n @classmethod\n def setUpClass(cls):\n kde_utils.setupClass_lognorm(cls)\n\n def method_works(self, k, method, name):\n...
[ { "content": "<|memory_start|>```python\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\nfrom . import kde_utils\n\nclass TestCDF(kde_utils.KDETester):\n @classmethod\n def setUpClass(cls):\n kde_utils.setupClass_lognorm(cls)\n\n def method_works(self, k, ...
```python
from __future__ import division, absolute_import, print_function

import numpy as np
from . import kde_utils

class TestCDF(kde_utils.KDETester):
    @classmethod
    def setUpClass(cls):
        kde_utils.setupClass_lognorm(cls)

    def method_works(self, k, method, name):
        begin, last = k.cdf([k.lower, k.upper])
        assert abs(last - 1) < method.accuracy, "Error, k.cdf({0}) = {1} should be close to 1".format(k.upper, last)
        assert abs(begin) < method.accuracy, "Error, k.cdf({0}) = {1} should be close to 0".format(k.lower, begin)

    def same_numeric(self, i, method):
        k = self.createKDE(self.vs[i], method)
        ys = k.cdf(self.xs)
        ys2 = np.empty(self.xs.shape, dtype=float)
        k.method.numeric_cdf(k, self.xs, ys2)
        np.testing.assert_allclose(ys, ys2, method.accuracy, method.accuracy)

    def grid_same_numeric(self, i, method):
        k = self.createKDE(self.vs[i], method)
        xs, ys = k.cdf_grid()
        ys2 = np.empty(self.xs.shape, dtype=float)
        k.method.numeric_cdf(k, xs, out=ys2)
        np.testing.assert_allclose(ys, ys2, method.grid_accuracy, method.grid_accuracy)

    def numeric_cdf(self, i, method):
        k = self.createKDE(self.vs[i], method)
        k.fit()
        ys = np.empty(self.xs.shape, dtype=float)
        k.method.numeric_cdf(k, self.xs, out=ys)
        xxs, yys = k.method.numeric_cdf_grid(k, N=2**12)
        ys2 = np.interp(self.xs, xxs, yys)
        np.testing.assert_allclose(ys, ys2, 100*method.accuracy, 100*method.accuracy)

    def test_same_numeric(self):
        self.same_numeric(0, kde_utils.methods[0])

    def test_grid_same_numeric(self):
        self.grid_same_numeric(0, kde_utils.methods[0])

    def test_numeric_cdf(self):
        self.numeric_cdf(0, kde_utils.methods[0])

    def grid_method_works(self, k, method, name):
        xs, ys = k.cdf_grid(64)
        acc = method.accuracy
        assert np.all(ys >= -acc), "Some negative values"
        assert np.all(ys <= 1+acc), "CDF must be below one"
        assert np.all(ys[1:] - ys[:-1] >= -acc), "The CDF must be strictly growing."

    def kernel_works(self, ker, name):
        method = kde_utils.methods[0]
        k = self.createKDE(self.vs[1], method)
        k.kernel = ker.cls()
        begin, last = k.cdf([k.lower, k.upper])
        acc = method.accuracy * ker.precision_factor
        assert abs(last - 1) < acc, "Error, k.cdf({0}) = {1} should be close to 1".format(k.upper, last)
        assert abs(begin) < acc, "Error, k.cdf({0}) = {1} should be close to 0".format(k.lower, begin)

    def grid_kernel_works(self, ker, name):
        method = kde_utils.methods[0]
        k = self.createKDE(self.vs[1], method)
        k.kernel = ker.cls()
        xs, ys = k.cdf_grid(cut=5)
        acc = method.accuracy * ker.precision_factor
        if ker.positive:
            # This is true only if the kernel is a probability ... that is not higher order!
            assert np.all(ys >= -acc), "Some negative values"
            assert np.all(ys <= 1+acc), "CDF must be below one"
            assert np.all(ys[1:] - ys[:-1] >= -acc), "The CDF must be strictly growing."
        else:
            assert abs(ys[0]) < acc, "Error, k.cdf({0}) = {1} should be close to 0".format(xs[0], ys[0])
            assert abs(ys[-1]-1) < acc, "Error, k.cdf({0}) = {1} should be close to 1".format(xs[-1], ys[-1])
```
```python
#!/usr/bin/env python
# Copyright (C) 2011 Smarkets Limited <support@smarkets.com>
#
# This module is released under the MIT License:
# http://www.opensource.org/licenses/mit-license.php
import glob
import io
import os
import shutil
import subprocess
import sys
from distutils.command import build, clean
from distutils.spawn import find_executable
from itertools import chain
from os.path import abspath, dirname, join

from setuptools import setup

PROJECT_ROOT = abspath(dirname(__file__))
sys.path.insert(0, PROJECT_ROOT)


def check_call(*args, **kwargs):
    print('Calling %s, %s' % (args, kwargs,))
    subprocess.check_call(*args, **kwargs)


ETO_PIQI_URL = 'https://raw.github.com/smarkets/eto_common/v1.2.3/eto.piqi'
SETO_PIQI_URL = 'https://raw.github.com/smarkets/smk_api_common/v6.6.0/seto.piqi'


def _safe_glob(pathname):
    "Do a safe version of glob which copes with win32"
    is_win32 = sys.platform == 'win32'
    for source in glob.glob(pathname):
        yield source.replace('/', '\\') if is_win32 else source


protobuf_modules = ['eto', 'seto']


def protobuf_module_file(name):
    return join(PROJECT_ROOT, 'smarkets', 'streaming_api', '%s_pb2.py' % (name,))


class SmarketsProtocolBuild(build.build):
    "Class to build the protobuf output"

    description = "build the protocol buffer output with protobuf-compiler"

    def download(self, url):
        check_call((self.find('wget'), url))

    def find(self, name):
        result = find_executable(name)
        if result is None:
            raise Exception("*** Cannot find %s; make sure it's installed" % (name,))
        return result

    def run(self):
        "Get the .piqi definitions and run the 'protoc' compiler command"
        eto_piqi = join(PROJECT_ROOT, 'eto.piqi')
        if not os.path.exists(eto_piqi):
            self.download(ETO_PIQI_URL)

        seto_piqi = join(PROJECT_ROOT, 'seto.piqi')
        if not os.path.exists(seto_piqi):
            self.download(SETO_PIQI_URL)

        eto_proto = join(PROJECT_ROOT, 'smarkets.streaming_api.eto.proto')
        if not os.path.exists(eto_proto):
            check_call((self.find('piqi'), 'to-proto', eto_piqi, '-o', eto_proto))

        seto_proto = join(PROJECT_ROOT, 'smarkets.streaming_api.seto.proto')
        if not os.path.exists(seto_proto):
            check_call((self.find('piqi'), 'to-proto', seto_piqi, '-o', seto_proto))
            self.replace_file(seto_proto, lambda line: line.replace(
                'import "eto.piqi.proto"',
                'import "smarkets.streaming_api.eto.proto"'))

        for pkg in protobuf_modules:
            dst_pkg_file = protobuf_module_file(pkg)
            if not os.path.exists(dst_pkg_file):
                check_call((self.find('protoc'), '--python_out=.',
                            'smarkets.streaming_api.%s.proto' % (pkg,)))

        build.build.run(self)

    @staticmethod
    def replace_file(filename, line_map):
        "Map line_map for each line in filename"
        with open(filename, "r") as sources:
            lines = sources.readlines()
        with open(filename, "w") as sources:
            for line in lines:
                sources.write(line_map(line))


class SmarketsProtocolClean(clean.clean):
    """Class to clean up the built protobuf files."""

    description = "clean up files generated by protobuf-compiler"

    def run(self):
        """Do the clean up"""
        for src_dir in [
            join('build', 'pb'),
        ]:
            src_dir = join(PROJECT_ROOT, src_dir)
            if os.path.exists(src_dir):
                shutil.rmtree(src_dir)

        for filename in chain(
                _safe_glob('*.proto'),
                _safe_glob('*.piqi'),
                (join(PROJECT_ROOT, 'smarkets', 'streaming_api', '%s_pb2.py' % key)
                 for key in ('eto', 'seto'))):
            if os.path.exists(filename):
                os.unlink(filename)

        # Call the parent class clean command
        clean.clean.run(self)


readme_path = join(PROJECT_ROOT, 'README.rst')

with io.open(readme_path, encoding='utf-8') as f:
    long_description = f.read()

# this is not ideal but at least we're not repeating ourselves when
# updating the package version
with open(join(PROJECT_ROOT, 'smarkets', '__init__.py')) as f:
    version_line = [line for line in f if line.startswith('__version__')][0]
    __version__ = version_line.split('=')[1].strip().strip("'").strip('"')

sdict = {
    'name': 'smk_python_sdk',
    'version': __version__,
    'description': 'Smarkets Python SDK - API clients and utility library',
    'long_description': long_description,
    'url': 'https://github.com/smarkets/smk_python_sdk',
    'download_url': 'https://github.com/smarkets/smk_python_sdk/downloads/smk_python_sdk-%s.tar.gz' % (
        __version__,),
    'author': 'Smarkets Limited',
    'author_email': 'support@smarkets.com',
    'maintainer': 'Smarkets Limited',
    'maintainer_email': 'support@smarkets.com',
    'keywords': ['Smarkets', 'betting exchange'],
    'license': 'MIT',
    'packages': ['smarkets', 'smarkets.streaming_api', 'smarkets.tests'],
    'classifiers': [
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python'],
    'install_requires': [
        'decorator',
        'iso8601',
        'protobuf',
        'pytz',
        'six',
    ],
    'zip_safe': False,
    'cmdclass': {
        'build': SmarketsProtocolBuild,
        'clean': SmarketsProtocolClean,
    },
}


def creating_a_distribution():
    command_line = ' '.join(sys.argv)
    return 'sdist' in command_line or 'bdist' in command_line


def make_sure_the_package_is_built():
    # It used to be *very* easy to create a sdist/bdist without building
    # the package first and the resulting distribution would be incomplete,
    # this is to prevent that from happening.
    for name in protobuf_modules:
        file_name = protobuf_module_file(name)
        assert os.path.isfile(file_name), '%r not built' % (file_name,)


if __name__ == '__main__':
    if creating_a_distribution():
        make_sure_the_package_is_built()

    setup(**sdict)
```
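The version-handling block above reads `__version__` straight out of the package's `__init__.py` instead of importing the package, which could fail before the protobuf modules are built. A small sketch of that single-source-of-version pattern, with a hypothetical helper name and a throwaway file for the demo:

```python
import os
import tempfile

def read_version(init_path):
    """Parse __version__ from a Python file without importing it."""
    with open(init_path) as f:
        line = next(l for l in f if l.startswith('__version__'))
    return line.split('=')[1].strip().strip("'").strip('"')

with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as tmp:
    tmp.write("__version__ = '1.2.3'\n")
try:
    assert read_version(tmp.name) == '1.2.3'
finally:
    os.unlink(tmp.name)
```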
```python # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.urls import reverse from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import messages from openstack_dashboard.api import cinder class UpdateForm(forms.SelfHandlingForm): name = forms.CharField(max_length=255, label=_("Name")) description = forms.CharField(max_length=255, widget=forms.Textarea(attrs={'rows': 4}), label=_("Description"), required=False) def clean(self): cleaned_data = super(UpdateForm, self).clean() new_desc = cleaned_data.get('description') old_desc = self.initial['description'] if old_desc and not new_desc: error_msg = _("Description is required.") self._errors['description'] = self.error_class([error_msg]) return cleaned_data return cleaned_data def handle(self, request, data): group_id = self.initial['group_id'] try: cinder.group_update(request, group_id, data['name'], data['description']) message = _('Updating volume group "%s"') % data['name'] messages.info(request, message) return True except Exception: redirect = reverse("horizon:project:volume_groups:index") exceptions.handle(request, _('Unable to update volume group.'), redirect=redirect) class RemoveVolsForm(forms.SelfHandlingForm): def handle(self, request, data): group_id = self.initial['group_id'] name = self.initial['name'] search_opts = {'group_id': group_id} try: # get list of assigned volumes assigned_vols = [] volumes = cinder.volume_list(request, search_opts=search_opts) for volume in volumes: assigned_vols.append(volume.id) cinder.group_update(request, group_id, remove_volumes=assigned_vols) message = _('Removing volumes from volume group "%s"') % name messages.info(request, message) return True except Exception: redirect = reverse("horizon:project:volume_groups:index") exceptions.handle(request, _('Errors occurred in removing volumes ' 'from group.'), redirect=redirect) class DeleteForm(forms.SelfHandlingForm): delete_volumes = forms.BooleanField(label=_("Delete Volumes"), required=False) def handle(self, request, data): group_id = self.initial['group_id'] name = self.initial['name'] delete_volumes = data['delete_volumes'] try: cinder.group_delete(request, group_id, delete_volumes=delete_volumes) message = _('Deleting volume group "%s"') % name messages.success(request, message) return True except Exception: redirect = reverse("horizon:project:volume_groups:index") exceptions.handle(request, _('Errors occurred in deleting group.'), redirect=redirect) class CreateSnapshotForm(forms.SelfHandlingForm): name = forms.CharField(max_length=255, label=_("Snapshot Name")) description = forms.CharField(max_length=255, widget=forms.Textarea(attrs={'rows': 4}), label=_("Description"), required=False) def handle(self, request, data): group_id = self.initial['group_id'] try: message = _('Creating group snapshot "%s".') \ % data['name'] snapshot = cinder.group_snapshot_create(request, group_id, data['name'], data['description']) messages.info(request, message) return snapshot except 
Exception as e: redirect = reverse("horizon:project:volume_groups:index") msg = _('Unable to create group snapshot.') if e.code == 413: msg = _('Requested snapshot would exceed the allowed quota.') else: search_opts = {'group_id': group_id} volumes = cinder.volume_list(request, search_opts=search_opts) if len(volumes) == 0: msg = _('Unable to create snapshot. ' 'group must contain volumes.') exceptions.handle(request, msg, redirect=redirect) class CloneGroupForm(forms.SelfHandlingForm): name = forms.CharField(max_length=255, label=_("Group Name")) description = forms.CharField(max_length=255, widget=forms.Textarea(attrs={'rows': 4}), label=_("Description"), required=False) group_source = forms.ChoiceField( label=_("Use a group as source"), widget=forms.ThemableSelectWidget( attrs={'class': 'image-selector'}, data_attrs=('name'), transform=lambda x: "%s" % (x.name)), required=False) def prepare_group_source_field(self, request): try: group_id = self.initial['group_id'] group = cinder.group_get(request, group_id) self.fields['group_source'].choices = ((group_id, group),) except Exception: exceptions.handle(request, _('Unable to load the specified group.')) def __init__(self, request, *args, **kwargs): super(CloneGroupForm, self).__init__(request, *args, **kwargs) self.prepare_group_source_field(request) def handle(self, request, data): group_id = self.initial['group_id'] try: message = _('Creating consistency group "%s".') % data['name'] group = cinder.group_create_from_source( request, data['name'], source_group_id=group_id, description=data['description']) messages.info(request, message) return group except Exception: redirect = reverse("horizon:project:volume_groups:index") msg = _('Unable to clone group.') search_opts = {'group_id': group_id} volumes = cinder.volume_list(request, search_opts=search_opts) if len(volumes) == 0: msg = _('Unable to clone empty group.') exceptions.handle(request, msg, redirect=redirect) ```
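The `clean()` override in `UpdateForm` encodes one rule: once a group has a description, submitting an empty one is rejected. A framework-free sketch of that rule (the function name is hypothetical), useful for seeing the logic without the Django form machinery:

```python
def validate_description(old_desc, new_desc):
    """Mirror UpdateForm.clean(): an existing description cannot be cleared."""
    if old_desc and not new_desc:
        raise ValueError("Description is required.")
    return new_desc

assert validate_description("", "") == ""            # never had one: fine
assert validate_description("old", "new") == "new"   # replacing is fine
try:
    validate_description("old", "")                  # clearing is rejected
except ValueError:
    pass
```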
Provide a verbatim copy of the code:
```python """Public interface managing the workflow for peer assessments. The Peer Assessment Workflow API exposes all public actions required to complete the workflow for a given submission. """ import logging import json from django.db import DatabaseError, IntegrityError, transaction from django.utils import timezone from openassessment.assessment.errors import (PeerAssessmentInternalError, PeerAssessmentRequestError, PeerAssessmentWorkflowError) from openassessment.assessment.models import (Assessment, AssessmentFeedback, AssessmentPart, InvalidRubricSelection, PeerWorkflow, PeerWorkflowItem) from openassessment.assessment.models import TrackChanges from openassessment.assessment.serializers import (AssessmentFeedbackSerializer, InvalidRubric, RubricSerializer, full_assessment_dict, rubric_from_dict, serialize_assessments) from submissions import api as sub_api logger = logging.getLogger("openassessment.assessment.api.peer") PEER_TYPE = "PE" def submitter_is_finished(submission_uuid, peer_requirements): """ Check whether the submitter has made the required number of assessments. If the requirements dict is None (because we're being updated asynchronously or when the workflow is first created), then automatically return False. Args: submission_uuid (str): The UUID of the submission being tracked. peer_requirements (dict): Dictionary with the key "must_grade" indicating the required number of submissions the student must grade. Returns: bool """ if peer_requirements is None: return False try: workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid) if workflow.completed_at is not None: return True elif workflow.num_peers_graded() >= peer_requirements["must_grade"]: workflow.completed_at = timezone.now() workflow.save() return True return False except PeerWorkflow.DoesNotExist: return False except KeyError: raise PeerAssessmentRequestError(u'Requirements dict must contain "must_grade" key') def assessment_is_finished(submission_uuid, peer_requirements): """ Check whether the submitter has received enough assessments to get a score. If the requirements dict is None (because we're being updated asynchronously or when the workflow is first created), then automatically return False. Args: submission_uuid (str): The UUID of the submission being tracked. peer_requirements (dict): Dictionary with the key "must_be_graded_by" indicating the required number of assessments the student must receive to get a score. Returns: bool """ if not peer_requirements: return False workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid) if workflow is None: return False scored_items = workflow.graded_by.filter( assessment__submission_uuid=submission_uuid, assessment__score_type=PEER_TYPE ) return scored_items.count() >= peer_requirements["must_be_graded_by"] def on_start(submission_uuid): """Create a new peer workflow for a student item and submission. Creates a unique peer workflow for a student item, associated with a submission. Args: submission_uuid (str): The submission associated with this workflow. Returns: None Raises: SubmissionError: There was an error retrieving the submission. PeerAssessmentInternalError: Raised when there is an internal error creating the Workflow. 
""" try: with transaction.atomic(): submission = sub_api.get_submission_and_student(submission_uuid) workflow, __ = PeerWorkflow.objects.get_or_create( student_id=submission['student_item']['student_id'], course_id=submission['student_item']['course_id'], item_id=submission['student_item']['item_id'], submission_uuid=submission_uuid ) workflow.save() except IntegrityError: # If we get an integrity error, it means someone else has already # created a workflow for this submission, so we don't need to do anything. pass except DatabaseError: error_message = ( u"An internal error occurred while creating a new peer " u"workflow for submission {}" .format(submission_uuid) ) logger.exception(error_message) raise PeerAssessmentInternalError(error_message) def get_score(submission_uuid, peer_requirements): """ Retrieve a score for a submission if requirements have been satisfied. Args: submission_uuid (str): The UUID of the submission. requirements (dict): Dictionary with the key "must_be_graded_by" indicating the required number of assessments the student must receive to get a score. Returns: A dictionary with the points earned, points possible, and contributing_assessments information, along with a None staff_id. """ if peer_requirements is None: return None # User hasn't completed their own submission yet if not submitter_is_finished(submission_uuid, peer_requirements): return None workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid) if workflow is None: return None # Retrieve the assessments in ascending order by score date, # because we want to use the *first* one(s) for the score. items = workflow.graded_by.filter( assessment__submission_uuid=submission_uuid, assessment__score_type=PEER_TYPE ).order_by('-assessment') submission_finished = items.count() >= peer_requirements["must_be_graded_by"] if not submission_finished: return None # Unfortunately, we cannot use update() after taking a slice, # so we need to update the and save the items individually. # One might be tempted to first query for the first n assessments, # then select items that have those assessments. # However, this generates a SQL query with a LIMIT in a subquery, # which is not supported by some versions of MySQL. # Although this approach generates more database queries, the number is likely to # be relatively small (at least 1 and very likely less than 5). for scored_item in items[:peer_requirements["must_be_graded_by"]]: scored_item.scored = True scored_item.save() assessments = [item.assessment for item in items] return { "points_earned": sum( get_assessment_median_scores(submission_uuid).values() ), "points_possible": assessments[0].points_possible, "contributing_assessments": [assessment.id for assessment in assessments], "staff_id": None, } def create_assessment( scorer_submission_uuid, scorer_id, options_selected, criterion_feedback, overall_feedback, rubric_dict, num_required_grades, scored_at=None, track_changes_edits=None, ): """Creates an assessment on the given submission. Assessments are created based on feedback associated with a particular rubric. Args: scorer_submission_uuid (str): The submission uuid for the Scorer's workflow. The submission being assessed can be determined via the peer workflow of the grading student. scorer_id (str): The user ID for the user giving this assessment. This is required to create an assessment on a submission. options_selected (dict): Dictionary mapping criterion names to the option names the user selected for that criterion. 
criterion_feedback (dict): Dictionary mapping criterion names to the free-form text feedback the user gave for the criterion. Since criterion feedback is optional, some criteria may not appear in the dictionary. overall_feedback (unicode): Free-form text feedback on the submission overall. num_required_grades (int): The required number of assessments a submission requires before it is completed. If this number of assessments is reached, the grading_completed_at timestamp is set for the Workflow. Keyword Args: scored_at (datetime): Optional argument to override the time in which the assessment took place. If not specified, scored_at is set to now. track_changes_edits (str): Optional argument to specify that a track_changes entry should be created for this assessment, storing suggested edits to the original submission. Returns: dict: the Assessment model, serialized as a dict. Raises: PeerAssessmentRequestError: Raised when the submission_id is invalid, or the assessment_dict does not contain the required values to create an assessment. PeerAssessmentInternalError: Raised when there is an internal error while creating a new assessment. Examples: >>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"} >>> criterion_feedback = {"clarity": "I thought this essay was very clear."} >>> feedback = "Your submission was thrilling." >>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict) """ try: # Retrieve workflow information scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid) peer_workflow_item = scorer_workflow.find_active_assessments() if peer_workflow_item is None: message = ( u"There are no open assessments associated with the scorer's " u"submission UUID {}." ).format(scorer_submission_uuid) logger.warning(message) raise PeerAssessmentWorkflowError(message) peer_submission_uuid = peer_workflow_item.submission_uuid assessment = _complete_assessment( rubric_dict, scorer_id, peer_submission_uuid, options_selected, criterion_feedback, scorer_workflow, overall_feedback, num_required_grades, scored_at ) if track_changes_edits: json_edited_content = serialize_edited_content(track_changes_edits) change_tracker = TrackChanges( scorer_id=scorer_id, owner_submission_uuid=peer_submission_uuid, edited_content=track_changes_edits, json_edited_content=json_edited_content, ) change_tracker.save() _log_assessment(assessment, scorer_workflow) return full_assessment_dict(assessment) except PeerWorkflow.DoesNotExist: message = ( u"There is no Peer Workflow associated with the given " u"submission UUID {}." ).format(scorer_submission_uuid) logger.exception(message) raise PeerAssessmentWorkflowError(message) except InvalidRubric: msg = u"The rubric definition is not valid." logger.exception(msg) raise PeerAssessmentRequestError(msg) except InvalidRubricSelection: msg = u"Invalid options were selected in the rubric." logger.warning(msg, exc_info=True) raise PeerAssessmentRequestError(msg) except DatabaseError: error_message = ( u"An error occurred while creating an assessment by the scorer with this ID: {}" ).format(scorer_id) logger.exception(error_message) raise PeerAssessmentInternalError(error_message) @transaction.atomic def _complete_assessment( rubric_dict, scorer_id, peer_submission_uuid, options_selected, criterion_feedback, scorer_workflow, overall_feedback, num_required_grades, scored_at ): """ Internal function for atomic assessment creation. 
Creates a peer assessment and closes the associated peer workflow item in a single transaction. Args: rubric_dict (dict): The rubric model associated with this assessment scorer_id (str): The user ID for the user giving this assessment. This is required to create an assessment on a submission. peer_submission_uuid (str): The submission uuid for the submission being assessed. options_selected (dict): Dictionary mapping criterion names to the option names the user selected for that criterion. criterion_feedback (dict): Dictionary mapping criterion names to the free-form text feedback the user gave for the criterion. Since criterion feedback is optional, some criteria may not appear in the dictionary. scorer_workflow (PeerWorkflow): The PeerWorkflow associated with the scorer. Updates the workflow item associated with this assessment. overall_feedback (unicode): Free-form text feedback on the submission overall. num_required_grades (int): The required number of assessments a submission requires before it is completed. If this number of assessments is reached, the grading_completed_at timestamp is set for the Workflow. scored_at (datetime): Optional argument to override the time in which the assessment took place. If not specified, scored_at is set to now. Returns: The Assessment model """ # Get or create the rubric rubric = rubric_from_dict(rubric_dict) # Create the peer assessment assessment = Assessment.create( rubric, scorer_id, peer_submission_uuid, PEER_TYPE, scored_at=scored_at, feedback=overall_feedback ) # Create assessment parts for each criterion in the rubric # This will raise an `InvalidRubricSelection` if the selected options do not # match the rubric. AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback) # Close the active assessment scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades) return assessment def get_rubric_max_scores(submission_uuid): """Gets the maximum possible value for each criterion option Iterates over the rubric used to grade the given submission, and creates a dictionary of maximum possible values. Args: submission_uuid: The submission to get the associated rubric max scores. Returns: A dictionary of max scores for this rubric's criteria options. Returns None if no assessments are found for this submission. Raises: PeerAssessmentInternalError: Raised when there is an error retrieving the submission, or its associated rubric. """ try: assessments = list( Assessment.objects.filter( submission_uuid=submission_uuid ).order_by("-scored_at", "-id").select_related("rubric")[:1] ) if not assessments: return None assessment = assessments[0] rubric_dict = RubricSerializer.serialized_from_cache(assessment.rubric) return { criterion["name"]: criterion["points_possible"] for criterion in rubric_dict["criteria"] } except DatabaseError: error_message = ( u"Error getting rubric options max scores for submission uuid {uuid}" ).format(uuid=submission_uuid) logger.exception(error_message) raise PeerAssessmentInternalError(error_message) def get_assessment_median_scores(submission_uuid): """Get the median score for each rubric criterion For a given assessment, collect the median score for each criterion on the rubric. This set can be used to determine the overall score, as well as each part of the individual rubric scores. If there is a true median score, it is returned. If there are two median values, the average of those two values is returned, rounded up to the greatest integer value. 
Args: submission_uuid (str): The submission uuid is used to get the assessments used to score this submission, and generate the appropriate median score. Returns: dict: A dictionary of rubric criterion names, with a median score of the peer assessments. Raises: PeerAssessmentInternalError: If any error occurs while retrieving information to form the median scores, an error is raised. """ try: workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid) items = workflow.graded_by.filter(scored=True) assessments = [item.assessment for item in items] scores = Assessment.scores_by_criterion(assessments) return Assessment.get_median_score_dict(scores) except PeerWorkflow.DoesNotExist: return {} except DatabaseError: error_message = ( u"Error getting assessment median scores for submission {uuid}" ).format(uuid=submission_uuid) logger.exception(error_message) raise PeerAssessmentInternalError(error_message) def has_finished_required_evaluating(submission_uuid, required_assessments): """Check if a student still needs to evaluate more submissions Per the contract of the peer assessment workflow, a student must evaluate a number of peers before receiving feedback on their submission. Args: submission_uuid (str): The submission UUID is required to determine if the associated student has completed enough assessments. This argument is required. required_assessments (int): The number of assessments a student has to submit before receiving the feedback on their submission. This is a required argument. Returns: tuple: True if the student has evaluated enough peer submissions to move through the peer assessment workflow. False if the student needs to evaluate more peer submissions. The second value is the count of assessments completed. Raises: PeerAssessmentRequestError: Raised when the submission UUID is invalid, or the required_assessments is not a positive integer. PeerAssessmentInternalError: Raised when there is an internal error while evaluating this workflow rule. Examples: >>> has_finished_required_evaluating("abc123", 3) True, 3 """ workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid) done = False peers_graded = 0 if workflow: peers_graded = workflow.num_peers_graded() done = (peers_graded >= required_assessments) return done, peers_graded def get_assessments(submission_uuid, limit=None): """Retrieve the assessments for a submission. Retrieves all the assessments for a submissions. This API returns related feedback without making any assumptions about grading. Any outstanding assessments associated with this submission will not be returned. Args: submission_uuid (str): The submission all the requested assessments are associated with. Required. Keyword Arguments: limit (int): Limit the returned assessments. If None, returns all. Returns: list: A list of dictionaries, where each dictionary represents a separate assessment. Each assessment contains points earned, points possible, time scored, scorer id, score type, and feedback. Raises: PeerAssessmentRequestError: Raised when the submission_id is invalid. PeerAssessmentInternalError: Raised when there is an internal error while retrieving the assessments associated with this submission. Examples: >>> get_assessments("1", limit=2) [ { 'points_earned': 6, 'points_possible': 12, 'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284 tzinfo=<UTC>), 'scorer': u"Tim", 'feedback': u'Your submission was thrilling.' 
}, { 'points_earned': 11, 'points_possible': 12, 'scored_at': datetime.datetime(2014, 1, 31, 14, 10, 17, 544214 tzinfo=<UTC>), 'scorer': u"Bob", 'feedback': u'Great submission.' } ] """ try: assessments = Assessment.objects.filter( submission_uuid=submission_uuid, score_type=PEER_TYPE )[:limit] return serialize_assessments(assessments) except DatabaseError: error_message = ( u"Error getting assessments for submission {uuid}" ).format(uuid=submission_uuid) logger.exception(error_message) raise PeerAssessmentInternalError(error_message) def get_submitted_assessments(submission_uuid, limit=None): """Retrieve the assessments created by the given submission's author. Retrieves all the assessments created by the given submission's author. This API returns related feedback without making any assumptions about grading. Any outstanding assessments associated with this submission will not be returned. Args: submission_uuid (str): The submission of the student whose assessments we are requesting. Required. Keyword Arguments: limit (int): Limit the returned assessments. If None, returns all. Returns: list(dict): A list of dictionaries, where each dictionary represents a separate assessment. Each assessment contains points earned, points possible, time scored, scorer id, score type, and feedback. If no workflow is found associated with the given submission_uuid, returns an empty list. Raises: PeerAssessmentRequestError: Raised when the submission_id is invalid. PeerAssessmentInternalError: Raised when there is an internal error while retrieving the assessments associated with this submission. Examples: >>> get_submitted_assessments("1", limit=2) [ { 'points_earned': 6, 'points_possible': 12, 'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284 tzinfo=<UTC>), 'scorer': u"Tim", 'feedback': u'Your submission was thrilling.' }, { 'points_earned': 11, 'points_possible': 12, 'scored_at': datetime.datetime(2014, 1, 31, 14, 10, 17, 544214 tzinfo=<UTC>), 'scorer': u"Tim", 'feedback': u'Great submission.' } ] """ try: # If no workflow is found associated with the uuid, this returns None, # and an empty set of assessments will be returned. workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid) items = PeerWorkflowItem.objects.filter( scorer=workflow, assessment__isnull=False ) assessments = Assessment.objects.filter( pk__in=[item.assessment.pk for item in items])[:limit] return serialize_assessments(assessments) except DatabaseError: error_message = ( u"Couldn't retrieve the assessments completed by the " " student with submission {uuid}" ).format(uuid=submission_uuid) logger.exception(error_message) raise PeerAssessmentInternalError(error_message) def get_submission_to_assess(submission_uuid, graded_by): """Get a submission to peer evaluate. Retrieves a submission for assessment for the given student. This will not return a submission submitted by the requesting scorer. Submissions are returned based on how many assessments are still required, and if there are peers actively assessing a particular submission. If there are no submissions requiring assessment, a submission may be returned that will be 'over graded', and the assessment will not be counted towards the overall grade. Args: submission_uuid (str): The submission UUID from the student requesting a submission for assessment. This is used to explicitly avoid giving the student their own submission, and determines the associated Peer Workflow. 
graded_by (int): The number of assessments a submission requires before it has completed the peer assessment process. Returns: dict: A peer submission for assessment. This contains a 'student_item', 'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be used for assessment. Raises: PeerAssessmentRequestError: Raised when the request parameters are invalid for the request. PeerAssessmentInternalError: Raised when there is an internal error retrieving peer workflow information. PeerAssessmentWorkflowError: Raised when an error occurs because this function, or the student item, is not in the proper workflow state to retrieve a peer submission. Examples: >>> get_submission_to_assess("abc123", 3) { 'student_item': 2, 'attempt_number': 1, 'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>), 'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>), 'answer': u'The answer is 42.' } """ workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid) if not workflow: raise PeerAssessmentWorkflowError( u"A Peer Assessment Workflow does not exist for the student " u"with submission UUID {}".format(submission_uuid) ) if workflow.is_cancelled: return None open_item = workflow.find_active_assessments() peer_submission_uuid = open_item.submission_uuid if open_item else None # If there is an active assessment for this user, get that submission, # otherwise, get the first assessment for review, otherwise, # get the first submission available for over grading ("over-grading"). if peer_submission_uuid is None: peer_submission_uuid = workflow.get_submission_for_review(graded_by) if peer_submission_uuid is None: peer_submission_uuid = workflow.get_submission_for_over_grading() if peer_submission_uuid: try: submission_data = sub_api.get_submission(peer_submission_uuid) PeerWorkflow.create_item(workflow, peer_submission_uuid) _log_workflow(peer_submission_uuid, workflow) return submission_data except sub_api.SubmissionNotFoundError: error_message = ( u"Could not find a submission with the uuid {} for student {} " u"in the peer workflow." ).format(peer_submission_uuid, workflow.student_id) logger.exception(error_message) raise PeerAssessmentWorkflowError(error_message) else: logger.info( u"No submission found for {} to assess ({}, {})" .format( workflow.student_id, workflow.course_id, workflow.item_id, ) ) return None def create_peer_workflow(submission_uuid): """Create a new peer workflow for a student item and submission. Creates a unique peer workflow for a student item, associated with a submission. Args: submission_uuid (str): The submission associated with this workflow. Returns: None Raises: SubmissionError: There was an error retrieving the submission. PeerAssessmentInternalError: Raised when there is an internal error creating the Workflow. Examples: >>> create_peer_workflow("1") """ try: with transaction.atomic(): submission = sub_api.get_submission_and_student(submission_uuid) workflow, __ = PeerWorkflow.objects.get_or_create( student_id=submission['student_item']['student_id'], course_id=submission['student_item']['course_id'], item_id=submission['student_item']['item_id'], submission_uuid=submission_uuid ) workflow.save() except IntegrityError: # If we get an integrity error, it means someone else has already # created a workflow for this submission, so we don't need to do anything. 
pass except DatabaseError: error_message = ( u"An internal error occurred while creating a new peer " u"workflow for submission {}" ).format(submission_uuid) logger.exception(error_message) raise PeerAssessmentInternalError(error_message) def create_peer_workflow_item(scorer_submission_uuid, submission_uuid): """ Begin peer-assessing a particular submission. Note that this does NOT pick the submission from the prioritized list of available submissions. Mainly useful for testing. Args: scorer_submission_uuid (str): The ID of the scoring student. submission_uuid (str): The unique identifier of the submission being scored Returns: None Raises: PeerAssessmentWorkflowError: Could not find the workflow for the student. PeerAssessmentInternalError: Could not create the peer workflow item. """ workflow = PeerWorkflow.get_by_submission_uuid(scorer_submission_uuid) PeerWorkflow.create_item(workflow, submission_uuid) def get_assessment_feedback(submission_uuid): """ Retrieve a feedback on an assessment. Args: submission_uuid: The submission we want to retrieve assessment feedback for. Returns: dict or None Raises: PeerAssessmentInternalError: Error occurred while retrieving the feedback. """ try: feedback = AssessmentFeedback.objects.get( submission_uuid=submission_uuid ) return AssessmentFeedbackSerializer(feedback).data except AssessmentFeedback.DoesNotExist: return None except DatabaseError: error_message = ( u"An error occurred retrieving assessment feedback for {}." .format(submission_uuid) ) logger.exception(error_message) raise PeerAssessmentInternalError(error_message) def set_assessment_feedback(feedback_dict): """ Set a feedback object for an assessment to have some new values. Sets or updates the assessment feedback with the given values in the dict. Args: feedback_dict (dict): A dictionary of all the values to update or create a new assessment feedback. Returns: None Raises: PeerAssessmentRequestError PeerAssessmentInternalError """ submission_uuid = feedback_dict.get('submission_uuid') feedback_text = feedback_dict.get('feedback_text') selected_options = feedback_dict.get('options', list()) if feedback_text and len(feedback_text) > AssessmentFeedback.MAXSIZE: error_message = u"Assessment feedback too large." raise PeerAssessmentRequestError(error_message) try: # Get or create the assessment model for this submission # If we receive an integrity error, assume that someone else is trying to create # another feedback model for this submission, and raise an exception. if submission_uuid: feedback, created = AssessmentFeedback.objects.get_or_create(submission_uuid=submission_uuid) else: error_message = u"An error occurred creating assessment feedback: bad or missing submission_uuid." logger.error(error_message) raise PeerAssessmentRequestError(error_message) # Update the feedback text if feedback_text is not None: feedback.feedback_text = feedback_text # Save the feedback model. We need to do this before setting m2m relations. 
if created or feedback_text is not None: feedback.save() # Associate the feedback with selected options feedback.add_options(selected_options) # Associate the feedback with scored assessments assessments = PeerWorkflowItem.get_scored_assessments(submission_uuid) feedback.assessments.add(*assessments) except DatabaseError: msg = u"Error occurred while creating or updating feedback on assessment: {}".format(feedback_dict) logger.exception(msg) raise PeerAssessmentInternalError(msg) def _log_assessment(assessment, scorer_workflow): """ Log the creation of a peer assessment. Args: assessment (Assessment): The assessment model that was created. scorer_workflow (dict): A dictionary representation of the Workflow belonging to the scorer of this assessment. Returns: None """ logger.info( u"Created peer-assessment {assessment_id} for submission " u"{submission_uuid}, course {course_id}, item {item_id} " u"with rubric {rubric_content_hash}; scored by {scorer}" .format( assessment_id=assessment.id, submission_uuid=assessment.submission_uuid, course_id=scorer_workflow.course_id, item_id=scorer_workflow.item_id, rubric_content_hash=assessment.rubric.content_hash, scorer=scorer_workflow.student_id, ) ) def _log_workflow(submission_uuid, workflow): """ Log the creation of a peer-assessment workflow. Args: submission_uuid (str): The UUID of the submission being assessed. workflow (PeerWorkflow): The Peer Workflow of the student making the assessment. """ logger.info( u"Retrieved submission {} ({}, {}) to be assessed by {}" .format( submission_uuid, workflow.course_id, workflow.item_id, workflow.student_id, ) ) def is_workflow_cancelled(submission_uuid): """ Check if workflow submission is cancelled. Args: submission_uuid (str): The UUID of the workflow's submission. Returns: True/False """ if submission_uuid is None: return False try: workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid) return workflow.is_cancelled if workflow else False except PeerAssessmentWorkflowError: return False def on_cancel(submission_uuid): """Cancel the peer workflow for submission. Sets the cancelled_at field in peer workflow. Args: submission_uuid (str): The submission UUID associated with this workflow. Returns: None """ try: workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid) if workflow: workflow.cancelled_at = timezone.now() workflow.save() except (PeerAssessmentWorkflowError, DatabaseError): error_message = ( u"An internal error occurred while cancelling the peer" u"workflow for submission {}" .format(submission_uuid) ) logger.exception(error_message) raise PeerAssessmentInternalError(error_message) def serialize_edited_content(track_changes_edits): """Serialize submission content with track changes. Required now that multiple prompts are possible. Args: track_change_edits (array): Content of assessment for each prompt with track changes. Returns: json representation of content of assessment for each prompt with track changes. """ edited_content_array = [] for edited_content in track_changes_edits: content_dict = {'text': edited_content} edited_content_array.append(content_dict) return json.dumps({'parts': edited_content_array}) ```
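The docstring of `get_assessment_median_scores` spells out the median rule: a true median is returned as-is, and with an even number of scores the two middle values are averaged and rounded up. A standalone sketch of that documented rule (this re-implements the described behaviour, not the actual `Assessment.get_median_score_dict`):

```python
import math

def median_score(scores):
    """Median of peer scores; even counts round the midpoint average up."""
    ordered = sorted(scores)
    n = len(ordered)
    if n % 2 == 1:
        return ordered[n // 2]
    return int(math.ceil((ordered[n // 2 - 1] + ordered[n // 2]) / 2.0))

assert median_score([1, 3, 5]) == 3       # true median
assert median_score([1, 2, 3, 4]) == 3    # ceil((2 + 3) / 2) == 3
```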
Here is a code file:
```python #!/usr/bin/env python # Copyright 2015-2016 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import datetime import logging import dateutil.parser from dateutil import tz from pytimeparse import timeparse from paasta_tools import marathon_tools log = logging.getLogger(__name__) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-a', '--age', dest='age', type=timedelta_type, default='1h', help="Max age of a Marathon deployment before it is stopped." "Any pytimeparse unit is supported") parser.add_argument('-n', '--dry-run', action="store_true", help="Don't actually stop any Marathon deployments") parser.add_argument('-v', '--verbose', action='store_true') options = parser.parse_args() return options def timedelta_type(value): """Return the :class:`datetime.datetime.DateTime` for a time in the past. :param value: a string containing a time format supported by :mod:`pytimeparse` """ if value is None: return None return datetime_seconds_ago(timeparse.timeparse(value)) def datetime_seconds_ago(seconds): return now() - datetime.timedelta(seconds=seconds) def now(): return datetime.datetime.now(tz.tzutc()) def delete_deployment_if_too_old(client, deployment, max_date, dry_run): started_at = dateutil.parser.parse(deployment.version) age = now() - started_at if started_at < max_date: if dry_run is True: log.warning("Would delete %s for %s as it is %s old" % (deployment.id, deployment.affected_apps[0], age)) else: log.warning("Deleting %s for %s as it is %s old" % (deployment.id, deployment.affected_apps[0], age)) client.delete_deployment(deployment_id=deployment.id, force=True) else: if dry_run is True: log.warning("NOT deleting %s for %s as it is %s old" % (deployment.id, deployment.affected_apps[0], age)) def main(): args = parse_args() if args.verbose: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.WARNING) config = marathon_tools.load_marathon_config() client = marathon_tools.get_marathon_client(config.get_url(), config.get_username(), config.get_password()) for deployment in client.list_deployments(): delete_deployment_if_too_old( client=client, deployment=deployment, max_date=args.age, dry_run=args.dry_run, ) if __name__ == "__main__": main() ```
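The cleanup script's core decision is a pure time comparison: a deployment is stopped when its start time falls before `now - max_age`. A stdlib-only sketch of that cutoff, with hypothetical names and no Marathon or pytimeparse dependency:

```python
import datetime

def is_too_old(started_at, max_age_seconds):
    """True when started_at is older than the allowed age (UTC)."""
    now = datetime.datetime.now(datetime.timezone.utc)
    max_date = now - datetime.timedelta(seconds=max_age_seconds)
    return started_at < max_date

one_day_ago = (datetime.datetime.now(datetime.timezone.utc)
               - datetime.timedelta(days=1))
assert is_too_old(one_day_ago, max_age_seconds=3600)           # older than 1h
assert not is_too_old(one_day_ago, max_age_seconds=3 * 86400)  # younger than 3d
```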
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Final Project"""

def print_menu():
    """Welcome To Your Phonebook

    Args: None
    Returns: None
    Examples:
        >>> Enter your choice (1-5) here: 1
        >>> Enter new contact name: chris
        >>> Enter contact phone number: 347192456
        ...You entered your first contact
    """
    print('1. Print Phone Numbers')
    print('2. Add a Phone Number')
    print('3. Remove a Phone Number')
    print('4. Lookup a Phone Number')
    print('5. Quit')
    print()

numbers = {}  # maps contact name -> phone number
menu_choice = 0
print_menu()
while menu_choice != 5:
    menu_choice = int(input("Type in a number (1-5): "))
    if menu_choice == 1:
        print("Telephone Numbers:")
        for x in numbers.keys():
            print("Name: ", x, "\tNumber:", numbers[x])
        print()
    elif menu_choice == 2:
        print("Add Name and Number")
        name = input("Name: ")
        phone = input("Number: ")
        numbers[name] = phone
    elif menu_choice == 3:
        # Only the name is needed to delete an entry. The original code
        # assigned input() to `print` here, shadowing the built-in and
        # breaking every later print call; confirm after deleting instead.
        print("Remove Name and Number")
        name = input("Name: ")
        if name in numbers:
            del numbers[name]
            print("Number is now deleted")
        else:
            print(name, "was not found")
    elif menu_choice == 4:
        print("Lookup Number")
        name = input("Name: ")
        if name in numbers:
            print("The number is", numbers[name])
        else:
            print(name, "was not found")
    elif menu_choice != 5:
        print_menu()
```
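The interactive loop above is hard to test; the same phonebook operations separate cleanly into small functions over a plain dict. A hypothetical refactor sketch:

```python
def add_number(book, name, phone):
    book[name] = phone

def remove_number(book, name):
    # pop with a default avoids a KeyError for unknown names
    return book.pop(name, None)

def lookup_number(book, name):
    return book.get(name)

book = {}
add_number(book, "chris", "347192456")
assert lookup_number(book, "chris") == "347192456"
assert remove_number(book, "chris") == "347192456"
assert lookup_number(book, "chris") is None
```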
Repeat the code precisely as written (spacing intact):
```python # -*- coding: utf-8 -*- # # This file is execfile()d with the current directory set to its containing dir. # To run the docs build directly, generate module index: # rm -r docs/api ; sphinx-apidoc -f -M -o docs/api smif # Then build docs: # rm -r docs/_build ; sphinx-build -b html docs docs/_build/html # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # -- Hack for ReadTheDocs and apidoc options ----------------------------------- # This hack is necessary since RTD does not issue `sphinx-apidoc` before running # If extensions (or modules to document with autodoc) are in another directory, # DON'T FORGET: Check the box "Install your project inside a virtualenv using # setup.py install" in the RTD Advanced Settings. # `sphinx-build -b html . _build/html`. See Issue: # https://github.com/rtfd/readthedocs.org/issues/1139 # It also appears necessary in order to pass options to sphinx-apidoc which obr # or setuptools don't currently allow. See issue: # https://github.com/sphinx-doc/sphinx/issues/1861 import inspect import os import sys from unittest.mock import MagicMock import better_apidoc # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) __location__ = os.path.join(os.getcwd(), os.path.dirname( inspect.getfile(inspect.currentframe()))) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.join(__location__, '../src')) class Mock(MagicMock): @classmethod def __getattr__(cls, name): return MagicMock() # mock modules which we can avoid installing for docs-building mock_modules = [ 'fiona', 'rtree', 'shapely', 'shapely.geometry', 'shapely.validation', 'pandas', 'pandas.core', 'pandas.core.internals', 'xarray' ] sys.modules.update((mod_name, Mock()) for mod_name in mock_modules) output_dir = os.path.join(__location__, "api") module_dir = os.path.join(__location__, "../src/smif") templates_dir = os.path.join(__location__, "_templates") better_apidoc.main([ 'better-apidoc', '-t', templates_dir, '--force', '--separate', '-o', output_dir, module_dir ]) # -- General configuration ----------------------------------------------------- # Extra styles, found in _static def setup(app): app.add_stylesheet('theme_tweaks.css') # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.inheritance_diagram', 'sphinx.ext.autosummary', 'sphinx.ext.imgmath', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage', 'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'sphinx.ext.autosectionlabel'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. 
master_doc = 'index' # General information about the project. project = u'smif' copyright = u'2017, Will Usher, Tom Russell' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '' # Is set by calling `setup.py docs` # The full version, including alpha/beta/rc tags. release = '' # Is set by calling `setup.py docs` # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build', '../tests/**'] # The reST default role (used for this markup: `text`) to use for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'paraiso-dark' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['smif.'] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". try: from smif import __version__ as version except ImportError: pass else: release = version # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = "" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'smif-doc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 'papersize': 'a4paper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'user_guide.tex', u'smif Documentation', u'Will Usher & Tom Russell', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = "" # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- External mapping ------------------------------------------------------------ python_version = '.'.join(map(str, sys.version_info[0:2])) intersphinx_mapping = { 'matplotlib': ('http://matplotlib.org/', None), 'numpy': ('https://docs.scipy.org/doc/numpy/', None), 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None), 'python': ('https://docs.python.org/' + python_version, None), 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None), 'sklearn': ('http://scikit-learn.org/stable/', None), 'sphinx': ('http://www.sphinx-doc.org/en/stable/', None), 'xarray': ('http://xarray.pydata.org/en/stable/', None), } ```
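The `Mock` trick near the top of the conf file is what lets autodoc import `smif` on a docs builder that lacks the heavy GIS/data stack: any module name registered in `sys.modules` is served from there, and every attribute access on the mock yields another mock. A minimal sketch of the mechanism (the mocked module name is just an example):

```python
import sys
from unittest.mock import MagicMock

class Mock(MagicMock):
    @classmethod
    def __getattr__(cls, name):
        return MagicMock()

sys.modules["fiona"] = Mock()  # pretend the package is installed

import fiona                   # resolved from sys.modules, no install needed
fiona.open("anything.shp")     # every attribute is a callable MagicMock
```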
[ { "content": "Replicate the source code:\n```python\n<<<<<<< HEAD\nfrom ListNode import ListNode\nclass Solution(object):\n def swapPairs(self, head):\n if not head or not head.next :\n return head\n resNode = head.next\n while head :\n pre = head\n head ...
[ { "content": "Replicate the source code:\n<|memory_start|>```python\n<<<<<<< HEAD\nfrom ListNode import ListNode\nclass Solution(object):\n def swapPairs(self, head):\n if not head or not head.next :\n return head\n resNode = head.next\n while head :\n pre = head\n ...
```python <<<<<<< HEAD from ListNode import ListNode class Solution(object): def swapPairs(self, head): if not head or not head.next : return head resNode = head.next while head : pre = head head = head.next.next ======= # https://leetcode.com/problems/swap-nodes-in-pairs/ from ListNode import ListNode class Solution(object): def swapPairs(self, head): if not head: return [] if not head.next: return head r_head = ListNode(0) l = r_head l.next = head m = head r = head.next while m or r: if not r: return r_head.next else: m.next = r.next r.next = m l.next = r m = m.next r = r.next.next l = l.next.next if r: r = r.next return r_head.next four = ListNode(4) three = ListNode(3) two = ListNode(2) one = ListNode(1) one.next = two two.next = three three.next = four # while one : # print(one.val) # one = one.next Solution().swapPairs(one) >>>>>>> 83d0b11e2eaab6e16fd7a88d6e65f2bdbd6dbe15 ```
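The block above retains its unresolved merge-conflict markers, so neither branch runs as-is. For reference only, a minimal conflict-free sketch of the same pairwise swap, with a self-contained `ListNode` standing in for the local module both branches import:

```python
class ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def swap_pairs(head):
    # A dummy head removes the special case of swapping the first pair.
    dummy = ListNode(0, head)
    prev = dummy
    while prev.next and prev.next.next:
        first, second = prev.next, prev.next.next
        first.next = second.next   # detach first from the pair
        second.next = first        # reverse the pair
        prev.next = second         # re-attach the swapped pair
        prev = first
    return dummy.next
```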
[ { "content": "Return the code unaltered:\n```python\n# -*- coding: utf-8 -*-\n#\n# This file is part of EventGhost.\n# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>\n#\n# EventGhost is free software: you can redistribute it and/or modify it under\n# the terms of the GNU General Public Li...
[ { "content": "Return the code unaltered:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# This file is part of EventGhost.\n# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>\n#\n# EventGhost is free software: you can redistribute it and/or modify it under\n# the terms of the GNU G...
```python # -*- coding: utf-8 -*- # # This file is part of EventGhost. # Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/> # # EventGhost is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 2 of the License, or (at your option) # any later version. # # EventGhost is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along # with EventGhost. If not, see <http://www.gnu.org/licenses/>. """ .. attribute:: globals :class:`eg.Bunch` instance, that holds all global variables used by PythonCommand actions. PythonScripts (and all other code) can access these globals through :obj:`eg.globals`. .. attribute:: event Instance of the :class:`eg.EventGhostEvent` instance, that is currently been processed. .. autofunction:: eg.DummyFunc """ import asyncore import locale import os import socket import sys import threading import time import wx from os.path import exists, join # Local imports import eg import Init eg.APP_NAME = "EventGhost" eg.CORE_PLUGIN_GUIDS = ( "{9D499A2C-72B6-40B0-8C8C-995831B10BB4}", # "EventGhost" "{A21F443B-221D-44E4-8596-E1ED7100E0A4}", # "System" "{E974D074-B0A3-4D0C-BBD1-992475DDD69D}", # "Window" "{6B1751BF-F94E-4260-AB7E-64C0693FD959}", # "Mouse" ) eg.ID_TEST = wx.NewId() eg.mainDir = eg.Cli.mainDir eg.imagesDir = join(eg.mainDir, "images") eg.languagesDir = join(eg.mainDir, "languages") eg.sitePackagesDir = join( eg.mainDir, "lib%d%d" % sys.version_info[:2], "site-packages" ) eg.revision = 2000 # Deprecated eg.startupArguments = eg.Cli.args eg.debugLevel = 0 eg.systemEncoding = locale.getdefaultlocale()[1] eg.document = None eg.result = None eg.plugins = eg.Bunch() eg.globals = eg.Bunch() eg.globals.eg = eg eg.event = None eg.eventTable = {} eg.eventString = "" eg.notificationHandlers = {} eg.programCounter = None eg.programReturnStack = [] eg.indent = 0 eg.pluginList = [] eg.mainThread = threading.currentThread() eg.stopExecutionFlag = False eg.lastFoundWindows = [] eg.currentItem = None eg.actionGroup = eg.Bunch() eg.actionGroup.items = [] eg.folderPath = eg.FolderPath() def _CommandEvent(): """Generate new (CmdEvent, Binder) tuple e.g. 
MooCmdEvent, EVT_MOO = EgCommandEvent() """ evttype = wx.NewEventType() class _Event(wx.PyCommandEvent): def __init__(self, id, **kw): wx.PyCommandEvent.__init__(self, evttype, id) self.__dict__.update(kw) if not hasattr(self, "value"): self.value = None def GetValue(self): return self.value def SetValue(self, value): self.value = value return _Event, wx.PyEventBinder(evttype, 1) eg.CommandEvent = _CommandEvent eg.ValueChangedEvent, eg.EVT_VALUE_CHANGED = eg.CommandEvent() eg.pyCrustFrame = None eg.dummyAsyncoreDispatcher = None if eg.startupArguments.configDir is None: eg.configDir = join(eg.folderPath.RoamingAppData, eg.APP_NAME) else: eg.configDir = eg.startupArguments.configDir if not exists(eg.configDir): try: os.makedirs(eg.configDir) except: pass if eg.startupArguments.isMain: if exists(eg.configDir): os.chdir(eg.configDir) else: os.chdir(eg.mainDir) eg.localPluginDir = join(eg.folderPath.ProgramData, eg.APP_NAME, "plugins") eg.corePluginDir = join(eg.mainDir, "plugins") eg.pluginDirs = [eg.corePluginDir, eg.localPluginDir] Init.InitPathsAndBuiltins() from eg.WinApi.Dynamic import GetCurrentProcessId # NOQA eg.processId = GetCurrentProcessId() Init.InitPil() class Exception(Exception): def __unicode__(self): try: return "\n".join([unicode(arg) for arg in self.args]) except UnicodeDecodeError: return "\n".join([str(arg).decode('mbcs') for arg in self.args]) class StopException(Exception): pass class HiddenAction: pass def Bind(notification, listener): if notification not in eg.notificationHandlers: notificationHandler = eg.NotificationHandler() eg.notificationHandlers[notification] = notificationHandler else: notificationHandler = eg.notificationHandlers[notification] notificationHandler.listeners.append(listener) def CallWait(func, *args, **kwargs): result = [None] event = threading.Event() def CallWaitWrapper(): try: result[0] = func(*args, **kwargs) finally: event.set() wx.CallAfter(CallWaitWrapper) event.wait() return result[0] def DummyFunc(*dummyArgs, **dummyKwargs): """ Just a do-nothing-function, that accepts arbitrary arguments. """ pass def Exit(): """ Sometimes you want to quickly exit a PythonScript, because you don't want to build deeply nested if-structures for example. eg.Exit() will exit your PythonScript immediately. (Note: This is actually a sys.exit() but will not exit EventGhost, because the SystemExit exception is catched for a PythonScript.) """ sys.exit() def HasActiveHandler(eventstring): for eventHandler in eg.eventTable.get(eventstring, []): obj = eventHandler while obj: if not obj.isEnabled: break obj = obj.parent else: return True return False def MessageBox(message, caption=eg.APP_NAME, style=wx.OK, parent=None): if parent is None: style |= wx.STAY_ON_TOP dialog = eg.MessageDialog(parent, message, caption, style) result = dialog.ShowModal() dialog.Destroy() return result def Notify(notification, value=None): if notification in eg.notificationHandlers: for listener in eg.notificationHandlers[notification].listeners: listener(value) # pylint: disable-msg=W0613 def RegisterPlugin( name = None, description = None, kind = "other", author = "[unknown author]", version = "[unknown version]", icon = None, canMultiLoad = False, createMacrosOnAdd = False, url = None, help = None, guid = None, **kwargs ): """ Registers information about a plugin to EventGhost. :param name: should be a short descriptive string with the name of the plugin. :param description: a short description of the plugin. :param kind: gives a hint about the category the plugin belongs to. 
It should be a string with a value out of ``"remote"`` (for remote receiver plugins), ``"program"`` (for program control plugins), ``"external"`` (for plugins that control external hardware) or ``"other"`` (if none of the other categories match). :param author: can be set to the name or a list of names of the developer(s) of the plugin. :param version: can be set to a version string. :param icon: can be a base64 encoded image for the plugin. If ``icon == None``, an "icon.png" will be used if it exists in the plugin folder. :param canMultiLoad: set this to ``True``, if a configuration can have more than one instance of this plugin. :param createMacrosOnAdd: if set to ``True``, when adding the plugin, EventGhost will ask the user, if he/she wants to add a folder with all actions of this plugin to his/her configuration. :param url: displays a clickable link in the plugin info dialog. :param help: a longer description and/or additional information for the plugin. Will be added to 'description'. :param guid: will help EG to identify your plugin, so there are no name clashes with other plugins that accidentally might have the same name and will later ease the update of plugins. :param \*\*kwargs: just to consume unknown parameters, to make the call backward compatible. """ pass # pylint: enable-msg=W0613 def RestartAsyncore(): """ Informs the asyncore loop of a new socket to handle. """ oldDispatcher = eg.dummyAsyncoreDispatcher dispatcher = asyncore.dispatcher() dispatcher.create_socket(socket.AF_INET, socket.SOCK_STREAM) eg.dummyAsyncoreDispatcher = dispatcher if oldDispatcher: oldDispatcher.close() if oldDispatcher is None: # create a global asyncore loop thread threading.Thread(target=asyncore.loop, name="AsyncoreThread").start() def RunProgram(): eg.stopExecutionFlag = False del eg.programReturnStack[:] while eg.programCounter is not None: programCounter = eg.programCounter item, idx = programCounter item.Execute() if eg.programCounter == programCounter: # program counter has not changed. Ask the parent for the next # item. if isinstance(item.parent, eg.MacroItem): eg.programCounter = item.parent.GetNextChild(idx) else: eg.programCounter = None while eg.programCounter is None and eg.programReturnStack: # we have no next item in this level. So look in the return # stack if any return has to be executed eg.indent -= 2 item, idx = eg.programReturnStack.pop() eg.programCounter = item.parent.GetNextChild(idx) eg.indent = 0 def StopMacro(ignoreReturn=False): """ Instructs EventGhost to stop executing the current macro after the current action (thus the PythonScript or PythonCommand) has finished. 
""" eg.programCounter = None if ignoreReturn: del eg.programReturnStack[:] def Unbind(notification, listener): eg.notificationHandlers[notification].listeners.remove(listener) def Wait(secs, raiseException=True): while secs > 0.0: if eg.stopExecutionFlag: if raiseException: raise eg.StopException("Execution interrupted by the user.") else: return False if secs > 0.1: time.sleep(0.1) else: time.sleep(secs) secs -= 0.1 return True # now assign all the functions above to `eg` eg.Bind = Bind eg.CallWait = CallWait eg.DummyFunc = DummyFunc eg.Exception = Exception eg.Exit = Exit eg.HasActiveHandler = HasActiveHandler eg.HiddenAction = HiddenAction eg.MessageBox = MessageBox eg.Notify = Notify eg.RegisterPlugin = RegisterPlugin eg.RestartAsyncore = RestartAsyncore eg.RunProgram = RunProgram eg.StopException = StopException eg.StopMacro = StopMacro eg.Unbind = Unbind eg.Wait = Wait eg.messageReceiver = eg.MainMessageReceiver() eg.app = eg.App() # we can't import the Icons module earlier, because wx.App must exist import Icons # NOQA eg.Icons = Icons eg.log = eg.Log() eg.Print = eg.log.Print eg.PrintError = eg.log.PrintError eg.PrintNotice = eg.log.PrintNotice eg.PrintTraceback = eg.log.PrintTraceback eg.PrintDebugNotice = eg.log.PrintDebugNotice eg.PrintStack = eg.log.PrintStack def TracebackHook(tType, tValue, traceback): eg.log.PrintTraceback(excInfo=(tType, tValue, traceback)) sys.excepthook = TracebackHook eg.colour = eg.Colour() eg.config = eg.Config() eg.debugLevel = int(eg.config.logDebug) if eg.startupArguments.isMain and not eg.startupArguments.translate: eg.text = eg.Text(eg.config.language) else: eg.text = eg.Text('en_EN') eg.actionThread = eg.ActionThread() eg.eventThread = eg.EventThread() eg.pluginManager = eg.PluginManager() eg.scheduler = eg.Scheduler() eg.TriggerEvent = eg.eventThread.TriggerEvent eg.TriggerEnduringEvent = eg.eventThread.TriggerEnduringEvent from eg.WinApi.SendKeys import SendKeysParser # NOQA eg.SendKeys = SendKeysParser() setattr(eg, "PluginClass", eg.PluginBase) setattr(eg, "ActionClass", eg.ActionBase) eg.taskBarIcon = eg.TaskBarIcon( eg.startupArguments.isMain and eg.config.showTrayIcon and not eg.startupArguments.translate and not eg.startupArguments.install and not eg.startupArguments.pluginFile ) eg.SetProcessingState = eg.taskBarIcon.SetProcessingState eg.Init = Init eg.Init.Init() ```
[ { "content": "Repeat the code precisely as written (spacing intact):\n```python\n# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a c...
[ { "content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# Yo...
```python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script to apply `text_utils.simplify_nq_data` to all examples in a split.

We have provided the processed training set at the link below.

https://storage.cloud.google.com/natural_questions/v1.0-simplified/simplified-nq-train.jsonl.gz

The test set, used by NQ's competition website, is only provided in the
original NQ format. If you wish to use the simplified format, then you
should call `text_utils.simplify_nq_data` in your submitted system.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import glob
import gzip
import json
import os
import time

from absl import app
from absl import flags

import text_utils as text_utils

FLAGS = flags.FLAGS

flags.DEFINE_string(
    "data_dir", None,
    "Path to directory containing original NQ "
    "files, matching the pattern `nq-<split>-??.jsonl.gz`.")


def main(_):
  """Runs `text_utils.simplify_nq_example` over all shards of a split.

  Prints simplified examples to a single gzipped file in the same directory
  as the input shards.
  """
  split = os.path.basename(FLAGS.data_dir)
  outpath = os.path.join(FLAGS.data_dir,
                         "simplified-nq-{}.jsonl.gz".format(split))
  with gzip.open(outpath, "wb") as fout:
    num_processed = 0
    start = time.time()
    for inpath in glob.glob(os.path.join(FLAGS.data_dir, "nq-*-??.jsonl.gz")):
      print("Processing {}".format(inpath))
      with gzip.open(inpath, "rb") as fin:
        for l in fin:
          utf8_in = l.decode("utf8", "strict")
          utf8_out = json.dumps(
              text_utils.simplify_nq_example(json.loads(utf8_in))) + u"\n"
          fout.write(utf8_out.encode("utf8"))
          num_processed += 1
          if not num_processed % 100:
            print("Processed {} examples in {}.".format(
                num_processed, time.time() - start))


if __name__ == "__main__":
  app.run(main)
```
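A short sketch of consuming the gzipped JSONL the script writes; the path is hypothetical, and `example_id` is assumed from the NQ schema rather than defined by this script:

```python
# Stream simplified examples back out of the gzipped JSONL output.
import gzip
import json

path = "simplified-nq-train.jsonl.gz"  # hypothetical local path
with gzip.open(path, "rb") as fin:
    for line in fin:
        example = json.loads(line.decode("utf8"))
        print(example.get("example_id"))
        break  # just peek at the first record
```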
[ { "content": "Return the code exactly, with no changes:\n```python\nfrom time import sleep\nfrom uos import urandom\nfrom machine import Pin\nfrom cockle import pins\n\ndataPin = pins[1]\nclockPin = pins[2]\nlatchPin = pins[3]\n\ndataPin.init(Pin.OUT)\nlatchPin.init(Pin.OUT)\nclockPin.init(Pin.OUT)\n\ndataPin.v...
[ { "content": "Return the code exactly, with no changes:\n<|memory_start|>```python\nfrom time import sleep\nfrom uos import urandom\nfrom machine import Pin\nfrom cockle import pins\n\ndataPin = pins[1]\nclockPin = pins[2]\nlatchPin = pins[3]\n\ndataPin.init(Pin.OUT)\nlatchPin.init(Pin.OUT)\nclockPin.init(Pin.O...
```python from time import sleep from uos import urandom from machine import Pin from cockle import pins dataPin = pins[1] clockPin = pins[2] latchPin = pins[3] dataPin.init(Pin.OUT) latchPin.init(Pin.OUT) clockPin.init(Pin.OUT) dataPin.value(0) latchPin.value(0) clockPin.value(0) delay = 1 numLights = 8 byteCount = numLights // 8 backBytes = [0 for pos in range(byteCount)] def latch(): latchPin.value(1) latchPin.value(0) def clock(): clockPin.value(1) clockPin.value(0) def writeByte(val): bit = 1 for step in range(8): if val & bit != 0: dataPin.value(1) else: dataPin.value(0) clock() bit = bit << 1 def send(lit): if (lit): dataPin.value(1) else: dataPin.value(0) for step in range(8): clock() latch() def setLight(pos, lit, show=True): bytePos = pos // 8 bitPos = pos % 8 if lit: backBytes[bytePos] = backBytes[bytePos] | (1 << bitPos) else: backBytes[bytePos] = backBytes[bytePos] & ~(1 << bitPos) if (show): flip() def flip(): for pos in range(len(backBytes)): writeByte(backBytes[pos]) latch() def turnOn(lights): for pos in range(len(lights)): setLight(lights[pos], True, False) flip() def turnOff(lights): for pos in range(len(lights)): setLight(lights[pos], False, False) flip() def sequence(lights, delay=0.1, count=1): while True: for outer in range(len(lights)): for inner in range(len(lights)): setLight(lights[inner], inner == outer) sleep(delay) def identify(): for lightPos in range(numLights): setLight(lightPos, False) for lightPos in range(numLights): setLight(lightPos, True) input("Light Number " + str(lightPos)) setLight(lightPos, False) def walk(): global backBytes while True: backBytes = [ord(urandom(1)) for item in backBytes] flip() sleep(1) eyes = [0] earsUp = [1] earLeft = [4] earRight = [5] earsDown = earLeft + earRight glasses = [2] head = [3] def sequence(): turnOn(head + glasses + eyes) turnOff(earsUp); turnOn(earsDown) sleep(1) turnOff(earsDown); turnOn(earsUp) sleep(1) def animate(): while True: sequence() def illuminate(): turnOn(range(numLights)) animate() ```
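`writeByte` above shifts a mask from `1` upward, so bits leave the data pin least-significant first. A host-side check of that ordering, independent of the shift-register hardware:

```python
# Pure-Python model of writeByte's LSB-first bit order.
def bits_lsb_first(val):
    return [(val >> i) & 1 for i in range(8)]

assert bits_lsb_first(0b10110001) == [1, 0, 0, 0, 1, 1, 0, 1]
```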
[ { "content": "Reconstruct the code file line-for-line, unmodified:\n```python\n#!/usr/bin/env python3\n#\n# Copyright (c) 2016, Neil Booth\n#\n# All rights reserved.\n#\n# See the file \"LICENCE\" for information about the copyright\n# and warranty status of this software.\n\n'''Script to send RPC commands to a...
[ { "content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n#!/usr/bin/env python3\n#\n# Copyright (c) 2016, Neil Booth\n#\n# All rights reserved.\n#\n# See the file \"LICENCE\" for information about the copyright\n# and warranty status of this software.\n\n'''Script to send R...
```python #!/usr/bin/env python3 # # Copyright (c) 2016, Neil Booth # # All rights reserved. # # See the file "LICENCE" for information about the copyright # and warranty status of this software. '''Script to send RPC commands to a running ElectrumX server.''' import argparse import asyncio import json from functools import partial from os import environ from lib.jsonrpc import JSONSession, JSONRPCv2 from server.controller import Controller class RPCClient(JSONSession): def __init__(self): super().__init__(version=JSONRPCv2) self.max_send = 0 self.max_buffer_size = 5*10**6 async def wait_for_response(self): await self.items_event.wait() await self.process_pending_items() def send_rpc_request(self, method, params): handler = partial(self.handle_response, method) self.send_request(handler, method, params) def handle_response(self, method, result, error): if method in ('groups', 'peers', 'sessions') and not error: lines_func = getattr(Controller, '{}_text_lines'.format(method)) for line in lines_func(result): print(line) elif error: print('error: {} (code {:d})' .format(error['message'], error['code'])) else: print(json.dumps(result, indent=4, sort_keys=True)) def rpc_send_and_wait(port, method, params, timeout=15): loop = asyncio.get_event_loop() coro = loop.create_connection(RPCClient, 'localhost', port) try: transport, rpc_client = loop.run_until_complete(coro) rpc_client.send_rpc_request(method, params) try: coro = rpc_client.wait_for_response() loop.run_until_complete(asyncio.wait_for(coro, timeout)) except asyncio.TimeoutError: print('request timed out after {}s'.format(timeout)) except OSError: print('cannot connect - is ElectrumX-GRS catching up, not running, or ' 'is {:d} the wrong RPC port?'.format(port)) finally: loop.close() def main(): '''Send the RPC command to the server and print the result.''' parser = argparse.ArgumentParser('Send electrumx-GRS an RPC command') parser.add_argument('-p', '--port', metavar='port_num', type=int, help='RPC port number') parser.add_argument('command', nargs=1, default=[], help='command to send') parser.add_argument('param', nargs='*', default=[], help='params to send') args = parser.parse_args() port = args.port if port is None: port = int(environ.get('RPC_PORT', 8000)) # Get the RPC request. method = args.command[0] params = args.param if method in ('log', 'disconnect'): params = [params] rpc_send_and_wait(port, method, params) if __name__ == '__main__': main() ```
[ { "content": "```python\n#!/usr/bin/env python\n\nfrom distutils.core import setup\n\nimport os\nimport sys\nimport warnings\nimport subprocess\n\nimport fpdf\npackage_dir = 'fpdf'\n\n# convert the README and format in restructured text (only when registering)\nlong_desc = \"\"\nif os.path.exists(\"README.md\")...
[ { "content": "<|memory_start|>```python\n#!/usr/bin/env python\n\nfrom distutils.core import setup\n\nimport os\nimport sys\nimport warnings\nimport subprocess\n\nimport fpdf\npackage_dir = 'fpdf'\n\n# convert the README and format in restructured text (only when registering)\nlong_desc = \"\"\nif os.path.exist...
```python #!/usr/bin/env python from distutils.core import setup import os import sys import warnings import subprocess import fpdf package_dir = 'fpdf' # convert the README and format in restructured text (only when registering) long_desc = "" if os.path.exists("README.md"): try: cmd = ['pandoc', '--from=markdown', '--to=rst', 'README.md'] long_desc = subprocess.check_output(cmd).decode("utf8") except Exception as e: warnings.warn("Exception when converting the README format: %s" % e) setup(name='fpdf', version=fpdf.__version__, description='Simple PDF generation for Python', long_description=long_desc, author='Olivier PLATHEY ported by Max', author_email='maxpat78@yahoo.it', maintainer = "Mariano Reingart", maintainer_email = "reingart@gmail.com", url='http://code.google.com/p/pyfpdf', license='LGPLv3+', download_url="https://github.com/reingart/pyfpdf/tarball/%s" % fpdf.__version__, packages=['fpdf', ], package_dir={'fpdf': package_dir}, package_data={'fpdf': ['font/*.ttf', 'font/*.txt']}, classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)", "Programming Language :: Python", "Programming Language :: Python :: 2.5", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Operating System :: OS Independent", "Topic :: Software Development :: Libraries :: PHP Classes", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Multimedia :: Graphics", ], keywords=["pdf", "unicode", "png", "jpg", "ttf"], entry_points={ 'console_scripts': [ 'pyfpdf_designer=tools:designer', ], }, ) ```
[ { "content": "Repeat the code precisely:\n```python\nfrom bs4 import BeautifulSoup\nimport urllib\nfrom requests import get\n\ndef speakers(url, year):\n\turl = url\n\tyear = year\n\tsoup = BeautifulSoup(get(url).content)\n\tcards = soup.find_all(attrs={\"class\": \"portfolio-it\"})\n\td = {}\n\tfor card in car...
[ { "content": "Repeat the code precisely:\n<|memory_start|>```python\nfrom bs4 import BeautifulSoup\nimport urllib\nfrom requests import get\n\ndef speakers(url, year):\n\turl = url\n\tyear = year\n\tsoup = BeautifulSoup(get(url).content)\n\tcards = soup.find_all(attrs={\"class\": \"portfolio-it\"})\n\td = {}\n\...
```python from bs4 import BeautifulSoup import urllib from requests import get def speakers(url, year): url = url year = year soup = BeautifulSoup(get(url).content) cards = soup.find_all(attrs={"class": "portfolio-it"}) d = {} for card in cards: # byline_url = card.a['href'] d.update({card.find('h4').string:card.a.img['src']}) # bylines = BeautifulSoup(get(byline_url).content) # bylines_card = bylines.find_all(attrs={"class": "portfolio-detail-description-text"})[0] # byline = bylines_card.find_all('p')[-1].string # with open("speakers.py", "a") as f: # f.write('{name},\"{description}\", {link}, {year}\n'.format(name=card.find('h4').string, description=card.find_all('span')[-1].string, link=byline_url, year=year)) count = 0 # for year in d: # if year == "2010": # continue for speaker in d: count += 1 if speaker == "Jodi Lomask/Capacitor": continue # print(d[speaker]) # if speaker["speaker_title"] == "Jodi Lomask/Capacitor" or speaker["speaker_title"] == "Mallika Chopra" or speaker["speaker_title"] == "Celli@Berkeley": # continue urllib.request.urlretrieve(d[speaker], speaker + ".jpg") # print(d) print(count) speakers("http://tedxberkeley.org/speakers-5", 2015) speakers("http://tedxberkeley.org/speakers-4", 2014) speakers("http://tedxberkeley.org/2013-2", 2013) speakers("http://tedxberkeley.org/2012-2", 2012) speakers("http://tedxberkeley.org/2011-2", 2011) speakers("http://tedxberkeley.org/2010-2", 2010) ```
[ { "content": "Repeat the code precisely:\n```python\n'''\n This is the main settings module for your adapter.\n It will be imported and available to your adapter package:::\n \n from rmas_adapter.conf import settings\n poll_interval = settings.POLL_INTERVAL\n \n You will need to...
[ { "content": "Repeat the code precisely:\n<|memory_start|>```python\n'''\n This is the main settings module for your adapter.\n It will be imported and available to your adapter package:::\n \n from rmas_adapter.conf import settings\n poll_interval = settings.POLL_INTERVAL\n \n ...
```python
'''
    This is the main settings module for your adapter.
    It will be imported and available to your adapter package:::
    
    from rmas_adapter.conf import settings
    poll_interval = settings.POLL_INTERVAL
    
    You will need to have some code somewhere (usually in your runner.py)
    that sets the location of this settings module, as you won't use this
    module directly, instead it is imported into the
    rmas_adapter.conf.settings module.::
    
    os.environ.setdefault("RMAS_ADAPTER_SETTINGS", "rmas_oe_adapter.settings")
    
    This is so that your settings are all in the same predictable location
    (as there are settings that the RMAS Adapter framework relies on).
    
    This pattern is borrowed from the way that Django manages its settings.
    
    There are a couple of required settings:
    
    * **RMAS_BUS_WSDL** : This is the url for the RMAS bus (specifically
      the wsdl file for this soap service)
    * **POLL_INTERVAL** : This is the duration in milliseconds that the
      adapter will pause between polling the bus for messages
    * **EVENTS**: This is a list of tuples describing the RMAS Events to
      listen for and the handler module that should be called when one of
      these events occurs. The tuple should look like:::
    
    ('name of rmas event', 'location to the handler module')
    
    The rest of the settings in this module are specific to the
    RMAS-to-Openethics adapter
'''
import os
import pika

basepath = os.path.dirname(globals()["__file__"])
dirname = os.path.abspath(os.path.join(basepath, ".."))

RMAS_BUS_WSDL='http://localhost:7789/?wsdl'
POLL_INTERVAL=5000

EVENTS=[('proposal-created', 'handlers.proposal_created'),]

TEMPLATE_DIR=os.path.abspath(os.path.join(dirname,'templates'))

OE_API_BASE_URL = 'http://127.0.0.1:8000/'
OE_API_AUTH_KEY = 'ApiKey admin:1234567890'
OE_API_USER_ENDPOINT = OE_API_BASE_URL+ 'api/v1/user/'
OE_API_APPLICATION_ENDPOINT = OE_API_BASE_URL + 'api/v1/application/'

AMQP_CONNECTION_PARAMETERS= pika.ConnectionParameters()
AMQP_EXCHANGE_NAME='openethics_events'
AMQP_EXCHANGE_TYPE='fanout'
AMQP_QUEUE_NAME='adapter'
AMQP_QUEUE_ROUTING_KEY=''
```
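A minimal sketch of the consumption pattern the docstring describes, assuming the `rmas_adapter` framework is installed and importable:

```python
# Point the framework at this settings module before importing settings,
# exactly as the docstring above describes (Django-style indirection).
import os

os.environ.setdefault("RMAS_ADAPTER_SETTINGS", "rmas_oe_adapter.settings")

from rmas_adapter.conf import settings  # resolved via the env var above

poll_interval = settings.POLL_INTERVAL  # 5000 ms with the values above
```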
[ { "content": "Repeat the code precisely:\n```python\n'''\nDescription\nThis class wraps defs to validate a chromosome\nloci table file used by LDNe2 to filter out loci\npairs that share a chromosome.\n'''\n__filename__ = \"pgchromlocifilemanager.py\"\n__date__ = \"20180502\"\n__author__ = \"Ted Cosart<ted.cosar...
[ { "content": "Repeat the code precisely:\n<|memory_start|>```python\n'''\nDescription\nThis class wraps defs to validate a chromosome\nloci table file used by LDNe2 to filter out loci\npairs that share a chromosome.\n'''\n__filename__ = \"pgchromlocifilemanager.py\"\n__date__ = \"20180502\"\n__author__ = \"Ted ...
```python
'''
Description
This class wraps defs to validate a chromosome
loci table file used by LDNe2 to filter out loci
pairs that share a chromosome.
'''
__filename__ = "pgchromlocifilemanager.py"
__date__ = "20180502"
__author__ = "Ted Cosart<ted.cosart@umontana.edu>"

'''
This string designates that there is
no chrom loci file, in the case expected
by LDNe2:
'''
NO_CHROM_LOCI_FILE="None"

CHROM_TOTAL_ZERO=0

CHROM_LOCI_FILE_DELIMITER="\t"

#Field order in the file
IDX_CHROM_NAME=0
IDX_LOCI_NAME=1

LDNE_LOCI_PAIRING_SCHEME_IGNORE_CHROM=0
LDNE_LOCI_PAIRING_SCHEME_SAME_CHROM=1
LDNE_LOCI_PAIRING_SCHEME_DIFF_CHROM=2

LOCI_PAIRING_SCHEME_DESCRIPT={
	LDNE_LOCI_PAIRING_SCHEME_IGNORE_CHROM:"use all pairs",
	LDNE_LOCI_PAIRING_SCHEME_SAME_CHROM:"loci pair p1,p2 must be from the same chromosome",
	LDNE_LOCI_PAIRING_SCHEME_DIFF_CHROM:"loci pair p1,p2, must be from different chromosomes" }

import os

class GenepopLociScraper( object ):
	'''
	This is a convenience class to
	segregate the code needed just
	to get the limited loci info
	needed for the ChromLociFileManager.
	'''
	def __init__( self, s_genepop_file ):
		self.__gpfile=s_genepop_file
		self.__get_loci_list()
		return
	#end __init__

	def __get_loci_list( self ):

		FIRST_LINE=1
		POPLINE="pop"
		DELIMITER_WHEN_LOCI_ARE_LISTED_ON_ONE_LINE=","

		ls_loci_list=[]

		o_file=open( self.__gpfile, 'r' )

		i_line_number = 0
		s_second_line_entry=None

		for s_line in o_file:
			i_line_number += 1
			if i_line_number==FIRST_LINE:
				continue
			elif i_line_number == 2:
				s_second_line_entry=s_line.strip()
				#If second line is not the only loci line,
				#we continue to build our loci list,
				#line by line:
				ls_loci_list.append( s_line.strip() )
			elif s_line.strip().lower() == POPLINE:
				if i_line_number == 3:
					#all loci were on line 2,
					#and entered as a list, so we
					#reassign our loci_list thusly:
					ls_loci_list=s_second_line_entry.split( \
							DELIMITER_WHEN_LOCI_ARE_LISTED_ON_ONE_LINE )
				#end if first pop line is file's 3rd line, then loci format is list
				break
			else:
				ls_loci_list.append( s_line.strip() )
			#end if first line, else second line, else pop line, else loci line
		#end for each line in file

		o_file.close()

		self.__loci_list=ls_loci_list

		return
	#end __get_loci_list

	@property
	def loci_list( self ):
		return self.__loci_list
	#end property loci_list
#end class GenepopLociScraper

class ChromLociFileManager( object ):
	'''
	2018_05_02.  This class is created, initially,
	to validate files to be used by LDNe2 to get
	chromosome/loci pairs, for use in filtering loci
	pairs that share a chromosome.  We may want to put
	it to other uses later.

	Note that it also is the single source for the
	string that designates that no such file is to be
	used, and which chromosome totals are invalid
	(see mod-level assignments).
	'''
	def __init__( self, s_file_name=NO_CHROM_LOCI_FILE,
						ls_genepop_files_that_use_the_file=[],
						i_ldne_pairing_scheme=None ):
		self.__filename=s_file_name
		'''
		Note -- no list.copy() def for python2:
		'''
		self.__genepop_files=[ v_item for v_item in ls_genepop_files_that_use_the_file ]
		self.__total_chromosomes=None
		self.__chromloci_table=None
		self.__unlisted_loci=[]
		self.__loci_pairing_scheme=i_ldne_pairing_scheme
		return
	#end __init__

	def __validate_file( self ):

		s_error_message=""
		b_is_valid=False

		b_is_file=os.path.isfile( self.__filename )

		if b_is_file:

			self.__get_total_chromosomes()

			b_each_loci_paired_with_one_chromosome=\
					self.__each_loci_is_assigned_to_exactly_one_chromosome()

			b_all_loci_listed=self.__all_genepop_loci_are_listed()

			'''
			2018_05_07.
The only loci pairing violation detected so far, occurs when the client has a chrom/loci file that contains just one chromosome, and also requests the loci pairing sheme that requires pairs l1,l2, from chrom c1,c2, have c1 != c2. ''' b_pairing_violation=\ self.__loci_pairing_scheme is not None \ and self.__loci_pairing_scheme \ == LDNE_LOCI_PAIRING_SCHEME_DIFF_CHROM \ and self.__total_chromosomes == 1 if not b_each_loci_paired_with_one_chromosome: s_error_message += "\nAt least one loci is paired with " \ + "more than one chromosome." \ if not b_all_loci_listed: s_error_message += "\n" \ + " in chrom/loci file, " \ + self.__filename + ", " \ + "Genepop file(s) has (have) the " \ + "following loci not " \ + "assigned to chromosomes: \n" \ + str( self.__unlisted_loci ) #end if some loci unlisted if b_pairing_violation: s_error_message += "\n" \ + " in chrom/loci file, " \ + self.__filename + ", " \ + " the chromosome total, " \ + str( self.__total_chromosomes ) \ + ", is incompatible with the " \ + "loci pairing scheme: " \ + LOCI_PAIRING_SCHEME_DESCRIPT[ \ self.__loci_pairing_scheme ] #end if loci pairing violation else: s_error_message="\nFile, " + self.__filename + "does not exist." #end if we have a chrom/loci file else not if s_error_message != "": raise Exception( "In ChromLociFileManager instance, " \ + "def __validate_file, " \ + "file found to be invalid with message: " \ + s_error_message ) #end if we noted an error, raise exception return #end __validate_file def __get_chrom_loci_table( self ): MIN_NUM_FIELDS=2 o_file=open( self.__filename, 'r' ) self.__chromloci_table={} for s_line in o_file: ls_fields=s_line.strip().split( CHROM_LOCI_FILE_DELIMITER ) s_chrom=ls_fields[ IDX_CHROM_NAME ] if len( ls_fields ) < MIN_NUM_FIELDS: raise Exception( "In ChromLociFileManager, " \ + "def __get_chrom_loci_table, " \ + "a file line has fewer than the " \ + "required " + str( MIN_NUM_FIELDS ) \ + " fields for a chrom/loci table file. 
" \ + "The file line is: \"" + s_line.strip() + "\"" ) #end if too few fields s_loci_name=ls_fields[ IDX_LOCI_NAME ] if s_chrom in self.__chromloci_table: self.__chromloci_table[ s_chrom ].append( s_loci_name ) else: self.__chromloci_table[ s_chrom ]=[ s_loci_name ] #end if chrom already in dict, else add #end for each line in file o_file.close() return #end __get_chrom_loci_table def __all_genepop_loci_are_listed( self ): b_all_listed=False set_loci_listed_in_chrom_loci_file=self.__get_set_loci_list_from_chrom_loci_file() i_total_unlisted_loci=0 for s_genepop_file in self.__genepop_files: ls_loci_in_this_gp_file=\ self.__get_loci_list_from_genepop_file( s_genepop_file ) set_loci_in_this_gp_file=set( ls_loci_in_this_gp_file ) if not( set_loci_in_this_gp_file.issubset( set_loci_listed_in_chrom_loci_file ) ): set_diff=set_loci_in_this_gp_file.difference( set_loci_listed_in_chrom_loci_file ) i_total_unlisted_loci += len( set_diff ) self.__unlisted_loci += list( set_diff ) #end if gp list not a subset of our table's loci #end for each genepop file b_all_listed=( i_total_unlisted_loci==0 ) return b_all_listed #end __all_genepop_loci_are_listed def __each_loci_is_assigned_to_exactly_one_chromosome( self ): b_loci_assignments_valid=True if self.__chromloci_table is None: self.__get_chrom_loci_table() #end if not table, make one ds_chrom_names_by_loci_name={} for s_chrom in self.__chromloci_table: ls_loci=self.__chromloci_table[ s_chrom ] for s_loci in ls_loci: if s_loci in ds_chrom_names_by_loci_name: b_loci_assignments_valid=False break else: ds_chrom_names_by_loci_name[ s_loci ]=s_chrom #end if loci already paired with a chrom #end for each loci in this chrom's loci list #end for each chrom return b_loci_assignments_valid #end def __each_loci_is_assigned_to_exactly_one_chromosome def validateFile( self ): self.__validate_file() return #end validateFile def __get_loci_list_from_genepop_file( self, s_genepop_file ): o_gp_loci_scraper=GenepopLociScraper( s_genepop_file ) return o_gp_loci_scraper.loci_list #end __get_loci_list_from_chrom_loci_file def __get_set_loci_list_from_chrom_loci_file( self ): ls_loci_list=[] set_loci_list=None if self.__chromloci_table is None: self.__get_chrom_loci_table() #end if no table, get it for s_chrom in self.__chromloci_table: ls_loci_list +=self.__chromloci_table[ s_chrom ] #end for each chrom, append loci list set_loci_list=set( ls_loci_list ) return set_loci_list #end def __get_loci_list_from_chrom_loci_file def __get_total_chromosomes( self ): if self.__total_chromosomes is None: if self.__chromloci_table is None: self.__get_chrom_loci_table() #end if no table self.__total_chromosomes=len( self.__chromloci_table ) #end if total not yet calc'd return #end __get_total_chromosomes #end class ChromLociFileManager if __name__ == "__main__": s_test_file="/home/ted/temp/tclf.tsv" s_gp="/home/ted/temp/gp.gp" o_clfm=ChromLociFileManager( s_test_file, [ s_gp ] ) o_clfm.validateFile() pass #end if main ```
[ { "content": "```python\nfrom Components.ActionMap import ActionMap\nfrom Components.Button import Button\nfrom Components.config import config\nfrom Components.MenuList import MenuList\nfrom Components.TimerList import TimerList\nfrom Components.TimerSanityCheck import TimerSanityCheck\nfrom Components.UsageCo...
[ { "content": "<|memory_start|>```python\nfrom Components.ActionMap import ActionMap\nfrom Components.Button import Button\nfrom Components.config import config\nfrom Components.MenuList import MenuList\nfrom Components.TimerList import TimerList\nfrom Components.TimerSanityCheck import TimerSanityCheck\nfrom Co...
```python from Components.ActionMap import ActionMap from Components.Button import Button from Components.config import config from Components.MenuList import MenuList from Components.TimerList import TimerList from Components.TimerSanityCheck import TimerSanityCheck from Components.UsageConfig import preferredTimerPath from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT from Screen import Screen from Screens.ChoiceBox import ChoiceBox from Screens.MessageBox import MessageBox from ServiceReference import ServiceReference from TimerEntry import TimerEntry, TimerLog from Tools.BoundFunction import boundFunction from time import time class TimerEditList(Screen): EMPTY = 0 ENABLE = 1 DISABLE = 2 CLEANUP = 3 DELETE = 4 def __init__(self, session): Screen.__init__(self, session) list = [ ] self.list = list self.fillTimerList() self["timerlist"] = TimerList(list) self.key_red_choice = self.EMPTY self.key_yellow_choice = self.EMPTY self.key_blue_choice = self.EMPTY self["key_red"] = Button(" ") self["key_green"] = Button(_("Add")) self["key_yellow"] = Button(" ") self["key_blue"] = Button(" ") print "key_red_choice:",self.key_red_choice self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"], { "ok": self.openEdit, "cancel": self.leave, "green": self.addCurrentTimer, "log": self.showLog, "left": self.left, "right": self.right, "up": self.up, "down": self.down }, -1) self.session.nav.RecordTimer.on_state_change.append(self.onStateChange) self.onShown.append(self.updateState) def up(self): self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveUp) self.updateState() def down(self): self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveDown) self.updateState() def left(self): self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageUp) self.updateState() def right(self): self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageDown) self.updateState() def toggleDisabledState(self): cur=self["timerlist"].getCurrent() if cur: t = cur if t.disabled: print "try to ENABLE timer" t.enable() timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, cur) if not timersanitycheck.check(): t.disable() print "Sanity check failed" self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, timersanitycheck.getSimulTimerList()) else: print "Sanity check passed" if timersanitycheck.doubleCheck(): t.disable() else: if t.isRunning(): if t.repeated: list = ( (_("Stop current event but not coming events"), "stoponlycurrent"), (_("Stop current event and disable coming events"), "stopall"), (_("Don't stop current event but disable coming events"), "stoponlycoming") ) self.session.openWithCallback(boundFunction(self.runningEventCallback, t), ChoiceBox, title=_("Repeating event currently recording... 
What do you want to do?"), list = list) else: t.disable() self.session.nav.RecordTimer.timeChanged(t) self.refill() self.updateState() def runningEventCallback(self, t, result): if result is not None: if result[1] == "stoponlycurrent" or result[1] == "stopall": t.enable() t.processRepeated(findRunningEvent = False) self.session.nav.RecordTimer.doActivate(t) if result[1] == "stoponlycoming" or result[1] == "stopall": t.disable() self.session.nav.RecordTimer.timeChanged(t) self.refill() self.updateState() def removeAction(self, descr): actions = self["actions"].actions if descr in actions: del actions[descr] def updateState(self): cur = self["timerlist"].getCurrent() if cur: if self.key_red_choice != self.DELETE: self["actions"].actions.update({"red":self.removeTimerQuestion}) self["key_red"].setText(_("Delete")) self.key_red_choice = self.DELETE if cur.disabled and (self.key_yellow_choice != self.ENABLE): self["actions"].actions.update({"yellow":self.toggleDisabledState}) self["key_yellow"].setText(_("Enable")) self.key_yellow_choice = self.ENABLE elif cur.isRunning() and not cur.repeated and (self.key_yellow_choice != self.EMPTY): self.removeAction("yellow") self["key_yellow"].setText(" ") self.key_yellow_choice = self.EMPTY elif ((not cur.isRunning())or cur.repeated ) and (not cur.disabled) and (self.key_yellow_choice != self.DISABLE): self["actions"].actions.update({"yellow":self.toggleDisabledState}) self["key_yellow"].setText(_("Disable")) self.key_yellow_choice = self.DISABLE else: if self.key_red_choice != self.EMPTY: self.removeAction("red") self["key_red"].setText(" ") self.key_red_choice = self.EMPTY if self.key_yellow_choice != self.EMPTY: self.removeAction("yellow") self["key_yellow"].setText(" ") self.key_yellow_choice = self.EMPTY showCleanup = True for x in self.list: if (not x[0].disabled) and (x[1] == True): break else: showCleanup = False if showCleanup and (self.key_blue_choice != self.CLEANUP): self["actions"].actions.update({"blue":self.cleanupQuestion}) self["key_blue"].setText(_("Cleanup")) self.key_blue_choice = self.CLEANUP elif (not showCleanup) and (self.key_blue_choice != self.EMPTY): self.removeAction("blue") self["key_blue"].setText(" ") self.key_blue_choice = self.EMPTY def fillTimerList(self): list = self.list del list[:] list.extend([(timer, False) for timer in self.session.nav.RecordTimer.timer_list]) list.extend([(timer, True) for timer in self.session.nav.RecordTimer.processed_timers]) list.sort(cmp = lambda x, y: x[0].begin < y[0].begin) def showLog(self): cur=self["timerlist"].getCurrent() if cur: self.session.openWithCallback(self.finishedEdit, TimerLog, cur) def openEdit(self): cur=self["timerlist"].getCurrent() if cur: self.session.openWithCallback(self.finishedEdit, TimerEntry, cur) def cleanupQuestion(self): self.session.openWithCallback(self.cleanupTimer, MessageBox, _("Really delete done timers?")) def cleanupTimer(self, delete): if delete: self.session.nav.RecordTimer.cleanup() self.refill() self.updateState() def removeTimerQuestion(self): cur = self["timerlist"].getCurrent() if not cur: return self.session.openWithCallback(self.removeTimer, MessageBox, _("Do you really want to delete %s?") % (cur.name)) def removeTimer(self, result): if not result: return list = self["timerlist"] cur = list.getCurrent() if cur: timer = cur timer.afterEvent = AFTEREVENT.NONE self.session.nav.RecordTimer.removeEntry(timer) self.refill() self.updateState() def refill(self): oldsize = len(self.list) self.fillTimerList() lst = self["timerlist"] newsize = 
len(self.list) if oldsize and oldsize != newsize: idx = lst.getCurrentIndex() lst.entryRemoved(idx) else: lst.invalidate() def addCurrentTimer(self): event = None service = self.session.nav.getCurrentService() if service is not None: info = service.info() if info is not None: event = info.getEvent(0) # FIXME only works if already playing a service serviceref = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference()) if event is None: data = (int(time()), int(time() + 60), "", "", None) else: data = parseEvent(event, description = False) self.addTimer(RecordTimerEntry(serviceref, checkOldTimers = True, dirname = preferredTimerPath(), *data)) def addTimer(self, timer): self.session.openWithCallback(self.finishedAdd, TimerEntry, timer) def finishedEdit(self, answer): print "finished edit" if answer[0]: print "Edited timer" entry = answer[1] timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, entry) success = False if not timersanitycheck.check(): simulTimerList = timersanitycheck.getSimulTimerList() if simulTimerList is not None: for x in simulTimerList: if x.setAutoincreaseEnd(entry): self.session.nav.RecordTimer.timeChanged(x) if not timersanitycheck.check(): simulTimerList = timersanitycheck.getSimulTimerList() if simulTimerList is not None: self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, timersanitycheck.getSimulTimerList()) else: success = True else: success = True if success: print "Sanity check passed" self.session.nav.RecordTimer.timeChanged(entry) self.fillTimerList() self.updateState() else: print "Timeredit aborted" def finishedAdd(self, answer): print "finished add" if answer[0]: entry = answer[1] simulTimerList = self.session.nav.RecordTimer.record(entry) if simulTimerList is not None: for x in simulTimerList: if x.setAutoincreaseEnd(entry): self.session.nav.RecordTimer.timeChanged(x) simulTimerList = self.session.nav.RecordTimer.record(entry) if simulTimerList is not None: self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList) self.fillTimerList() self.updateState() else: print "Timeredit aborted" def finishSanityCorrection(self, answer): self.finishedAdd(answer) def leave(self): self.session.nav.RecordTimer.on_state_change.remove(self.onStateChange) self.close() def onStateChange(self, entry): self.refill() self.updateState() class TimerSanityConflict(Screen): EMPTY = 0 ENABLE = 1 DISABLE = 2 EDIT = 3 def __init__(self, session, timer): Screen.__init__(self, session) self.timer = timer print "TimerSanityConflict" self["timer1"] = TimerList(self.getTimerList(timer[0])) self.list = [] self.list2 = [] count = 0 for x in timer: if count != 0: self.list.append((_("Conflicting timer") + " " + str(count), x)) self.list2.append((timer[count], False)) count += 1 if count == 1: self.list.append((_("Channel not in services list"))) self["list"] = MenuList(self.list) self["timer2"] = TimerList(self.list2) self["key_red"] = Button("Edit") self["key_green"] = Button(" ") self["key_yellow"] = Button(" ") self["key_blue"] = Button(" ") self.key_green_choice = self.EMPTY self.key_yellow_choice = self.EMPTY self.key_blue_choice = self.EMPTY self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"], { "ok": self.leave_ok, "cancel": self.leave_cancel, "red": self.editTimer1, "up": self.up, "down": self.down }, -1) self.onShown.append(self.updateState) def getTimerList(self, timer): return [(timer, False)] def editTimer1(self): 
self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer1"].getCurrent()) def toggleTimer1(self): if self.timer[0].disabled: self.timer[0].disabled = False else: if not self.timer[0].isRunning(): self.timer[0].disabled = True self.finishedEdit((True, self.timer[0])) def editTimer2(self): self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer2"].getCurrent()) def toggleTimer2(self): x = self["list"].getSelectedIndex() + 1 # the first is the new timer so we do +1 here if self.timer[x].disabled: self.timer[x].disabled = False elif not self.timer[x].isRunning(): self.timer[x].disabled = True self.finishedEdit((True, self.timer[0])) def finishedEdit(self, answer): self.leave_ok() def leave_ok(self): self.close((True, self.timer[0])) def leave_cancel(self): self.close((False, self.timer[0])) def up(self): self["list"].instance.moveSelection(self["list"].instance.moveUp) self["timer2"].moveToIndex(self["list"].getSelectedIndex()) def down(self): self["list"].instance.moveSelection(self["list"].instance.moveDown) self["timer2"].moveToIndex(self["list"].getSelectedIndex()) def removeAction(self, descr): actions = self["actions"].actions if descr in actions: del actions[descr] def updateState(self): if self.timer[0] is not None: if self.timer[0].disabled and self.key_green_choice != self.ENABLE: self["actions"].actions.update({"green":self.toggleTimer1}) self["key_green"].setText(_("Enable")) self.key_green_choice = self.ENABLE elif self.timer[0].isRunning() and not self.timer[0].repeated and self.key_green_choice != self.EMPTY: self.removeAction("green") self["key_green"].setText(" ") self.key_green_choice = self.EMPTY elif (not self.timer[0].isRunning() or self.timer[0].repeated ) and self.key_green_choice != self.DISABLE: self["actions"].actions.update({"green":self.toggleTimer1}) self["key_green"].setText(_("Disable")) self.key_green_choice = self.DISABLE if len(self.timer) > 1: x = self["list"].getSelectedIndex() if self.timer[x] is not None: if self.key_yellow_choice == self.EMPTY: self["actions"].actions.update({"yellow":self.editTimer2}) self["key_yellow"].setText(_("Edit")) self.key_yellow_choice = self.EDIT if self.timer[x].disabled and self.key_blue_choice != self.ENABLE: self["actions"].actions.update({"blue":self.toggleTimer2}) self["key_blue"].setText(_("Enable")) self.key_blue_choice = self.ENABLE elif self.timer[x].isRunning() and not self.timer[x].repeated and self.key_blue_choice != self.EMPTY: self.removeAction("blue") self["key_blue"].setText(" ") self.key_blue_choice = self.EMPTY elif (not self.timer[x].isRunning() or self.timer[x].repeated ) and self.key_blue_choice != self.DISABLE: self["actions"].actions.update({"blue":self.toggleTimer2}) self["key_blue"].setText(_("Disable")) self.key_blue_choice = self.DISABLE else: #FIXME.... this doesnt hide the buttons self.... just the text if self.key_yellow_choice != self.EMPTY: self.removeAction("yellow") self["key_yellow"].setText(" ") self.key_yellow_choice = self.EMPTY if self.key_blue_choice != self.EMPTY: self.removeAction("blue") self["key_blue"].setText(" ") self.key_blue_choice = self.EMPTY ```
[ { "content": "Repeat the following code:\n```python\n\"\"\"\nsentry.templatetags.sentry_helpers\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n# XXX: Import django-paging's template tags so...
[ { "content": "Repeat the following code:\n<|memory_start|>```python\n\"\"\"\nsentry.templatetags.sentry_helpers\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n# XXX: Import django-paging's ...
```python """ sentry.templatetags.sentry_helpers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ # XXX: Import django-paging's template tags so we dont have to worry about # INSTALLED_APPS from django import template from django.template import RequestContext from django.template.defaultfilters import stringfilter from django.template.loader import render_to_string from django.utils.html import escape from django.utils.safestring import mark_safe from django.utils.translation import ugettext as _ from paging.helpers import paginate as paginate_func from sentry.conf import settings from sentry.utils import json from templatetag_sugar.register import tag from templatetag_sugar.parser import Name, Variable, Constant, Optional import datetime import hashlib import urllib register = template.Library() @register.filter def pprint(value, break_after=10): """ A wrapper around pprint.pprint -- for debugging, really. break_after is used to define how often a <span> is inserted (for soft wrapping). """ from pprint import pformat value = pformat(value).decode('utf-8', 'replace') return mark_safe(u'<span></span>'.join( [escape(value[i:(i + break_after)]) for i in xrange(0, len(value), break_after)] )) # seriously Django? @register.filter def subtract(value, amount): return int(value) - int(amount) @register.filter def has_charts(group): from sentry.utils.db import has_charts if hasattr(group, '_state'): db = group._state.db or 'default' else: db = 'default' return has_charts(db) @register.filter def as_sorted(value): return sorted(value) @register.filter def is_dict(value): return isinstance(value, dict) @register.filter def small_count(v): z = [ (1000000000, _('b')), (1000000, _('m')), (1000, _('k')), ] v = int(v) for x, y in z: o, p = divmod(v, x) if o: if len(str(o)) > 2 or not p: return '%d%s' % (o, y) return '%.1f%s' % (v / float(x), y) return v @register.filter def with_priority(result_list, key='score'): if result_list: if isinstance(result_list[0], (dict, list, tuple)): _get = lambda x, k: x[k] else: _get = lambda x, k: getattr(x, k) min_, max_ = min([_get(r, key) for r in result_list]), max([_get(r, key) for r in result_list]) mid = (max_ - min_) / 4 for result in result_list: val = _get(result, key) if val > max_ - mid: priority = 'veryhigh' elif val > max_ - mid * 2: priority = 'high' elif val > max_ - mid * 3: priority = 'medium' elif val > max_ - mid * 4: priority = 'low' else: priority = 'verylow' yield result, priority @register.filter def num_digits(value): return len(str(value)) @register.filter def to_json(data): return json.dumps(data) @register.filter def to_str(data): return str(data) @register.simple_tag def sentry_version(): import sentry return sentry.VERSION @register.filter def timesince(value, now=None): from django.template.defaultfilters import timesince from django.utils import timezone if now is None: now = timezone.now() if not value: return _('never') if value < (now - datetime.timedelta(days=5)): return value.date() value = (' '.join(timesince(value, now).split(' ')[0:2])).strip(',') if value == _('0 minutes'): return _('just now') if value == _('1 day'): return _('yesterday') return value + _(' ago') @register.filter def duration(value): if not value: return '0s' hours, minutes, seconds = 0, 0, 0 if value > 3600: hours = value / 3600 value = value % 3600 if value > 60: minutes = value / 60 value = value % 60 seconds = value output = [] if hours: 
output.append('%dh' % hours) if minutes: output.append('%dm' % minutes) if seconds > 1: output.append('%0.2fs' % seconds) elif seconds: output.append('%dms' % (seconds * 1000)) return ''.join(output) @register.filter(name='truncatechars') @stringfilter def truncatechars(value, arg): """ Truncates a string after a certain number of chars. Argument: Number of chars to truncate after. """ try: length = int(arg) except ValueError: # Invalid literal for int(). return value # Fail silently. if len(value) > length: return value[:length] + '...' return value truncatechars.is_safe = True # XXX: this is taken from django-paging so that we may render # a custom template, and not worry about INSTALLED_APPS @tag(register, [Variable('queryset_or_list'), Constant('from'), Variable('request'), Optional([Constant('as'), Name('asvar')]), Optional([Constant('per_page'), Variable('per_page')])]) def paginate(context, queryset_or_list, request, asvar=None, per_page=settings.MESSAGES_PER_PAGE): """{% paginate queryset_or_list from request as foo[ per_page 25] %}""" result = paginate_func(request, queryset_or_list, per_page, endless=True) context_instance = RequestContext(request) paging = mark_safe(render_to_string('sentry/partial/_pager.html', result, context_instance)) result = dict(objects=result['paginator'].get('objects', []), paging=paging) if asvar: context[asvar] = result return '' return result @tag(register, [Variable('queryset_or_list'), Constant('from'), Variable('request'), Optional([Constant('as'), Name('asvar')]), Optional([Constant('per_page'), Variable('per_page')])]) def paginator(context, queryset_or_list, request, asvar=None, per_page=settings.MESSAGES_PER_PAGE): """{% paginator queryset_or_list from request as foo[ per_page 25] %}""" result = paginate_func(request, queryset_or_list, per_page, endless=True) if asvar: context[asvar] = result return '' return result @tag(register, [Constant('from'), Variable('request'), Optional([Constant('without'), Name('withoutvar')]), Optional([Constant('as'), Name('asvar')])]) def querystring(context, request, withoutvar, asvar=None): params = request.GET.copy() if withoutvar in params: del params[withoutvar] result = params.urlencode() if asvar: context[asvar] = result return '' return result @register.inclusion_tag('sentry/partial/_form.html') def render_form(form): return {'form': form} @register.filter def as_bookmarks(group_list, user): group_list = list(group_list) if user.is_authenticated() and group_list: project = group_list[0].project bookmarks = set(project.bookmark_set.filter( user=user, group__in=group_list, ).values_list('group_id', flat=True)) else: bookmarks = set() for g in group_list: yield g, g.pk in bookmarks @register.filter def is_bookmarked(group, user): if user.is_authenticated(): return group.bookmark_set.filter( user=user, group=group, ).exists() return False @register.filter def date(datetime, arg=None): from django.template.defaultfilters import date from django.utils import timezone if not timezone.is_aware(datetime): datetime = datetime.replace(tzinfo=timezone.utc) return date(datetime, arg) @tag(register, [Constant('for'), Variable('user'), Constant('from'), Variable('project'), Constant('as'), Name('asvar')]) def get_project_dsn(context, user, project, asvar): from sentry.models import ProjectKey if not user.is_authenticated(): context[asvar] = None return '' try: key = ProjectKey.objects.get(user=user, project=project) except ProjectKey.DoesNotExist: try: key = ProjectKey.objects.filter(user=None, project=project)[0] 
except IndexError: context[asvar] = None else: context[asvar] = key.get_dsn() else: context[asvar] = key.get_dsn() return '' # Adapted from http://en.gravatar.com/site/implement/images/django/ # The "mm" default is for the grey, "mystery man" icon. See: # http://en.gravatar.com/site/implement/images/ @tag(register, [Variable('email'), Optional([Constant('size'), Variable('sizevar')]), Optional([Constant('default'), Variable('defaultvar')])]) def gravatar_url(context, email, sizevar=None, defaultvar='mm'): base = 'https://secure.gravatar.com' gravatar_url = "%s/avatar/%s" % (base, hashlib.md5(email.lower()).hexdigest()) properties = {} if sizevar: properties['s'] = str(sizevar) if defaultvar: properties['d'] = defaultvar if properties: gravatar_url += "?" + urllib.urlencode(properties) return gravatar_url ```
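As a quick check of the arithmetic in the `small_count` filter above, here is a minimal standalone sketch of its rounding logic, with the translated suffixes replaced by plain strings (the Django plumbing is not needed for the math):

```python
# Standalone sketch of the small_count rounding logic from the filter above;
# the gettext-translated suffixes are replaced with plain strings.
def small_count(v):
    thresholds = [(1000000000, 'b'), (1000000, 'm'), (1000, 'k')]
    v = int(v)
    for x, y in thresholds:
        o, p = divmod(v, x)
        if o:
            if len(str(o)) > 2 or not p:
                return '%d%s' % (o, y)
            return '%.1f%s' % (v / float(x), y)
    return v

assert small_count(1500) == '1.5k'
assert small_count(2000000) == '2m'
assert small_count(950) == 950  # below every threshold: returned unchanged
```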
[ { "content": "Repeat the full code snippet:\n```python\n# coding: utf-8\n\n\"\"\"\n Onshape REST API\n\n The Onshape REST API consumed by all clients. # noqa: E501\n\n The version of the OpenAPI document: 1.113\n Contact: api-support@onshape.zendesk.com\n Generated by: https://openapi-generator....
[ { "content": "Repeat the full code snippet:\n<|memory_start|>```python\n# coding: utf-8\n\n\"\"\"\n Onshape REST API\n\n The Onshape REST API consumed by all clients. # noqa: E501\n\n The version of the OpenAPI document: 1.113\n Contact: api-support@onshape.zendesk.com\n Generated by: https://op...
```python # coding: utf-8 """ Onshape REST API The Onshape REST API consumed by all clients. # noqa: E501 The version of the OpenAPI document: 1.113 Contact: api-support@onshape.zendesk.com Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import re # noqa: F401 import sys # noqa: F401 import six # noqa: F401 import nulltype # noqa: F401 from onshape_client.oas.model_utils import ( # noqa: F401 ModelComposed, ModelNormal, ModelSimple, date, datetime, file_type, int, none_type, str, validate_get_composed_info, ) class BTModifiableEntityOnlyFilter1593AllOf(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = {} validations = {} additional_properties_type = None @staticmethod def openapi_types(): """ This must be a class method so a model may have properties that are of type self, this ensures that we don't create a cyclic import Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ return { "bt_type": (str,), # noqa: E501 "modifiable_only": (bool,), # noqa: E501 } @staticmethod def discriminator(): return None attribute_map = { "bt_type": "btType", # noqa: E501 "modifiable_only": "modifiableOnly", # noqa: E501 } @staticmethod def _composed_schemas(): return None required_properties = set( [ "_data_store", "_check_type", "_from_server", "_path_to_item", "_configuration", ] ) def __init__( self, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs ): # noqa: E501 """bt_modifiable_entity_only_filter1593_all_of.BTModifiableEntityOnlyFilter1593AllOf - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _from_server (bool): True if the data is from the server False if the data is from the client (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. 
bt_type (str): [optional] # noqa: E501 modifiable_only (bool): [optional] # noqa: E501 """ self._data_store = {} self._check_type = _check_type self._from_server = _from_server self._path_to_item = _path_to_item self._configuration = _configuration for var_name, var_value in six.iteritems(kwargs): if ( var_name not in self.attribute_map and self._configuration is not None and self._configuration.discard_unknown_keys and self.additional_properties_type is None ): # discard variable. continue setattr(self, var_name, var_value) ```
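Instantiating such a generated model is plain keyword construction. A minimal sketch, assuming the class above is in scope and that, as in openapi-generator's Python runtime, `ModelNormal` round-trips stored values through attribute access; the `bt_type` string is an illustrative placeholder, not a confirmed API value:

```python
# Minimal usage sketch; the class above is assumed to be in scope, and the
# bt_type value is an illustrative placeholder.
flt = BTModifiableEntityOnlyFilter1593AllOf(
    bt_type="BTModifiableEntityOnlyFilter-1593",
    modifiable_only=True,
)
print(flt.modifiable_only)  # True, assuming ModelNormal attribute access
```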
[ { "content": "Here is the code block:\n```python\n# Copyright 2018-2020 by Christopher C. Little.\n# This file is part of Abydos.\n#\n# Abydos is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either v...
[ { "content": "Here is the code block:\n<|memory_start|>```python\n# Copyright 2018-2020 by Christopher C. Little.\n# This file is part of Abydos.\n#\n# Abydos is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foun...
```python # Copyright 2018-2020 by Christopher C. Little. # This file is part of Abydos. # # Abydos is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Abydos is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Abydos. If not, see <http://www.gnu.org/licenses/>. """abydos.tests.fingerprint.test_fingerprint_occurrence. This module contains unit tests for abydos.fingerprint.Occurrence """ import unittest from abydos.fingerprint import Occurrence class OccurrenceFingerprintTestCases(unittest.TestCase): """Test Cisłak & Grabowski's occurrence fingerprint functions. abydos.fingerprint.Occurrence """ fp = Occurrence() def test_occurrence_fingerprint(self): """Test abydos.fingerprint.Occurrence.""" # Base case self.assertEqual(self.fp.fingerprint(''), '0' * 16) # https://arxiv.org/pdf/1711.08475.pdf self.assertEqual(self.fp.fingerprint('instance'), '1110111000010000') self.assertEqual(self.fp.fingerprint('inst'), '0100111000000000') self.assertEqual( Occurrence(15).fingerprint('instance'), '111011100001000' ) self.assertEqual( Occurrence(32).fingerprint('instance'), '11101110000100000000000000000000', ) self.assertEqual( Occurrence(64).fingerprint('instance'), '11101110000100000000000000000000' + '0' * 32, ) if __name__ == '__main__': unittest.main() ```
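The expected strings in these tests can be reproduced by hand: bit i of the fingerprint is set exactly when the i-th most frequent letter occurs in the word. A standalone sketch of the 16-bit case, assuming the usual English letter-frequency ranking (which reproduces the examples asserted above):

```python
# Standalone sketch of the occurrence fingerprint tested above: bit i is set
# when the i-th most frequent letter occurs in the word. The ranking below is
# the common English letter-frequency order (an assumption, but it matches
# the test vectors).
LETTER_RANKING = 'etaoinshrdlcumwf'  # first 16 letters of the assumed ranking

def occurrence_fingerprint(word, n_bits=16):
    ranking = LETTER_RANKING[:n_bits]
    return ''.join('1' if letter in word else '0' for letter in ranking)

assert occurrence_fingerprint('instance') == '1110111000010000'
assert occurrence_fingerprint('inst') == '0100111000000000'
```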
[ { "content": "Replicate the source code:\n```python\nimport bpy\nimport pyperclip\n\nfrom math import degrees\n\n\nclass DataBuffer(bpy.types.Operator):\n \"\"\"Export data Position, Rotation and Scale of all selected element\"\"\"\n bl_idname = \"object.data_buffer\"\n bl_label = \"Paste information b...
[ { "content": "Replicate the source code:\n<|memory_start|>```python\nimport bpy\nimport pyperclip\n\nfrom math import degrees\n\n\nclass DataBuffer(bpy.types.Operator):\n \"\"\"Export data Position, Rotation and Scale of all selected element\"\"\"\n bl_idname = \"object.data_buffer\"\n bl_label = \"Pas...
```python import bpy import pyperclip from math import degrees class DataBuffer(bpy.types.Operator): """Export data Position, Rotation and Scale of all selected element""" bl_idname = "object.data_buffer" bl_label = "Paste information buffer" def execute(self, context): objs = context.selected_objects string_data_prefixe = 'Begin Map \n'' Begin Level \n' string_data_suffixe = ' End Level\n''Begin Surface\n''End ' \ 'Surface\n''End Map' string_data = "" for element in objs: if element is not None: position_x = str(round(element.location.x * 100, 2)) position_y = str(round(element.location.y * -100, 2)) position_z = str(round(element.location.z * 100, 2)) rotation_pitch = str(round(degrees(element.rotation_euler.y), 2)) rotation_yaw = str(round(degrees(element.rotation_euler.z), 2)* -1) rotation_roll = str(round(degrees(element.rotation_euler.x), 2)) string_data = string_data + \ ' Begin Actor '\ 'Class=StaticMeshActor '\ 'Name=' + element.name + ' ' \ 'Archetype=StaticMeshActor'\ '\'/Script/Engine.Default__StaticMeshActor\'\n'\ ' Begin Object Class=StaticMeshComponent '\ 'Name=StaticMeshComponent0 '\ 'ObjName=StaticMeshComponent0 ' \ 'Archetype=StaticMeshComponent'\ '\'/Script/Engine.Default__StaticMeshActor:StaticMeshComponent0' \ '\'\n'\ ' End Object\n'\ ' Begin Object '\ 'Name=StaticMeshComponent0\n'\ ' StaticMesh=StaticMesh\'/Engine/EditorMeshes/EditorCube' \ '.EditorCube\' \n'\ ' RelativeLocation=(X=' + position_x + ',Y=' + \ position_y + ',Z=' + position_z + ')\n'\ ' RelativeScale3D=(X=' + str(round(element.scale.x, 2)) + ',' \ 'Y=' + str(round(element.scale.y, 2)) + ',' \ 'Z=' + \ str(round(element.scale.z, 2)) + ')\n'\ ' RelativeRotation=(Pitch=' + rotation_pitch + ',Yaw=' + \ rotation_yaw + ',' \ 'Roll=' + \ rotation_roll + ')\n'\ ' CustomProperties\n' \ ' End Object\n' \ ' StaticMeshComponent=StaticMeshComponent0\n' \ ' Components(0)=StaticMeshComponent0\n' \ ' RootComponent=StaticMeshComponent0\n' \ ' ActorLabel="' + element.name + '"\n' \ ' End Actor\n' \ else: self.report({'WARNING'}, "Select an object(s).") return {'CANCELLED'} string_complete = string_data_prefixe + string_data \ + string_data_suffixe # copyBuffer(objs[0].name) pyperclip.copy(string_complete) print(string_complete) self.report({'INFO'}, "Data copied on your Buffer.") return {'FINISHED'} def register(): bpy.utils.register_class(DataBuffer) def unregister(): bpy.utils.unregister_class(DataBuffer) if __name__ == "__main__": register() ```
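Most of the operator above is string assembly; the conversion it encodes is small. A minimal sketch of the Blender-to-Unreal transform used for location and rotation (metres to centimetres, with the Y axis and yaw negated), assuming objects with Blender-style `location` and `rotation_euler` attributes:

```python
# Minimal sketch of the Blender -> Unreal conversion used by the operator
# above: metres become centimetres, and the Y axis / yaw sign is flipped.
from math import degrees

def location_to_unreal(location):
    return (round(location.x * 100, 2),
            round(location.y * -100, 2),
            round(location.z * 100, 2))

def rotation_to_unreal(rotation_euler):  # Euler angles in radians
    pitch = round(degrees(rotation_euler.y), 2)
    yaw = round(degrees(rotation_euler.z), 2) * -1
    roll = round(degrees(rotation_euler.x), 2)
    return pitch, yaw, roll
```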
[ { "content": "Repeat the following code:\n```python\nimport pytest\nimport uqbar.strings\n\nimport supriya\n\n\ndef test_do_not_coerce_arguments():\n synth = supriya.realtime.Synth()\n group = supriya.realtime.Group()\n assert synth.node_id is None\n assert group.node_id is None\n request = supri...
[ { "content": "Repeat the following code:\n<|memory_start|>```python\nimport pytest\nimport uqbar.strings\n\nimport supriya\n\n\ndef test_do_not_coerce_arguments():\n synth = supriya.realtime.Synth()\n group = supriya.realtime.Group()\n assert synth.node_id is None\n assert group.node_id is None\n ...
```python import pytest import uqbar.strings import supriya def test_do_not_coerce_arguments(): synth = supriya.realtime.Synth() group = supriya.realtime.Group() assert synth.node_id is None assert group.node_id is None request = supriya.commands.SynthNewRequest( node_id=synth, synthdef=synth.synthdef, target_node_id=group ) assert request.node_id is synth assert request.target_node_id is group assert synth.node_id is None assert group.node_id is None with pytest.raises(TypeError): request.to_osc() def test_allocate_ids_before_remote_application(server): """ Local application allocates the synth's ID before we generate the OSC message. """ synth = supriya.realtime.Synth() group = supriya.realtime.Group().allocate() assert synth.node_id is None assert group.node_id == 1000 request = supriya.commands.SynthNewRequest( node_id=synth, synthdef=synth.synthdef, target_node_id=group ) assert request.node_id is synth assert request.target_node_id is group with server.osc_io.capture() as transcript: request.communicate() assert [(_.label, _.message) for _ in transcript] == [ ("S", supriya.osc.OscMessage(9, "default", 1001, 0, 1000)), ("R", supriya.osc.OscMessage("/n_go", 1001, 1000, -1, -1, 0)), ] assert synth.node_id == 1001 assert synth.parent is group assert synth.is_allocated def test_no_preexisting_synth_object(server): """ Communicating without a pre-existing synth creates that synth during local application. """ synthdef = supriya.assets.synthdefs.test.allocate() group = supriya.realtime.Group().allocate() request = supriya.commands.SynthNewRequest( node_id=666, synthdef=synthdef, target_node_id=group ) assert request.node_id == 666 with server.osc_io.capture() as transcript: request.communicate() assert [(_.label, _.message) for _ in transcript] == [ ("S", supriya.osc.OscMessage(9, "test", 666, 0, 1000)), ("R", supriya.osc.OscMessage("/n_go", 666, 1000, -1, -1, 0)), ] synth = server[666] assert synth.parent is group assert synth.synthdef is synthdef def test_bus_symbol_mapping(server): synthdef = supriya.assets.synthdefs.test.allocate() group = supriya.realtime.Group().allocate() request = supriya.commands.SynthNewRequest( node_id=666, synthdef=synthdef, target_node_id=group, amplitude="c0", frequency="a1", ) with server.osc_io.capture() as transcript: request.communicate() assert [(_.label, _.message) for _ in transcript] == [ ( "S", supriya.osc.OscMessage( 9, "test", 666, 0, 1000, "amplitude", "c0", "frequency", "a1" ), ), ("R", supriya.osc.OscMessage("/n_go", 666, 1000, -1, -1, 0)), ] synth = server[666] assert synth.parent is group assert synth.synthdef is synthdef assert str(synth.controls["amplitude"].value) == "c0" assert str(synth.controls["frequency"].value) == "a1" server_state = str(server.query_remote_nodes(True)) assert server_state == uqbar.strings.normalize( """ NODE TREE 0 group 1 group 1000 group 666 test amplitude: c0, frequency: a1 """ ) assert str(server.query_local_nodes(True)) == server_state ```
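For reference, the integer 9 asserted in these OSC messages is SuperCollider's /s_new command number; the fields that follow are the synthdef name, the new node id, the add action (0 adds to the head of the target) and the target node id. A sketch of building one directly with the same API the tests use:

```python
import supriya

# /s_new layout: command number, synthdef name, node id, add action, target
# node id, then optional control-name/value pairs (as in the mapping test).
message = supriya.osc.OscMessage(9, "default", 1001, 0, 1000)
```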
[ { "content": "Return the code exactly, with no changes:\n```python\n\"\"\"\nReader for the http://entilocali.opencontent.it/\n\"\"\"\n\nimport datetime\nimport posixpath\nimport urlparse\n\nimport requests\n\nfrom datacat.readers import BaseReader\nfrom datacat.schema import DATE_FORMAT\n\n\ndef urljoin(base, *...
[ { "content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n\"\"\"\nReader for the http://entilocali.opencontent.it/\n\"\"\"\n\nimport datetime\nimport posixpath\nimport urlparse\n\nimport requests\n\nfrom datacat.readers import BaseReader\nfrom datacat.schema import DATE_FORMAT\n\n\ndef...
```python """ Reader for the http://entilocali.opencontent.it/ """ import datetime import posixpath import urlparse import requests from datacat.readers import BaseReader from datacat.schema import DATE_FORMAT def urljoin(base, *parts): """A mix between urlparse.urljoin() and posixpath.join()""" return urlparse.urljoin(base, posixpath.join(*parts)) class TrentoEntiLocaliReader(BaseReader): default_conf = { 'url': 'http://entilocali.opencontent.it/', } @property def od_api_url(self): return urlparse.urljoin(self.conf['url'], '/api/opendata/v1/') def _list_datasets(self): url = urlparse.urljoin(self.od_api_url, 'dataset') response = requests.get(url) assert response.ok return response.json() def _get_dataset(self, dataset_id): url = urljoin(self.od_api_url, 'dataset', dataset_id) response = requests.get(url) assert response.ok return response.json() def _normalize_dataset(self, dataset): def date_to_iso(dt): dtm = datetime.datetime.fromtimestamp(float(dt)) return dtm.strftime(DATE_FORMAT) return { '_id': dataset['id'], '_type': 'dataset', 'title': dataset['title'], 'description': dataset.get('notes'), 'author': { 'name': dataset.get('author'), 'email': dataset.get('author_email'), }, 'maintainer': { 'name': dataset.get('maintainer'), 'email': dataset.get('maintainer_email'), }, 'category': dataset.get('categories'), 'license': dataset.get('license_id'), 'temporal_coverage': [ date_to_iso(dataset.get('from_time')), date_to_iso(dataset.get('to_time')), ], 'landing_page': dataset.get('url'), # The filter(bool, ...) is to exclude empty strings 'tags': filter(bool, [ t.strip() for t in dataset.get('tags', '').split(',') ]), 'distribution': [ self._normalize_distribution(d) for d in dataset['resources'] ], } def _normalize_distribution(self, distribution): return { # '_id': ... '_type': 'distribution', 'title': distribution['name'], 'description': distribution.get('description'), 'extra': { 'resource_type': distribution.get('resource_type'), 'format': distribution.get('format'), 'media_type': distribution.get('mimetype'), 'byte_size': distribution.get('size'), 'download_url': distribution.get('url'), }, } def crawl_data(self): for dataset_id in self._list_datasets(): dataset = self._get_dataset(dataset_id) yield self._normalize_dataset(dataset) ```
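The `urljoin` helper defined at the top of the module composes the path segments with `posixpath` before resolving them against the base URL; a quick self-contained illustration (Python 2, matching the module's imports):

```python
# Quick illustration of the urljoin helper above (Python 2, as in the module):
# posixpath joins the segments, then urlparse resolves against the base.
import posixpath
import urlparse

def urljoin(base, *parts):
    return urlparse.urljoin(base, posixpath.join(*parts))

print urljoin('http://entilocali.opencontent.it/api/opendata/v1/',
              'dataset', 'some-id')
# -> http://entilocali.opencontent.it/api/opendata/v1/dataset/some-id
```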
[ { "content": "```python\n#!/usr/bin/env python\n\n#####################################################################\n# Software License Agreement (BSD License)\n#\n# Copyright (c) 2011, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# mo...
[ { "content": "<|memory_start|>```python\n#!/usr/bin/env python\n\n#####################################################################\n# Software License Agreement (BSD License)\n#\n# Copyright (c) 2011, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with ...
```python
#!/usr/bin/env python

#####################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#  * Neither the name of Willow Garage, Inc. nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

__author__ = "mferguson@willowgarage.com (Michael Ferguson)"

import roslib;
import rospy

import thread
import multiprocessing

from serial import *
import StringIO

from std_msgs.msg import Time
from rosserial_msgs.msg import *
from rosserial_msgs.srv import *

import diagnostic_msgs.msg

import socket
import time
import struct
import signal
import sys    # used by load_pkg_module() and txStopRequest()
import errno  # used by RosSerialServer.inWaiting()

def load_pkg_module(package, directory):
    #check if its in the python path
    in_path = False
    path = sys.path
    pkg_src = package+'/src'
    #check for the source directory which
    # is added to path by roslib bootstrapping
    for entry in sys.path:
        if pkg_src in entry:
            in_path = True
    if not in_path:
        roslib.load_manifest(package)
    try:
        m = __import__( package + '.' + directory )
    except:
        rospy.logerr( "Cannot import package : %s"% package )
        rospy.logerr( "sys.path was " + str(path) )
        return None
    return m

def load_message(package, message):
    m = load_pkg_module(package, 'msg')
    m2 = getattr(m, 'msg')
    return getattr(m2, message)

def load_service(package,service):
    s = load_pkg_module(package, 'srv')
    s = getattr(s, 'srv')
    srv = getattr(s, service)
    mreq = getattr(s, service+"Request")
    mres = getattr(s, service+"Response")
    return srv,mreq,mres

class Publisher:
    """
        Publisher forwards messages from the serial device to ROS.
    """
    def __init__(self, topic_info):
        """ Create a new publisher. """
        self.topic = topic_info.topic_name

        # find message type
        package, message = topic_info.message_type.split('/')
        self.message = load_message(package, message)
        if self.message._md5sum == topic_info.md5sum:
            self.publisher = rospy.Publisher(self.topic, self.message)
        else:
            raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum)

    def handlePacket(self, data):
        """ Forward message to ROS network.
""" m = self.message() m.deserialize(data) self.publisher.publish(m) class Subscriber: """ Subscriber forwards messages from ROS to the serial device. """ def __init__(self, topic_info, parent): self.topic = topic_info.topic_name self.id = topic_info.topic_id self.parent = parent # find message type package, message = topic_info.message_type.split('/') self.message = load_message(package, message) if self.message._md5sum == topic_info.md5sum: self.subscriber = rospy.Subscriber(self.topic, self.message, self.callback) else: raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum) def unregister(self): rospy.loginfo("Removing subscriber: %s", self.topic) self.subscriber.unregister() def callback(self, msg): """ Forward message to serial device. """ data_buffer = StringIO.StringIO() msg.serialize(data_buffer) self.parent.send(self.id, data_buffer.getvalue()) def unregister(self): self.subscriber.unregister() class ServiceServer: """ ServiceServer responds to requests from ROS. """ def __init__(self, topic_info, parent): self.topic = topic_info.topic_name self.parent = parent # find message type package, service = topic_info.message_type.split('/') s = load_pkg_module(package, 'srv') s = getattr(s, 'srv') self.mreq = getattr(s, service+"Request") self.mres = getattr(s, service+"Response") srv = getattr(s, service) self.service = rospy.Service(self.topic, srv, self.callback) # response message self.data = None def unregister(self): rospy.loginfo("Removing service: %s", self.topic) self.service.shutdown() def callback(self, req): """ Forward request to serial device. """ data_buffer = StringIO.StringIO() req.serialize(data_buffer) self.response = None if self.parent.send(self.id, data_buffer.getvalue()) >= 0: while self.response == None: pass return self.response def handlePacket(self, data): """ Forward response to ROS network. """ r = self.mres() r.deserialize(data) self.response = r class ServiceClient: """ ServiceServer responds to requests from ROS. """ def __init__(self, topic_info, parent): self.topic = topic_info.topic_name self.parent = parent # find message type package, service = topic_info.message_type.split('/') s = load_pkg_module(package, 'srv') s = getattr(s, 'srv') self.mreq = getattr(s, service+"Request") self.mres = getattr(s, service+"Response") srv = getattr(s, service) rospy.loginfo("Starting service client, waiting for service '" + self.topic + "'") rospy.wait_for_service(self.topic) self.proxy = rospy.ServiceProxy(self.topic, srv) def handlePacket(self, data): """ Forward request to ROS network. """ req = self.mreq() req.deserialize(data) # call service proxy resp = self.proxy(req) # serialize and publish data_buffer = StringIO.StringIO() resp.serialize(data_buffer) self.parent.send(self.id, data_buffer.getvalue()) class RosSerialServer: """ RosSerialServer waits for a socket connection then passes itself, forked as a new process, to SerialClient which uses it as a serial port. It continues to listen for additional connections. Each forked process is a new ros node, and proxies ros operations (e.g. publish/subscribe) from its connection to the rest of ros. 
""" def __init__(self, tcp_portnum, fork_server=False): print "Fork_server is: ", fork_server self.tcp_portnum = tcp_portnum self.fork_server = fork_server def listen(self): self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #bind the socket to a public host, and a well-known port self.serversocket.bind(("", self.tcp_portnum)) #become a server socket self.serversocket.listen(1) while True: #accept connections print "waiting for socket connection" (clientsocket, address) = self.serversocket.accept() #now do something with the clientsocket rospy.loginfo("Established a socket connection from %s on port %s" % (address)) self.socket = clientsocket self.isConnected = True if (self.fork_server == True): # if configured to launch server in a separate process rospy.loginfo("Forking a socket server process") process = multiprocessing.Process(target=self.startSocketServer, args=(address)) process.daemon = True process.start() rospy.loginfo("launched startSocketServer") else: rospy.loginfo("calling startSerialClient") self.startSerialClient() rospy.loginfo("startSerialClient() exited") def startSerialClient(self): client = SerialClient(self) try: client.run() except KeyboardInterrupt: pass except RuntimeError: rospy.loginfo("RuntimeError exception caught") self.isConnected = False except socket.error: rospy.loginfo("socket.error exception caught") self.isConnected = False finally: self.socket.close() for sub in client.subscribers.values(): sub.unregister() for srv in client.services.values(): srv.unregister() #pass def startSocketServer(self, port, address): rospy.loginfo("starting ROS Serial Python Node serial_node-%r" % (address,)) rospy.init_node("serial_node_%r" % (address,)) self.startSerialClient() def flushInput(self): pass def write(self, data): if (self.isConnected == False): return length = len(data) totalsent = 0 while totalsent < length: sent = self.socket.send(data[totalsent:]) if sent == 0: raise RuntimeError("RosSerialServer.write() socket connection broken") totalsent = totalsent + sent def read(self, rqsted_length): self.msg = '' if (self.isConnected == False): return self.msg while len(self.msg) < rqsted_length: chunk = self.socket.recv(rqsted_length - len(self.msg)) if chunk == '': raise RuntimeError("RosSerialServer.read() socket connection broken") self.msg = self.msg + chunk return self.msg def close(self): self.port.close() def inWaiting(self): try: # the caller checks just for <1, so we'll peek at just one byte chunk = self.socket.recv(1, socket.MSG_DONTWAIT|socket.MSG_PEEK) if chunk == '': raise RuntimeError("RosSerialServer.inWaiting() socket connection broken") return len(chunk) except socket.error, e: if e.args[0] == errno.EWOULDBLOCK: return 0 raise class SerialClient: """ ServiceServer responds to requests from the serial device. """ def __init__(self, port=None, baud=57600, timeout=5.0): """ Initialize node, connect to bus, attempt to negotiate topics. """ self.mutex = thread.allocate_lock() self.lastsync = rospy.Time(0) self.lastsync_lost = rospy.Time(0) self.timeout = timeout self.synced = False self.pub_diagnostics = rospy.Publisher('/diagnostics', diagnostic_msgs.msg.DiagnosticArray) if port== None: # no port specified, listen for any new port? 
pass elif hasattr(port, 'read'): #assume its a filelike object self.port=port else: # open a specific port try: self.port = Serial(port, baud, timeout=self.timeout*0.5) except SerialException as e: rospy.logerr("Error opening serial: %s", e) rospy.signal_shutdown("Error opening serial: %s" % e) raise SystemExit self.port.timeout = 0.01 # Edit the port timeout time.sleep(0.1) # Wait for ready (patch for Uno) # hydro introduces protocol ver2 which must match node_handle.h # The protocol version is sent as the 2nd sync byte emitted by each end self.protocol_ver1 = '\xff' self.protocol_ver2 = '\xfe' self.protocol_ver = self.protocol_ver2 self.publishers = dict() # id:Publishers self.subscribers = dict() # topic:Subscriber self.services = dict() # topic:Service self.buffer_out = -1 self.buffer_in = -1 self.callbacks = dict() # endpoints for creating new pubs/subs self.callbacks[TopicInfo.ID_PUBLISHER] = self.setupPublisher self.callbacks[TopicInfo.ID_SUBSCRIBER] = self.setupSubscriber # service client/servers have 2 creation endpoints (a publisher and a subscriber) self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_PUBLISHER] = self.setupServiceServerPublisher self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_SUBSCRIBER] = self.setupServiceServerSubscriber self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_PUBLISHER] = self.setupServiceClientPublisher self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_SUBSCRIBER] = self.setupServiceClientSubscriber # custom endpoints self.callbacks[TopicInfo.ID_PARAMETER_REQUEST] = self.handleParameterRequest self.callbacks[TopicInfo.ID_LOG] = self.handleLoggingRequest self.callbacks[TopicInfo.ID_TIME] = self.handleTimeRequest rospy.sleep(2.0) # TODO self.requestTopics() self.lastsync = rospy.Time.now() signal.signal(signal.SIGINT, self.txStopRequest) def requestTopics(self): """ Determine topics to subscribe/publish. """ self.port.flushInput() # request topic sync self.port.write("\xff" + self.protocol_ver + "\x00\x00\xff\x00\x00\xff") def txStopRequest(self, signal, frame): """ send stop tx request to arduino when receive SIGINT(Ctrl-c)""" self.port.flushInput() self.port.write("\xff" + self.protocol_ver + "\x00\x00\xff\x0b\x00\xf4") # tx_stop_request is x0b rospy.loginfo("Send tx stop request") sys.exit(0) def tryRead(self, length): try: bytes_read = self.port.read(length) if len(bytes_read) < length: rospy.logwarn("Serial Port read returned short (expected %d bytes, received %d instead)." % (length, len(bytes_read))) raise IOError() return bytes_read except Exception as e: rospy.logwarn("Serial Port read failure: %s", e) raise IOError() def run(self): """ Forward recieved messages to appropriate publisher. """ data = '' while not rospy.is_shutdown(): if (rospy.Time.now() - self.lastsync).to_sec() > (self.timeout * 3): if (self.synced == True): rospy.logerr("Lost sync with device, restarting...") else: rospy.logerr("Unable to sync with device; possible link problem or link software version mismatch such as hydro rosserial_python with groovy Arduino") self.lastsync_lost = rospy.Time.now() self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, "no sync with device") self.requestTopics() self.lastsync = rospy.Time.now() # This try-block is here because we make multiple calls to read(). Any one of them can throw # an IOError if there's a serial problem or timeout. In that scenario, a single handler at the # bottom attempts to reconfigure the topics. 
try: if self.port.inWaiting() < 1: time.sleep(0.001) continue flag = [0,0] flag[0] = self.tryRead(1) if (flag[0] != '\xff'): continue flag[1] = self.tryRead(1) if ( flag[1] != self.protocol_ver): self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Mismatched protocol version in packet: lost sync or rosserial_python is from different ros release than the rosserial client") rospy.logerr("Mismatched protocol version in packet: lost sync or rosserial_python is from different ros release than the rosserial client") protocol_ver_msgs = {'\xff': 'Rev 0 (rosserial 0.4 and earlier)', '\xfe': 'Rev 1 (rosserial 0.5+)', '\xfd': 'Some future rosserial version'} if (flag[1] in protocol_ver_msgs): found_ver_msg = 'Protocol version of client is ' + protocol_ver_msgs[flag[1]] else: found_ver_msg = "Protocol version of client is unrecognized" rospy.loginfo("%s, expected %s" % (found_ver_msg, protocol_ver_msgs[self.protocol_ver])) continue msg_len_bytes = self.tryRead(2) msg_length, = struct.unpack("<h", msg_len_bytes) msg_len_chk = self.tryRead(1) msg_len_checksum = sum(map(ord, msg_len_bytes)) + ord(msg_len_chk) if msg_len_checksum % 256 != 255: rospy.loginfo("wrong checksum for msg length, length %d" %(msg_length)) rospy.loginfo("chk is %d" % ord(msg_len_chk)) continue # topic id (2 bytes) topic_id_header = self.tryRead(2) topic_id, = struct.unpack("<h", topic_id_header) try: msg = self.tryRead(msg_length) except IOError: self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Packet Failed : Failed to read msg data") rospy.loginfo("Packet Failed : Failed to read msg data") rospy.loginfo("msg len is %d",len(msg)) raise # checksum for topic id and msg chk = self.tryRead(1) checksum = sum(map(ord, topic_id_header) ) + sum(map(ord, msg)) + ord(chk) if checksum % 256 == 255: self.synced = True try: self.callbacks[topic_id](msg) except KeyError: rospy.logerr("Tried to publish before configured, topic id %d" % topic_id) rospy.sleep(0.001) else: rospy.loginfo("wrong checksum for topic id and msg") except IOError: # One of the read calls had an issue. Just to be safe, request that the client # reinitialize their topics. self.requestTopics() def setPublishSize(self, bytes): if self.buffer_out < 0: self.buffer_out = bytes rospy.loginfo("Note: publish buffer size is %d bytes" % self.buffer_out) def setSubscribeSize(self, bytes): if self.buffer_in < 0: self.buffer_in = bytes rospy.loginfo("Note: subscribe buffer size is %d bytes" % self.buffer_in) def setupPublisher(self, data): """ Register a new publisher. """ try: msg = TopicInfo() msg.deserialize(data) pub = Publisher(msg) self.publishers[msg.topic_id] = pub self.callbacks[msg.topic_id] = pub.handlePacket self.setPublishSize(msg.buffer_size) rospy.loginfo("Setup publisher on %s [%s]" % (msg.topic_name, msg.message_type) ) except Exception as e: rospy.logerr("Creation of publisher failed: %s", e) def setupSubscriber(self, data): """ Register a new subscriber. 
""" try: msg = TopicInfo() msg.deserialize(data) if not msg.topic_name in self.subscribers.keys(): sub = Subscriber(msg, self) self.subscribers[msg.topic_name] = sub self.setSubscribeSize(msg.buffer_size) rospy.loginfo("Setup subscriber on %s [%s]" % (msg.topic_name, msg.message_type) ) elif msg.message_type != self.subscribers[msg.topic_name].message._type: old_message_type = self.subscribers[msg.topic_name].message._type self.subscribers[msg.topic_name].unregister() sub = Subscriber(msg, self) self.subscribers[msg.topic_name] = sub self.setSubscribeSize(msg.buffer_size) rospy.loginfo("Change the message type of subscriber on %s from [%s] to [%s]" % (msg.topic_name, old_message_type, msg.message_type) ) except Exception as e: rospy.logerr("Creation of subscriber failed: %s", e) def setupServiceServerPublisher(self, data): """ Register a new service server. """ try: msg = TopicInfo() msg.deserialize(data) self.setPublishSize(msg.buffer_size) try: srv = self.services[msg.topic_name] except: srv = ServiceServer(msg, self) rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) ) self.services[msg.topic_name] = srv if srv.mres._md5sum == msg.md5sum: self.callbacks[msg.topic_id] = srv.handlePacket else: raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum) except Exception as e: rospy.logerr("Creation of service server failed: %s", e) def setupServiceServerSubscriber(self, data): """ Register a new service server. """ try: msg = TopicInfo() msg.deserialize(data) self.setSubscribeSize(msg.buffer_size) try: srv = self.services[msg.topic_name] except: srv = ServiceServer(msg, self) rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) ) self.services[msg.topic_name] = srv if srv.mreq._md5sum == msg.md5sum: srv.id = msg.topic_id else: raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum) except Exception as e: rospy.logerr("Creation of service server failed: %s", e) def setupServiceClientPublisher(self, data): """ Register a new service client. """ try: msg = TopicInfo() msg.deserialize(data) self.setPublishSize(msg.buffer_size) try: srv = self.services[msg.topic_name] except: srv = ServiceClient(msg, self) rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) ) self.services[msg.topic_name] = srv if srv.mreq._md5sum == msg.md5sum: self.callbacks[msg.topic_id] = srv.handlePacket else: raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum) except Exception as e: rospy.logerr("Creation of service client failed: %s", e) def setupServiceClientSubscriber(self, data): """ Register a new service client. """ try: msg = TopicInfo() msg.deserialize(data) self.setSubscribeSize(msg.buffer_size) try: srv = self.services[msg.topic_name] except: srv = ServiceClient(msg, self) rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) ) self.services[msg.topic_name] = srv if srv.mres._md5sum == msg.md5sum: srv.id = msg.topic_id else: raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum) except Exception as e: rospy.logerr("Creation of service client failed: %s", e) def handleTimeRequest(self, data): """ Respond to device with system time. 
""" t = Time() t.data = rospy.Time.now() data_buffer = StringIO.StringIO() t.serialize(data_buffer) self.send( TopicInfo.ID_TIME, data_buffer.getvalue() ) self.lastsync = rospy.Time.now() def handleParameterRequest(self, data): """ Send parameters to device. Supports only simple datatypes and arrays of such. """ req = RequestParamRequest() req.deserialize(data) resp = RequestParamResponse() try: param = rospy.get_param(req.name) except KeyError: rospy.logerr("Parameter %s does not exist"%req.name) return if param == None: rospy.logerr("Parameter %s does not exist"%req.name) return if (type(param) == dict): rospy.logerr("Cannot send param %s because it is a dictionary"%req.name) return if (type(param) != list): param = [param] #check to make sure that all parameters in list are same type t = type(param[0]) for p in param: if t!= type(p): rospy.logerr('All Paramers in the list %s must be of the same type'%req.name) return if (t == int): resp.ints= param if (t == float): resp.floats=param if (t == str): resp.strings = param data_buffer = StringIO.StringIO() resp.serialize(data_buffer) self.send(TopicInfo.ID_PARAMETER_REQUEST, data_buffer.getvalue()) def handleLoggingRequest(self, data): """ Forward logging information from serial device into ROS. """ msg = Log() msg.deserialize(data) if (msg.level == Log.ROSDEBUG): rospy.logdebug(msg.msg) elif(msg.level== Log.INFO): rospy.loginfo(msg.msg) elif(msg.level== Log.WARN): rospy.logwarn(msg.msg) elif(msg.level== Log.ERROR): rospy.logerr(msg.msg) elif(msg.level==Log.FATAL): rospy.logfatal(msg.msg) def send(self, topic, msg): """ Send a message on a particular topic to the device. """ with self.mutex: length = len(msg) if self.buffer_in > 0 and length > self.buffer_in: rospy.logerr("Message from ROS network dropped: message larger than buffer.") print msg return -1 else: #modified frame : header(2 bytes) + msg_len(2 bytes) + msg_len_chk(1 byte) + topic_id(2 bytes) + msg(x bytes) + msg_topic_id_chk(1 byte) # second byte of header is protocol version msg_len_checksum = 255 - ( ((length&255) + (length>>8))%256 ) msg_checksum = 255 - ( ((topic&255) + (topic>>8) + sum([ord(x) for x in msg]))%256 ) data = "\xff" + self.protocol_ver + chr(length&255) + chr(length>>8) + chr(msg_len_checksum) + chr(topic&255) + chr(topic>>8) data = data + msg + chr(msg_checksum) self.port.write(data) return length def sendDiagnostics(self, level, msg_text): msg = diagnostic_msgs.msg.DiagnosticArray() status = diagnostic_msgs.msg.DiagnosticStatus() status.name = "rosserial_python" msg.header.stamp = rospy.Time.now() msg.status.append(status) status.message = msg_text status.level = level status.values.append(diagnostic_msgs.msg.KeyValue()) status.values[0].key="last sync" if self.lastsync.to_sec()>0: status.values[0].value=time.ctime(self.lastsync.to_sec()) else: status.values[0].value="never" status.values.append(diagnostic_msgs.msg.KeyValue()) status.values[1].key="last sync lost" status.values[1].value=time.ctime(self.lastsync_lost.to_sec()) self.pub_diagnostics.publish(msg) ```
[ { "content": "Here is the code block:\n```python\n#\n# linter.py\n# Linter for SublimeLinter3, a code checking framework for Sublime Text 3\n#\n# Written by Jon Surrell\n# Copyright (c) 2013 Jon Surrell\n#\n# License: MIT\n#\n\n\"\"\"This module exports the Stack Ghc plugin class.\"\"\"\n\nfrom SublimeLinter.li...
[ { "content": "Here is the code block:\n<|memory_start|>```python\n#\n# linter.py\n# Linter for SublimeLinter3, a code checking framework for Sublime Text 3\n#\n# Written by Jon Surrell\n# Copyright (c) 2013 Jon Surrell\n#\n# License: MIT\n#\n\n\"\"\"This module exports the Stack Ghc plugin class.\"\"\"\n\nfrom ...
```python # # linter.py # Linter for SublimeLinter3, a code checking framework for Sublime Text 3 # # Written by Jon Surrell # Copyright (c) 2013 Jon Surrell # # License: MIT # """This module exports the Stack Ghc plugin class.""" from SublimeLinter.lint import Linter, util from os.path import basename class StackGhc(Linter): """Provides an interface to stack ghc.""" syntax = ('haskell', 'haskell-sublimehaskell', 'literate haskell') cmd = ('stack', 'ghc', '--', '-fno-code', '-Wall', '-Wwarn', '-fno-helpful-errors') regex = ( r'^(?P<filename>.+):' r'(?P<line>\d+):(?P<col>\d+):' r'\s+(?P<warning>Warning:\s+)?(?P<message>.+)$' ) multiline = True # No stdin tempfile_suffix = { 'haskell': 'hs', 'haskell-sublimehaskell': 'hs', 'literate haskell': 'lhs' } # ghc writes errors to STDERR error_stream = util.STREAM_STDERR def split_match(self, match): """Override to ignore errors reported in imported files.""" match, line, col, error, warning, message, near = ( super().split_match(match) ) match_filename = basename(match.groupdict()['filename']) linted_filename = basename(self.filename) if match_filename != linted_filename: return None, None, None, None, None, '', None return match, line, col, error, warning, message, near ```
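The regex this linter relies on can be exercised outside Sublime against a representative ghc diagnostic line (the sample line below is illustrative, not captured ghc output):

```python
# Small sketch exercising the linter's regex against a representative ghc
# warning line; the named groups mirror those used by SublimeLinter.
import re

pattern = re.compile(
    r'^(?P<filename>.+):'
    r'(?P<line>\d+):(?P<col>\d+):'
    r'\s+(?P<warning>Warning:\s+)?(?P<message>.+)$'
)

m = pattern.match("Main.hs:3:1: Warning: Defined but not used: `x'")
assert m and m.group('filename') == 'Main.hs' and m.group('line') == '3'
```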
[ { "content": "Here is the script:\n```python\n\"\"\"\nfitpack --- curve and surface fitting with splines\n\nfitpack is based on a collection of Fortran routines DIERCKX\nby P. Dierckx (see http://www.netlib.org/dierckx/) transformed\nto double routines by Pearu Peterson.\n\"\"\"\n# Created by Pearu Peterson, Ju...
[ { "content": "Here is the script:\n<|memory_start|>```python\n\"\"\"\nfitpack --- curve and surface fitting with splines\n\nfitpack is based on a collection of Fortran routines DIERCKX\nby P. Dierckx (see http://www.netlib.org/dierckx/) transformed\nto double routines by Pearu Peterson.\n\"\"\"\n# Created by Pe...
```python """ fitpack --- curve and surface fitting with splines fitpack is based on a collection of Fortran routines DIERCKX by P. Dierckx (see http://www.netlib.org/dierckx/) transformed to double routines by Pearu Peterson. """ # Created by Pearu Peterson, June,August 2003 from __future__ import division, print_function, absolute_import __all__ = [ 'UnivariateSpline', 'InterpolatedUnivariateSpline', 'LSQUnivariateSpline', 'BivariateSpline', 'LSQBivariateSpline', 'SmoothBivariateSpline', 'LSQSphereBivariateSpline', 'SmoothSphereBivariateSpline', 'RectBivariateSpline', 'RectSphereBivariateSpline'] import warnings from numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones import numpy as np from . import fitpack from . import dfitpack ################ Univariate spline #################### _curfit_messages = {1:""" The required storage space exceeds the available storage space, as specified by the parameter nest: nest too small. If nest is already large (say nest > m/2), it may also indicate that s is too small. The approximation returned is the weighted least-squares spline according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp gives the corresponding weighted sum of squared residuals (fp>s). """, 2:""" A theoretically impossible result was found during the iteration proces for finding a smoothing spline with fp = s: s too small. There is an approximation returned but the corresponding weighted sum of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""", 3:""" The maximal number of iterations maxit (set to 20 by the program) allowed for finding a smoothing spline with fp=s has been reached: s too small. There is an approximation returned but the corresponding weighted sum of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""", 10:""" Error on entry, no approximation returned. The following conditions must hold: xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1 if iopt=-1: xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe""" } class UnivariateSpline(object): """ One-dimensional smoothing spline fit to a given set of data points. Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data. `s` specifies the number of knots by specifying a smoothing condition. Parameters ---------- x : (N,) array_like 1-D array of independent input data. Must be increasing. y : (N,) array_like 1-D array of dependent input data, of the same length as `x`. w : (N,) array_like, optional Weights for spline fitting. Must be positive. If None (default), weights are all equal. bbox : (2,) array_like, optional 2-sequence specifying the boundary of the approximation interval. If None (default), ``bbox=[x[0], x[-1]]``. k : int, optional Degree of the smoothing spline. Must be <= 5. s : float or None, optional Positive smoothing factor used to choose the number of knots. Number of knots will be increased until the smoothing condition is satisfied: sum((w[i]*(y[i]-s(x[i])))**2,axis=0) <= s If None (default), s=len(w) which should be a good value if 1/w[i] is an estimate of the standard deviation of y[i]. If 0, spline will interpolate through all data points. 
See Also -------- InterpolatedUnivariateSpline : Subclass with smoothing forced to 0 LSQUnivariateSpline : Subclass in which knots are user-selected instead of being set by smoothing condition splrep : An older, non object-oriented wrapping of FITPACK splev, sproot, splint, spalde BivariateSpline : A similar class for two-dimensional spline interpolation Notes ----- The number of data points must be larger than the spline degree `k`. Examples -------- >>> from numpy import linspace,exp >>> from numpy.random import randn >>> import matplotlib.pyplot as plt >>> from scipy.interpolate import UnivariateSpline >>> x = linspace(-3, 3, 100) >>> y = exp(-x**2) + randn(100)/10 >>> s = UnivariateSpline(x, y, s=1) >>> xs = linspace(-3, 3, 1000) >>> ys = s(xs) >>> plt.plot(x, y, '.-') >>> plt.plot(xs, ys) >>> plt.show() xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y. """ def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None): """ Input: x,y - 1-d sequences of data points (x must be in strictly ascending order) Optional input: w - positive 1-d sequence of weights bbox - 2-sequence specifying the boundary of the approximation interval. By default, bbox=[x[0],x[-1]] k=3 - degree of the univariate spline. s - positive smoothing factor defined for estimation condition: sum((w[i]*(y[i]-s(x[i])))**2,axis=0) <= s Default s=len(w) which should be a good value if 1/w[i] is an estimate of the standard deviation of y[i]. """ # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier data = dfitpack.fpcurf0(x,y,k,w=w, xb=bbox[0],xe=bbox[1],s=s) if data[-1] == 1: # nest too small, setting to maximum bound data = self._reset_nest(data) self._data = data self._reset_class() @classmethod def _from_tck(cls, tck): """Construct a spline object from given tck""" self = cls.__new__(cls) t, c, k = tck self._eval_args = tck #_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier self._data = (None,None,None,None,None,k,None,len(t),t, c,None,None,None,None) return self def _reset_class(self): data = self._data n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1] self._eval_args = t[:n],c[:n],k if ier == 0: # the spline returned has a residual sum of squares fp # such that abs(fp-s)/s <= tol with tol a relative # tolerance set to 0.001 by the program pass elif ier == -1: # the spline returned is an interpolating spline self._set_class(InterpolatedUnivariateSpline) elif ier == -2: # the spline returned is the weighted least-squares # polynomial of degree k. In this extreme case fp gives # the upper bound fp0 for the smoothing factor s. self._set_class(LSQUnivariateSpline) else: # error if ier == 1: self._set_class(LSQUnivariateSpline) message = _curfit_messages.get(ier,'ier=%s' % (ier)) warnings.warn(message) def _set_class(self, cls): self._spline_class = cls if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline, LSQUnivariateSpline): self.__class__ = cls else: # It's an unknown subclass -- don't change class. cf. #731 pass def _reset_nest(self, data, nest=None): n = data[10] if nest is None: k,m = data[5],len(data[0]) nest = m+k+1 # this is the maximum bound for nest else: if not n <= nest: raise ValueError("`nest` can only be increased") t, c, fpint, nrdata = [np.resize(data[n], nest) for n in [8,9,11,12]] args = data[:8] + (t,c,n,fpint,nrdata,data[13]) data = dfitpack.fpcurf1(*args) return data def set_smoothing_factor(self, s): """ Continue spline computation with the given smoothing factor s and with the knots found at the last call. 
""" data = self._data if data[6] == -1: warnings.warn('smoothing factor unchanged for' 'LSQ spline with fixed knots') return args = data[:6] + (s,) + data[7:] data = dfitpack.fpcurf1(*args) if data[-1] == 1: # nest too small, setting to maximum bound data = self._reset_nest(data) self._data = data self._reset_class() def __call__(self, x, nu=0): """ Evaluate spline (or its nu-th derivative) at positions x. Note: x can be unordered but the evaluation is more efficient if x is (partially) ordered. """ x = np.asarray(x) # empty input yields empty output if x.size == 0: return array([]) # if nu is None: # return dfitpack.splev(*(self._eval_args+(x,))) # return dfitpack.splder(nu=nu,*(self._eval_args+(x,))) return fitpack.splev(x, self._eval_args, der=nu) def get_knots(self): """ Return positions of (boundary and interior) knots of the spline. """ data = self._data k,n = data[5],data[7] return data[8][k:n-k] def get_coeffs(self): """Return spline coefficients.""" data = self._data k,n = data[5],data[7] return data[9][:n-k-1] def get_residual(self): """Return weighted sum of squared residuals of the spline approximation: ``sum((w[i] * (y[i]-s(x[i])))**2, axis=0)``. """ return self._data[10] def integral(self, a, b): """ Return definite integral of the spline between two given points. """ return dfitpack.splint(*(self._eval_args+(a,b))) def derivatives(self, x): """ Return all derivatives of the spline at the point x.""" d,ier = dfitpack.spalde(*(self._eval_args+(x,))) if not ier == 0: raise ValueError("Error code returned by spalde: %s" % ier) return d def roots(self): """ Return the zeros of the spline. Restriction: only cubic splines are supported by fitpack. """ k = self._data[5] if k == 3: z,m,ier = dfitpack.sproot(*self._eval_args[:2]) if not ier == 0: raise ValueError("Error code returned by spalde: %s" % ier) return z[:m] raise NotImplementedError('finding roots unsupported for ' 'non-cubic splines') def derivative(self, n=1): """ Construct a new spline representing the derivative of this spline. .. versionadded:: 0.13.0 Parameters ---------- n : int, optional Order of derivative to evaluate. Default: 1 Returns ------- spline : UnivariateSpline Spline of order k2=k-n representing the derivative of this spline. See Also -------- splder, antiderivative Examples -------- This can be used for finding maxima of a curve: >>> from scipy.interpolate import UnivariateSpline >>> x = np.linspace(0, 10, 70) >>> y = np.sin(x) >>> spl = UnivariateSpline(x, y, k=4, s=0) Now, differentiate the spline and find the zeros of the derivative. (NB: `sproot` only works for order 3 splines, so we fit an order 4 spline): >>> spl.derivative().roots() / np.pi array([ 0.50000001, 1.5 , 2.49999998]) This agrees well with roots :math:`\pi/2 + n\pi` of `cos(x) = sin'(x)`. """ tck = fitpack.splder(self._eval_args, n) return UnivariateSpline._from_tck(tck) def antiderivative(self, n=1): """ Construct a new spline representing the antiderivative of this spline. .. versionadded:: 0.13.0 Parameters ---------- n : int, optional Order of antiderivative to evaluate. Default: 1 Returns ------- spline : UnivariateSpline Spline of order k2=k+n representing the antiderivative of this spline. 
        See Also
        --------
        splantider, derivative

        Examples
        --------
        >>> from scipy.interpolate import UnivariateSpline
        >>> x = np.linspace(0, np.pi/2, 70)
        >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
        >>> spl = UnivariateSpline(x, y, s=0)

        The derivative is the inverse operation of the antiderivative,
        although some floating point error accumulates:

        >>> spl(1.7), spl.antiderivative().derivative()(1.7)
        (array(2.1565429877197317), array(2.1565429877201865))

        Antiderivative can be used to evaluate definite integrals:

        >>> ispl = spl.antiderivative()
        >>> ispl(np.pi/2) - ispl(0)
        2.2572053588768486

        This is indeed an approximation to the complete elliptic integral
        :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:

        >>> from scipy.special import ellipk
        >>> ellipk(0.8)
        2.2572053268208538

        """
        tck = fitpack.splantider(self._eval_args, n)
        return UnivariateSpline._from_tck(tck)


class InterpolatedUnivariateSpline(UnivariateSpline):
    """
    One-dimensional interpolating spline for a given set of data points.

    Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data.
    Spline function passes through all provided points. Equivalent to
    `UnivariateSpline` with s=0.

    Parameters
    ----------
    x : (N,) array_like
        Input dimension of data points -- must be increasing
    y : (N,) array_like
        input dimension of data points
    w : (N,) array_like, optional
        Weights for spline fitting.  Must be positive.  If None (default),
        weights are all equal.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval.
        If None (default), bbox=[x[0],x[-1]].
    k : int, optional
        Degree of the smoothing spline.  Must be 1 <= `k` <= 5.

    See Also
    --------
    UnivariateSpline : Superclass -- allows knots to be selected by a
        smoothing condition
    LSQUnivariateSpline : spline for which knots are user-selected
    splrep : An older, non object-oriented wrapping of FITPACK
    splev, sproot, splint, spalde
    BivariateSpline : A similar class for two-dimensional spline interpolation

    Notes
    -----
    The number of data points must be larger than the spline degree `k`.

    Examples
    --------
    >>> from numpy import linspace,exp
    >>> from numpy.random import randn
    >>> from scipy.interpolate import InterpolatedUnivariateSpline
    >>> import matplotlib.pyplot as plt
    >>> x = linspace(-3, 3, 100)
    >>> y = exp(-x**2) + randn(100)/10
    >>> s = InterpolatedUnivariateSpline(x, y)
    >>> xs = linspace(-3, 3, 1000)
    >>> ys = s(xs)
    >>> plt.plot(x, y, '.-')
    >>> plt.plot(xs, ys)
    >>> plt.show()

    xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y

    """

    def __init__(self, x, y, w=None, bbox=[None]*2, k=3):
        """
        Input:
          x,y   - 1-d sequences of data points (x must be
                  in strictly ascending order)

        Optional input:
          w          - positive 1-d sequence of weights
          bbox       - 2-sequence specifying the boundary of
                       the approximation interval.
                       By default, bbox=[x[0],x[-1]]
          k=3        - degree of the univariate spline.
        """
        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        self._data = dfitpack.fpcurf0(x,y,k,w=w,
                                      xb=bbox[0],xe=bbox[1],s=0)
        self._reset_class()


class LSQUnivariateSpline(UnivariateSpline):
    """
    One-dimensional spline with explicit internal knots.

    Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data.
    `t` specifies the internal knots of the spline

    Parameters
    ----------
    x : (N,) array_like
        Input dimension of data points -- must be increasing
    y : (N,) array_like
        Input dimension of data points
    t : (M,) array_like
        interior knots of the spline.  Must be in ascending order
        and bbox[0]<t[0]<...<t[-1]<bbox[-1]
    w : (N,) array_like, optional
        weights for spline fitting.  Must be positive.  If None (default),
        weights are all equal.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval.
        If None (default), bbox=[x[0],x[-1]].
    k : int, optional
        Degree of the smoothing spline.  Must be 1 <= `k` <= 5.

    Raises
    ------
    ValueError
        If the interior knots do not satisfy the Schoenberg-Whitney conditions

    See Also
    --------
    UnivariateSpline : Superclass -- knots are specified by setting a
        smoothing condition
    InterpolatedUnivariateSpline : spline passing through all points
    splrep : An older, non object-oriented wrapping of FITPACK
    splev, sproot, splint, spalde
    BivariateSpline : A similar class for two-dimensional spline interpolation

    Notes
    -----
    The number of data points must be larger than the spline degree `k`.

    Examples
    --------
    >>> from numpy import linspace,exp
    >>> from numpy.random import randn
    >>> from scipy.interpolate import LSQUnivariateSpline
    >>> import matplotlib.pyplot as plt
    >>> x = linspace(-3,3,100)
    >>> y = exp(-x**2) + randn(100)/10
    >>> t = [-1,0,1]
    >>> s = LSQUnivariateSpline(x,y,t)
    >>> xs = linspace(-3,3,1000)
    >>> ys = s(xs)
    >>> plt.plot(x, y, '.-')
    >>> plt.plot(xs, ys)
    >>> plt.show()

    xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y
    with knots [-3,-1,0,1,3]

    """

    def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3):
        """
        Input:
          x,y   - 1-d sequences of data points (x must be
                  in strictly ascending order)
          t     - 1-d sequence of the positions of user-defined
                  interior knots of the spline (t must be in strictly
                  ascending order and bbox[0]<t[0]<...<t[-1]<bbox[-1])

        Optional input:
          w          - positive 1-d sequence of weights
          bbox       - 2-sequence specifying the boundary of
                       the approximation interval.
                       By default, bbox=[x[0],x[-1]]
          k=3        - degree of the univariate spline.
        """
        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        xb = bbox[0]
        xe = bbox[1]
        if xb is None:
            xb = x[0]
        if xe is None:
            xe = x[-1]
        t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
        n = len(t)
        if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
            raise ValueError('Interior knots t must satisfy '
                             'Schoenberg-Whitney conditions')
        data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
        self._data = data[:-3] + (None, None, data[-1])
        self._reset_class()


################ Bivariate spline ####################

class _BivariateSplineBase(object):
    """ Base class for Bivariate spline s(x,y) interpolation on the rectangle
    [xb,xe] x [yb, ye] calculated from a given set of data points
    (x,y,z).

    See Also
    --------
    bisplrep, bisplev : an older wrapping of FITPACK
    BivariateSpline :
        implementation of bivariate spline interpolation on a plane grid
    SphereBivariateSpline :
        implementation of bivariate spline interpolation on a spherical grid
    """

    def get_residual(self):
        """ Return weighted sum of squared residuals of the spline
        approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
        """
        return self.fp

    def get_knots(self):
        """ Return a tuple (tx,ty) where tx,ty contain knots positions
        of the spline with respect to x-, y-variable, respectively.
        The position of interior and additional knots are given as
        t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
        """
        return self.tck[:2]

    def get_coeffs(self):
        """ Return spline coefficients."""
        return self.tck[2]

    def __call__(self, x, y, mth=None, dx=0, dy=0, grid=True):
        """
        Evaluate the spline or its derivatives at given positions.

        Parameters
        ----------
        x, y : array-like
            Input coordinates.

            If `grid` is False, evaluate the spline at points ``(x[i],
            y[i]), i=0, ..., len(x)-1``.  Standard Numpy broadcasting
            is obeyed.

            If `grid` is True: evaluate spline at the grid points
            defined by the coordinate arrays x, y. The arrays must be
            sorted to increasing order.
        dx : int
            Order of x-derivative

            .. versionadded:: 0.14.0
        dy : int
            Order of y-derivative

            .. versionadded:: 0.14.0
        grid : bool
            Whether to evaluate the results on a grid spanned by the
            input arrays, or at points specified by the input arrays.

            .. versionadded:: 0.14.0
        mth : str
            Deprecated argument. Has no effect.

        """
        x = np.asarray(x)
        y = np.asarray(y)

        if mth is not None:
            warnings.warn("The `mth` argument is deprecated and will be removed",
                          FutureWarning)

        tx, ty, c = self.tck[:3]
        kx, ky = self.degrees
        if grid:
            if x.size == 0 or y.size == 0:
                return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)

            if dx or dy:
                z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)
                if not ier == 0:
                    raise ValueError("Error code returned by parder: %s" % ier)
            else:
                z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
                if not ier == 0:
                    raise ValueError("Error code returned by bispev: %s" % ier)
        else:
            # standard Numpy broadcasting
            if x.shape != y.shape:
                x, y = np.broadcast_arrays(x, y)

            shape = x.shape
            x = x.ravel()
            y = y.ravel()

            if x.size == 0 or y.size == 0:
                return np.zeros(shape, dtype=self.tck[2].dtype)

            if dx or dy:
                z, ier = dfitpack.pardeu(tx, ty, c, kx, ky, dx, dy, x, y)
                if not ier == 0:
                    raise ValueError("Error code returned by pardeu: %s" % ier)
            else:
                z, ier = dfitpack.bispeu(tx, ty, c, kx, ky, x, y)
                if not ier == 0:
                    raise ValueError("Error code returned by bispeu: %s" % ier)

            z = z.reshape(shape)
        return z


_surfit_messages = {1:"""
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
                    2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
                    3:"""
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
                    4:"""
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
                    5:"""
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
                    10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
  xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
  yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
                    -3:"""
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
                    }


class BivariateSpline(_BivariateSplineBase):
    """
    Base class for bivariate splines.

    This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
    the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
    of data points ``(x, y, z)``.

    This class is meant to be subclassed, not instantiated directly.
    To construct these splines, call either `SmoothBivariateSpline` or
    `LSQBivariateSpline`.

    See Also
    --------
    UnivariateSpline : a similar class for univariate spline interpolation
    SmoothBivariateSpline :
        to create a BivariateSpline through the given points
    LSQBivariateSpline :
        to create a BivariateSpline using weighted least-squares fitting
    SphereBivariateSpline :
        bivariate spline interpolation in spherical coordinates
    bisplrep : older wrapping of FITPACK
    bisplev : older wrapping of FITPACK
    """

    def ev(self, xi, yi, dx=0, dy=0):
        """
        Evaluate the spline at points

        Returns the interpolated value at ``(xi[i], yi[i]),
        i=0,...,len(xi)-1``.

        Parameters
        ----------
        xi, yi : array-like
            Input coordinates. Standard Numpy broadcasting is obeyed.
        dx : int
            Order of x-derivative

            .. versionadded:: 0.14.0
        dy : int
            Order of y-derivative

            .. versionadded:: 0.14.0
        """
        return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)

    def integral(self, xa, xb, ya, yb):
        """
        Evaluate the integral of the spline over area [xa,xb] x [ya,yb].

        Parameters
        ----------
        xa, xb : float
            The end-points of the x integration interval.
        ya, yb : float
            The end-points of the y integration interval.

        Returns
        -------
        integ : float
            The value of the resulting integral.

        """
        tx, ty, c = self.tck[:3]
        kx, ky = self.degrees
        return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)


class SmoothBivariateSpline(BivariateSpline):
    """
    Smooth bivariate spline approximation.

    Parameters
    ----------
    x, y, z : array_like
        1-D sequences of data points (order is not important).
    w : array_like, optional
        Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
    bbox : array_like, optional
        Sequence of length 4 specifying the boundary of the rectangular
        approximation domain.  By default,
        ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
    kx, ky : ints, optional
        Degrees of the bivariate spline. Default is 3.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
        Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
        estimate of the standard deviation of ``z[i]``.
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value between 0 and 1,
        the default is 1e-16.

    See Also
    --------
    bisplrep : an older wrapping of FITPACK
    bisplev : an older wrapping of FITPACK
    UnivariateSpline : a similar class for univariate spline interpolation
    LSQUnivariateSpline : to create a BivariateSpline using weighted

    Notes
    -----
    The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.

    """

    def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
                 eps=None):
        xb, xe, yb, ye = bbox
        nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
                                                                xb, xe, yb, ye,
                                                                kx, ky, s=s,
                                                                eps=eps, lwrk2=1)
        if ier > 10:          # lwrk2 was too small, re-run
            nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
                                                                    xb, xe, yb, ye,
                                                                    kx, ky, s=s,
                                                                    eps=eps, lwrk2=ier)
        if ier in [0, -1, -2]:  # normal return
            pass
        else:
            message = _surfit_messages.get(ier, 'ier=%s' % (ier))
            warnings.warn(message)

        self.fp = fp
        self.tck = tx[:nx], ty[:ny], c[:(nx-kx-1)*(ny-ky-1)]
        self.degrees = kx, ky


class LSQBivariateSpline(BivariateSpline):
    """
    Weighted least-squares bivariate spline approximation.

    Parameters
    ----------
    x, y, z : array_like
        1-D sequences of data points (order is not important).
    tx, ty : array_like
        Strictly ordered 1-D sequences of knots coordinates.
    w : array_like, optional
        Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
    bbox : (4,) array_like, optional
        Sequence of length 4 specifying the boundary of the rectangular
        approximation domain.  By default,
        ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
    kx, ky : ints, optional
        Degrees of the bivariate spline. Default is 3.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
        Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
        estimate of the standard deviation of ``z[i]``.
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value between 0 and 1,
        the default is 1e-16.

    See Also
    --------
    bisplrep : an older wrapping of FITPACK
    bisplev : an older wrapping of FITPACK
    UnivariateSpline : a similar class for univariate spline interpolation
    SmoothBivariateSpline : create a smoothing BivariateSpline

    Notes
    -----
    The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.

    """

    def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
                 eps=None):
        nx = 2*kx+2+len(tx)
        ny = 2*ky+2+len(ty)
        tx1 = zeros((nx,), float)
        ty1 = zeros((ny,), float)
        tx1[kx+1:nx-kx-1] = tx
        ty1[ky+1:ny-ky-1] = ty
        xb, xe, yb, ye = bbox
        tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, tx1, ty1, w,
                                                   xb, xe, yb, ye,
                                                   kx, ky, eps, lwrk2=1)
        if ier > 10:
            tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, tx1, ty1, w,
                                                       xb, xe, yb, ye,
                                                       kx, ky, eps, lwrk2=ier)
        if ier in [0, -1, -2]:  # normal return
            pass
        else:
            if ier < -2:
                deficiency = (nx-kx-1)*(ny-ky-1)+ier
                message = _surfit_messages.get(-3) % (deficiency)
            else:
                message = _surfit_messages.get(ier, 'ier=%s' % (ier))
            warnings.warn(message)
        self.fp = fp
        self.tck = tx1, ty1, c
        self.degrees = kx, ky


class RectBivariateSpline(BivariateSpline):
    """
    Bivariate spline approximation over a rectangular mesh.

    Can be used for both smoothing and interpolating data.

    Parameters
    ----------
    x,y : array_like
        1-D arrays of coordinates in strictly ascending order.
    z : array_like
        2-D array of data with shape (x.size,y.size).
    bbox : array_like, optional
        Sequence of length 4 specifying the boundary of the rectangular
        approximation domain.  By default,
        ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
    kx, ky : ints, optional
        Degrees of the bivariate spline. Default is 3.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
        Default is ``s=0``, which is for interpolation.

    See Also
    --------
    SmoothBivariateSpline : a smoothing bivariate spline for scattered data
    bisplrep : an older wrapping of FITPACK
    bisplev : an older wrapping of FITPACK
    UnivariateSpline : a similar class for univariate spline interpolation

    """

    def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
        x, y = ravel(x), ravel(y)
        if not all(diff(x) > 0.0):
            raise TypeError('x must be strictly increasing')
        if not all(diff(y) > 0.0):
            raise TypeError('y must be strictly increasing')
        if not ((x.min() == x[0]) and (x.max() == x[-1])):
            raise TypeError('x must be strictly ascending')
        if not ((y.min() == y[0]) and (y.max() == y[-1])):
            raise TypeError('y must be strictly ascending')
        if not x.size == z.shape[0]:
            raise TypeError('x dimension of z must have same number of '
                            'elements as x')
        if not y.size == z.shape[1]:
            raise TypeError('y dimension of z must have same number of '
                            'elements as y')
        z = ravel(z)
        xb, xe, yb, ye = bbox
        nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
                                                          ye, kx, ky, s)

        if not ier in [0, -1, -2]:
            msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(msg)

        self.fp = fp
        self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
        self.degrees = kx, ky


_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
       restrictions must be satisfied:
            -1<=iopt<=1,  m>=2, ntest>=8 ,npest >=8, 0<eps<1,
            0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
            lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
            kwrk >= m+(ntest-7)*(npest-7)
            if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
                        0<tt(5)<tt(6)<...<tt(nt-4)<pi
                        0<tp(5)<tp(6)<...<tp(np-4)<2*pi
            if iopt>=0: s>=0
            if one of these conditions is found to be violated,control
            is immediately repassed to the calling program. in that
            case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
         minimal norm least-squares solution of a (numerically) rank
         deficient system (deficiency=%i, rank=%i). Especially if the rank
         deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
         the results may be inaccurate. They could also seriously depend on
         the value of eps."""


class SphereBivariateSpline(_BivariateSplineBase):
    """
    Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
    given set of data points (theta,phi,r).

    .. versionadded:: 0.11.0

    See Also
    --------
    bisplrep, bisplev : an older wrapping of FITPACK
    UnivariateSpline : a similar class for univariate spline interpolation
    SmoothUnivariateSpline :
        to create a BivariateSpline through the given points
    LSQUnivariateSpline :
        to create a BivariateSpline using weighted least-squares fitting
    """

    def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
        """
        Evaluate the spline or its derivatives at given positions.

        Parameters
        ----------
        theta, phi : array-like
            Input coordinates.

            If `grid` is False, evaluate the spline at points
            ``(theta[i], phi[i]), i=0, ..., len(x)-1``.  Standard
            Numpy broadcasting is obeyed.

            If `grid` is True: evaluate spline at the grid points
            defined by the coordinate arrays theta, phi. The arrays
            must be sorted to increasing order.
        dtheta : int
            Order of theta-derivative

            .. versionadded:: 0.14.0
        dphi : int
            Order of phi-derivative

            .. versionadded:: 0.14.0
        grid : bool
            Whether to evaluate the results on a grid spanned by the
            input arrays, or at points specified by the input arrays.

            .. versionadded:: 0.14.0

        """
        theta = np.asarray(theta)
        phi = np.asarray(phi)

        if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
            raise ValueError("requested theta out of bounds.")
        if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
            raise ValueError("requested phi out of bounds.")

        return _BivariateSplineBase.__call__(self, theta, phi,
                                             dx=dtheta, dy=dphi, grid=grid)

    def ev(self, theta, phi, dtheta=0, dphi=0):
        """
        Evaluate the spline at points

        Returns the interpolated value at ``(theta[i], phi[i]),
        i=0,...,len(theta)-1``.

        Parameters
        ----------
        theta, phi : array-like
            Input coordinates. Standard Numpy broadcasting is obeyed.
        dtheta : int
            Order of theta-derivative

            .. versionadded:: 0.14.0
        dphi : int
            Order of phi-derivative

            .. versionadded:: 0.14.0
        """
        return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)


class SmoothSphereBivariateSpline(SphereBivariateSpline):
    """
    Smooth bivariate spline approximation in spherical coordinates.

    .. versionadded:: 0.11.0

    Parameters
    ----------
    theta, phi, r : array_like
        1-D sequences of data points (order is not important). Coordinates
        must be given in radians. Theta must lie within the interval (0, pi),
        and phi must lie within the interval (0, 2pi).
    w : array_like, optional
        Positive 1-D sequence of weights.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
        Default ``s=len(w)`` which should be a good value if 1/w[i] is an
        estimate of the standard deviation of r[i].
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value between 0 and 1,
        the default is 1e-16.

    Notes
    -----
    For more information, see the FITPACK_ site about this function.

    .. _FITPACK: http://www.netlib.org/dierckx/sphere.f

    Examples
    --------
    Suppose we have global data on a coarse grid (the input data does not
    have to be on a grid):

    >>> theta = np.linspace(0., np.pi, 7)
    >>> phi = np.linspace(0., 2*np.pi, 9)
    >>> data = np.empty((theta.shape[0], phi.shape[0]))
    >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
    >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
    >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
    >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
    >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
    >>> data[3,3:-2] = 3.
    >>> data = np.roll(data, 4, 1)

    We need to set up the interpolator object

    >>> lats, lons = np.meshgrid(theta, phi)
    >>> from scipy.interpolate import SmoothSphereBivariateSpline
    >>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
                                          data.T.ravel(),s=3.5)

    As a first test, we'll see what the algorithm returns when run on the
    input coordinates

    >>> data_orig = lut(theta, phi)

    Finally we interpolate the data to a finer grid

    >>> fine_lats = np.linspace(0., np.pi, 70)
    >>> fine_lons = np.linspace(0., 2 * np.pi, 90)

    >>> data_smth = lut(fine_lats, fine_lons)

    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(131)
    >>> ax1.imshow(data, interpolation='nearest')
    >>> ax2 = fig.add_subplot(132)
    >>> ax2.imshow(data_orig, interpolation='nearest')
    >>> ax3 = fig.add_subplot(133)
    >>> ax3.imshow(data_smth, interpolation='nearest')
    >>> plt.show()

    """

    def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
        if np.issubclass_(w, float):
            w = ones(len(theta)) * w
        nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
                                                                r, w=w, s=s,
                                                                eps=eps)
        if not ier in [0, -1, -2]:
            message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(message)

        self.fp = fp
        self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
        self.degrees = (3, 3)


class LSQSphereBivariateSpline(SphereBivariateSpline):
    """
    Weighted least-squares bivariate spline approximation in spherical
    coordinates.

    .. versionadded:: 0.11.0

    Parameters
    ----------
    theta, phi, r : array_like
        1-D sequences of data points (order is not important). Coordinates
        must be given in radians. Theta must lie within the interval (0, pi),
        and phi must lie within the interval (0, 2pi).
    tt, tp : array_like
        Strictly ordered 1-D sequences of knots coordinates.
        Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
    w : array_like, optional
        Positive 1-D sequence of weights, of the same length as `theta`, `phi`
        and `r`.
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value between 0 and 1,
        the default is 1e-16.

    Notes
    -----
    For more information, see the FITPACK_ site about this function.

    .. _FITPACK: http://www.netlib.org/dierckx/sphere.f

    Examples
    --------
    Suppose we have global data on a coarse grid (the input data does not
    have to be on a grid):

    >>> theta = np.linspace(0., np.pi, 7)
    >>> phi = np.linspace(0., 2*np.pi, 9)
    >>> data = np.empty((theta.shape[0], phi.shape[0]))
    >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
    >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
    >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
    >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
    >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
    >>> data[3,3:-2] = 3.
    >>> data = np.roll(data, 4, 1)

    We need to set up the interpolator object. Here, we must also specify the
    coordinates of the knots to use.

    >>> lats, lons = np.meshgrid(theta, phi)
    >>> knotst, knotsp = theta.copy(), phi.copy()
    >>> knotst[0] += .0001
    >>> knotst[-1] -= .0001
    >>> knotsp[0] += .0001
    >>> knotsp[-1] -= .0001
    >>> from scipy.interpolate import LSQSphereBivariateSpline
    >>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
                                       data.T.ravel(),knotst,knotsp)

    As a first test, we'll see what the algorithm returns when run on the
    input coordinates

    >>> data_orig = lut(theta, phi)

    Finally we interpolate the data to a finer grid

    >>> fine_lats = np.linspace(0., np.pi, 70)
    >>> fine_lons = np.linspace(0., 2*np.pi, 90)

    >>> data_lsq = lut(fine_lats, fine_lons)

    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(131)
    >>> ax1.imshow(data, interpolation='nearest')
    >>> ax2 = fig.add_subplot(132)
    >>> ax2.imshow(data_orig, interpolation='nearest')
    >>> ax3 = fig.add_subplot(133)
    >>> ax3.imshow(data_lsq, interpolation='nearest')
    >>> plt.show()

    """

    def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
        if np.issubclass_(w, float):
            w = ones(len(theta)) * w
        nt_, np_ = 8 + len(tt), 8 + len(tp)
        tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
        tt_[4:-4], tp_[4:-4] = tt, tp
        tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
        tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
                                                     w=w, eps=eps)
        if ier < -2:
            deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier
            message = _spherefit_messages.get(-3) % (deficiency, -ier)
            warnings.warn(message)
        elif not ier in [0, -1, -2]:
            message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(message)

        self.fp = fp
        self.tck = tt_, tp_, c
        self.degrees = (3, 3)


_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are controlled on validity
       the following restrictions must be satisfied.
          -1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
          -1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
          -1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
          mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
          kwrk>=5+mu+mv+nuest+nvest,
          lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
          0< u(i-1)<u(i)< pi,i=2,..,mu,
          -pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
          if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
                         0<tu(5)<tu(6)<...<tu(nu-4)< pi
                         8<=nv<=min(nvest,mv+7)
                         v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
                         the schoenberg-whitney conditions, i.e. there must be
                         subset of grid co-ordinates uu(p) and vv(q) such that
                            tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
                            (iopt(2)=1 and iopt(3)=1 also count for a uu-value
                            tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
                            (vv(q) is either a value v(j) or v(j)+2*pi)
          if iopt(1)>=0: s>=0
          if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
       if one of these conditions is found to be violated,control
       is immediately repassed to the calling program. in that
       case there is no approximation returned."""


class RectSphereBivariateSpline(SphereBivariateSpline):
    """
    Bivariate spline approximation over a rectangular mesh on a sphere.

    Can be used for smoothing data.

    .. versionadded:: 0.11.0

    Parameters
    ----------
    u : array_like
        1-D array of latitude coordinates in strictly ascending order.
        Coordinates must be given in radians and lie within the interval
        (0, pi).
    v : array_like
        1-D array of longitude coordinates in strictly ascending order.
        Coordinates must be given in radians, and must lie within (0, 2pi).
    r : array_like
        2-D array of data with shape ``(u.size, v.size)``.
    s : float, optional
        Positive smoothing factor defined for estimation condition
        (``s=0`` is for interpolation).
    pole_continuity : bool or (bool, bool), optional
        Order of continuity at the poles ``u=0`` (``pole_continuity[0]``)
        and ``u=pi`` (``pole_continuity[1]``).  The order of continuity at
        the pole will be 1 or 0 when this is True or False, respectively.
        Defaults to False.
    pole_values : float or (float, float), optional
        Data values at the poles ``u=0`` and ``u=pi``.  Either the whole
        parameter or each individual element can be None.  Defaults to None.
    pole_exact : bool or (bool, bool), optional
        Data value exactness at the poles ``u=0`` and ``u=pi``.  If True,
        the value is considered to be the right function value, and it will
        be fitted exactly. If False, the value will be considered to be a
        data value just like the other data values.  Defaults to False.
    pole_flat : bool or (bool, bool), optional
        For the poles at ``u=0`` and ``u=pi``, specify whether or not the
        approximation has vanishing derivatives.  Defaults to False.

    See Also
    --------
    RectBivariateSpline : bivariate spline approximation over a rectangular
        mesh

    Notes
    -----
    Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
    ``iopt[0] = 1`` in the FITPACK routine) is supported.  The exact
    least-squares spline approximation is not implemented yet.

    When actually performing the interpolation, the requested `v` values must
    lie within the same length 2pi interval that the original `v` values were
    chosen from.

    For more information, see the FITPACK_ site about this function.

    .. _FITPACK: http://www.netlib.org/dierckx/spgrid.f

    Examples
    --------
    Suppose we have global data on a coarse grid

    >>> lats = np.linspace(10, 170, 9) * np.pi / 180.
    >>> lons = np.linspace(0, 350, 18) * np.pi / 180.
    >>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
                      np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T

    We want to interpolate it to a global one-degree grid

    >>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
    >>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
    >>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)

    We need to set up the interpolator object

    >>> from scipy.interpolate import RectSphereBivariateSpline
    >>> lut = RectSphereBivariateSpline(lats, lons, data)

    Finally we interpolate the data.  The `RectSphereBivariateSpline` object
    only takes 1-D arrays as input, therefore we need to do some reshaping.

    >>> data_interp = lut.ev(new_lats.ravel(),
    ...                      new_lons.ravel()).reshape((360, 180)).T

    Looking at the original and the interpolated data, one can see that the
    interpolant reproduces the original data very well:

    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(211)
    >>> ax1.imshow(data, interpolation='nearest')
    >>> ax2 = fig.add_subplot(212)
    >>> ax2.imshow(data_interp, interpolation='nearest')
    >>> plt.show()

    Choosing the optimal value of ``s`` can be a delicate task. Recommended
    values for ``s`` depend on the accuracy of the data values.  If the user
    has an idea of the statistical errors on the data, she can also find a
    proper estimate for ``s``. By assuming that, if she specifies the
    right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
    reproduces the function underlying the data, she can evaluate
    ``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
    For example, if she knows that the statistical errors on her
    ``r(i,j)``-values are not greater than 0.1, she may expect that a good
    ``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.

    If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
    be determined by trial and error.  The best is then to start with a very
    large value of ``s`` (to determine the least-squares polynomial and the
    corresponding upper bound ``fp0`` for ``s``) and then to progressively
    decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
    ``s = fp0 / 10, fp0 / 100, ...``  and more carefully as the approximation
    shows more detail) to obtain closer fits.

    The interpolation results for different values of ``s`` give some insight
    into this process:

    >>> fig2 = plt.figure()
    >>> s = [3e9, 2e9, 1e9, 1e8]
    >>> for ii in xrange(len(s)):
    >>>     lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
    >>>     data_interp = lut.ev(new_lats.ravel(),
    ...                          new_lons.ravel()).reshape((360, 180)).T
    >>>     ax = fig2.add_subplot(2, 2, ii+1)
    >>>     ax.imshow(data_interp, interpolation='nearest')
    >>>     ax.set_title("s = %g" % s[ii])
    >>> plt.show()

    """

    def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
                 pole_exact=False, pole_flat=False):
        iopt = np.array([0, 0, 0], dtype=int)
        ider = np.array([-1, 0, -1, 0], dtype=int)
        if pole_values is None:
            pole_values = (None, None)
        elif isinstance(pole_values, (float, np.float32, np.float64)):
            pole_values = (pole_values, pole_values)
        if isinstance(pole_continuity, bool):
            pole_continuity = (pole_continuity, pole_continuity)
        if isinstance(pole_exact, bool):
            pole_exact = (pole_exact, pole_exact)
        if isinstance(pole_flat, bool):
            pole_flat = (pole_flat, pole_flat)

        r0, r1 = pole_values
        iopt[1:] = pole_continuity
        if r0 is None:
            ider[0] = -1
        else:
            ider[0] = pole_exact[0]

        if r1 is None:
            ider[2] = -1
        else:
            ider[2] = pole_exact[1]

        ider[1], ider[3] = pole_flat

        u, v = np.ravel(u), np.ravel(v)
        if not np.all(np.diff(u) > 0.0):
            raise TypeError('u must be strictly increasing')
        if not np.all(np.diff(v) > 0.0):
            raise TypeError('v must be strictly increasing')

        if not u.size == r.shape[0]:
            raise TypeError('u dimension of r must have same number of '
                            'elements as u')
        if not v.size == r.shape[1]:
            raise TypeError('v dimension of r must have same number of '
                            'elements as v')

        if pole_continuity[1] is False and pole_flat[1] is True:
            raise TypeError('if pole_continuity is False, so must be '
                            'pole_flat')
        if pole_continuity[0] is False and pole_flat[0] is True:
            raise TypeError('if pole_continuity is False, so must be '
                            'pole_flat')

        r = np.ravel(r)
        nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
                                                                u.copy(),
                                                                v.copy(),
                                                                r.copy(),
                                                                r0, r1, s)

        if not ier in [0, -1, -2]:
            msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(msg)

        self.fp = fp
        self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
        self.degrees = (3, 3)
```
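A minimal usage sketch for `RectBivariateSpline` from the listing above; the grid sizes, the sine-product test surface, and the evaluation points are illustrative assumptions, not part of the scipy source:

```python
# Illustrative data for RectBivariateSpline; not taken from the scipy source above.
import numpy as np
from scipy.interpolate import RectBivariateSpline

x = np.linspace(0, 4, 25)  # strictly increasing grid coordinates
y = np.linspace(0, 4, 25)
z = np.sin(x)[:, None] * np.cos(y)[None, :]  # z.shape == (x.size, y.size)

spline = RectBivariateSpline(x, y, z, kx=3, ky=3, s=0)  # s=0 -> interpolation

# Grid evaluation (grid=True is the default) versus point evaluation via ev():
z_grid = spline(np.linspace(0, 4, 100), np.linspace(0, 4, 100))
z_pts = spline.ev([1.5, 2.5], [0.5, 3.5])  # values at (1.5, 0.5) and (2.5, 3.5)
print(z_grid.shape, z_pts)
```

The same two evaluation modes (`grid=True` on sorted coordinate arrays, `ev()` for scattered points) apply to the sphere classes, with `theta`/`phi` in radians.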
[ { "content": "```python\nfrom datetime import timedelta, datetime\n\nfrom airflow import DAG\nfrom airflow.contrib.operators.bigquery_operator import BigQueryOperator\nfrom airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator\nfrom airflow.contrib.operators.gcs_to_bq import GoogleClou...
[ { "content": "<|memory_start|>```python\nfrom datetime import timedelta, datetime\n\nfrom airflow import DAG\nfrom airflow.contrib.operators.bigquery_operator import BigQueryOperator\nfrom airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator\nfrom airflow.contrib.operators.gcs_to_bq i...
```python
from datetime import timedelta, datetime

from airflow import DAG
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator
from airflow.contrib.operators.gcs_to_bq import GoogleCloudStorageToBigQueryOperator

from dags.support import schemas

seven_days_ago = datetime.combine(datetime.today() - timedelta(7),
                                  datetime.min.time())

default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': seven_days_ago,
    'email': ['alex@vanboxel.be'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=30),
}

with DAG('v1_8_bigquery',
         schedule_interval=timedelta(days=1),
         default_args=default_args) as dag:
    bq_extract_one_day = BigQueryOperator(
        task_id='bq_extract_one_day',
        bql='gcp_smoke/gsob_extract_day.sql',
        destination_dataset_table='{{var.value.gcq_dataset}}.gsod_partition{{ ds_nodash }}',
        write_disposition='WRITE_TRUNCATE',
        bigquery_conn_id='gcp_smoke',
        use_legacy_sql=False
    )

    bq2gcp_avro = BigQueryToCloudStorageOperator(
        task_id='bq2gcp_avro',
        source_project_dataset_table='{{var.value.gcq_dataset}}.gsod_partition{{ ds_nodash }}',
        destination_cloud_storage_uris=[
            'gs://{{var.value.gcs_bucket}}/{{var.value.gcs_root}}/gcp_smoke_bq/bq_to_gcp_avro/{{ ds_nodash }}/part-*.avro'
        ],
        export_format='AVRO',
        bigquery_conn_id='gcp_smoke',
    )

    bq2gcp_override = BigQueryToCloudStorageOperator(
        task_id='bq2gcp_override',
        source_project_dataset_table='{{var.value.gcq_dataset}}.gsod_partition{{ ds_nodash }}',
        destination_cloud_storage_uris=[
            'gs://{{var.value.gcs_bucket}}/{{var.value.gcs_root}}/gcp_smoke_bq/bq_to_gcp_avro/99999999/part-*.avro'
        ],
        export_format='AVRO',
        bigquery_conn_id='gcp_smoke',
    )

    gcs2bq_avro_auto_schema = GoogleCloudStorageToBigQueryOperator(
        task_id='gcs2bq_avro_auto_schema',
        bucket='{{var.value.gcs_bucket}}',
        source_objects=[
            '{{var.value.gcs_root}}/gcp_smoke_bq/bq_to_gcp_avro/{{ ds_nodash }}/part-*'
        ],
        destination_project_dataset_table='{{var.value.gcq_tempset}}.avro_auto_schema{{ ds_nodash }}',
        source_format='AVRO',
        create_disposition='CREATE_IF_NEEDED',
        write_disposition='WRITE_TRUNCATE',
        google_cloud_storage_conn_id='gcp_smoke',
        bigquery_conn_id='gcp_smoke'
    )

    gcs2bq_avro_with_schema = GoogleCloudStorageToBigQueryOperator(
        task_id='gcs2bq_avro_with_schema',
        bucket='{{var.value.gcs_bucket}}',
        source_objects=[
            '{{var.value.gcs_root}}/gcp_smoke_bq/bq_to_gcp_avro/{{ ds_nodash }}/part-*'
        ],
        destination_project_dataset_table='{{var.value.gcq_tempset}}.avro_with_schema{{ ds_nodash }}',
        source_format='AVRO',
        schema_fields=schemas.gsob(),
        create_disposition='CREATE_IF_NEEDED',
        write_disposition='WRITE_TRUNCATE',
        google_cloud_storage_conn_id='gcp_smoke',
        bigquery_conn_id='gcp_smoke'
    )

    bq_extract_one_day >> bq2gcp_avro >> bq2gcp_override
    bq2gcp_avro >> gcs2bq_avro_auto_schema
    bq2gcp_avro >> gcs2bq_avro_with_schema
```
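The DAG above fans one BigQuery extract out into three downstream load tasks. A hedged sketch of how that wiring could be sanity-checked follows; the module path `dags.v1_8_bigquery_dag` is hypothetical, while `DAG.get_task` and `task.downstream_list` are standard Airflow APIs:

```python
# Hedged sketch: verifying the fan-out wiring of the DAG above.
# Assumes the DAG file is importable; the module path below is hypothetical.
from dags.v1_8_bigquery_dag import dag

extract = dag.get_task('bq_extract_one_day')
avro = dag.get_task('bq2gcp_avro')

# bq_extract_one_day feeds bq2gcp_avro, which fans out to three tasks.
assert avro in extract.downstream_list
assert {t.task_id for t in avro.downstream_list} == {
    'bq2gcp_override', 'gcs2bq_avro_auto_schema', 'gcs2bq_avro_with_schema'}
```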
[ { "content": "Produce an exact reconstruction of the code:\n```python\n# -*- coding: utf-8 -*-\r\nimport markdown\r\nimport ho.pisa as pisa\r\nimport StringIO\r\nimport os\r\nimport re\r\nfrom Cheetah.Template import Template\r\nfrom tempfile import NamedTemporaryFile\r\n\r\ndebug = False\r\n\r\ndef markdown2pd...
[ { "content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\r\nimport markdown\r\nimport ho.pisa as pisa\r\nimport StringIO\r\nimport os\r\nimport re\r\nfrom Cheetah.Template import Template\r\nfrom tempfile import NamedTemporaryFile\r\n\r\ndebug = False\r\n\r\...
```python
# -*- coding: utf-8 -*-
import markdown
import ho.pisa as pisa
import StringIO
import os
import re
from Cheetah.Template import Template
from tempfile import NamedTemporaryFile

debug = False

def markdown2pdf(text, pdffile, cssfile='xhtml2pdf.css', src_dir='.',
                 fontfile='arial.ttf', skipTo1st=False):
    global debug

    md = markdown.Markdown(extensions=['meta','footnotes'])
    html = md.convert(text)

    # post-process unofficial markup
    # 1) <p>*</p> --> <p class="blankpara">&#160;</p>
    # 2) quotation mark
    html = html.replace('<p>*</p>', '<p class="blankpara">&#160;</p>')
    html = re.sub(u'“ ?', "&#8220;", html)
    html = html.replace(u'”',"&#8221;")
    html = re.sub(u"‘ ?", "&#8216;", html)
    html = html.replace(u"’","&#8217;")
    if debug:
        open('test.html','w').write(html.encode('utf-8'))

    htmline = []

    #-- Cover & Title Page
    cover_file = None
    title = None
    author = None
    cif = None
    if 'cover_url' in md.Meta:
        cover_url = md.Meta['cover_url'][0]
        if cover_url.startswith('http://'):
            import urllib
            cif = NamedTemporaryFile(delete=False)
            cif.write( urllib.urlopen(cover_url).read() )
            cif.close()
            cover_file = cif.name
        else:
            cover_file = cover_url
            if cover_url.startswith('file://'):
                cover_file = cover_url[7:]
    if 'title' in md.Meta:
        title = md.Meta['title'][0].replace(', ','<br />')
    if 'author' in md.Meta:
        author = md.Meta['author'][0].replace(', ','<br />')
    cover_tmpl = open(os.path.join('template','pdf','coverpage.html'), 'r').read().decode('utf-8')
    coverpg_htm = str( Template(cover_tmpl, searchList=[
        {'cover_url':cover_file,'title':title,'author':author} ]) )
    htmline.append( unicode(coverpg_htm,'utf-8') )

    #-- Body
    # correct image path
    for url in re.compile('<img [^>]*src="(.*?)"').findall(html):
        if url.startswith('http://') or os.path.isabs(url):
            pass
        else:
            html = html.replace(url, os.path.normpath(src_dir+'/'+url))
    if skipTo1st:
        html = html[ html.find('<h1'): ]
    html = html.replace('<h1 />','<h1></h1>')
    htmline.append(html)

    #-- PDF generation
    css_tmpl = open(os.path.join('template','pdf',cssfile), 'r').read().decode('utf-8')
    target_css = str( Template(css_tmpl, searchList=[ {'font':'fonts/'+fontfile} ]) )
    fp = file(pdffile,'wb')
    pdf = pisa.pisaDocument(
        StringIO.StringIO('\n'.join(htmline).encode('utf-8')),
        fp,
        #path=src_dir,  # not working!
        #link_callback=fetch_resources,
        default_css=target_css,
        #xhtml=True,
        encoding='utf-8')
    fp.close()
    if cif and os.path.exists(cif.name):
        os.remove(cif.name)
    #if debug and not pdf.err:
    #    pisa.startViewer(pdffile)

# suppress ho.pisa logging messages
import logging
class PisaNullHandler(logging.Handler):
    def emit(self, record):
        pass
logging.getLogger("ho.pisa").addHandler(PisaNullHandler())

if __name__ == "__main__":
    debug = True
    import os, sys
    outfile = os.path.splitext(sys.argv[1])[0] + ".pdf"
    text = unicode(open(sys.argv[1],'r'),'utf-8')[1:]
    markdown2pdf(text, outfile, fontfile='SeoulHangang.ttf')

# vim:sw=4:ts=4:et
```
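A minimal invocation sketch for `markdown2pdf` above, assuming the `template/pdf/coverpage.html`, CSS, and font files the function expects are present in the working directory; the sample metadata block and output filename are illustrative (Python 2, matching the listing):

```python
# -*- coding: utf-8 -*-
# Hedged usage sketch; the document content and 'example.pdf' are illustrative.
text = u"""title: Example Book
author: Jane Doe

# Chapter One

Hello, *world*.
"""
# 'title:' and 'author:' lines are consumed by the markdown 'meta' extension
# and end up on the generated cover page.
markdown2pdf(text, 'example.pdf', src_dir='.', fontfile='arial.ttf')
```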
[ { "content": "Reconstruct the code exactly:\n```python\n\"\"\"\nBot code for creating chemical items in wikidata from UNII\n\nAdapted from: https://github.com/sebotic/cdk_pywrapper/blob/master/cdk_pywrapper/chemlib.py\n\"\"\"\nimport os\nimport re\nimport subprocess\nimport time\nimport zipfile\nimport pandas a...
[ { "content": "Reconstruct the code exactly:\n<|memory_start|>```python\n\"\"\"\nBot code for creating chemical items in wikidata from UNII\n\nAdapted from: https://github.com/sebotic/cdk_pywrapper/blob/master/cdk_pywrapper/chemlib.py\n\"\"\"\nimport os\nimport re\nimport subprocess\nimport time\nimport zipfile\...
```python
"""
Bot code for creating chemical items in wikidata from UNII

Adapted from: https://github.com/sebotic/cdk_pywrapper/blob/master/cdk_pywrapper/chemlib.py
"""
import os
import re
import subprocess
import time
import zipfile

import pandas as pd
import wikidataintegrator.wdi_core as wdi_core

data_folder = "unii_data"


def load_unii():
    url = 'http://fdasis.nlm.nih.gov/srs/download/srs/UNII_Data.zip'
    if not os.path.exists(data_folder):
        os.makedirs(data_folder)
    subprocess.check_call(["wget", "-N", "-P", data_folder, url])
    with zipfile.ZipFile(os.path.join(data_folder, 'UNII_Data.zip'), 'r') as zf:
        zf.extractall(data_folder)
    for file in os.listdir(data_folder):
        if 'Records' in file:
            full_file_name = os.path.join(data_folder, file)
            os.rename(full_file_name, os.path.join(data_folder, 'unii_data.txt'))


class UNIIMolecule(object):
    unii_path = os.path.join(data_folder, 'unii_data.txt')
    if not os.path.exists(unii_path):
        load_unii()
    unii_df = pd.read_csv(unii_path, dtype=str, sep='\t', low_memory=False)

    def __init__(self, unii=None, inchi_key=None, verbose=False):
        if unii:
            ind = UNIIMolecule.unii_df['UNII'].values == unii
        else:
            ind = UNIIMolecule.unii_df['INCHIKEY'].values == inchi_key

        self.data = UNIIMolecule.unii_df.loc[ind, :]

        if len(self.data.index) != 1:
            raise ValueError('Provided ID did not return a unique UNII')

        self.data_index = self.data.index[0]

        if verbose:
            x = self.data
            print(x.common_name)
            print(x.stdinchikey)
            print(x.stdinchi)
            print(x.csid)

    @property
    def stdinchikey(self):
        ikey = self.data.loc[self.data_index, 'INCHIKEY']
        if pd.isnull(ikey) and pd.isnull(self.smiles):
            return None
        return ikey

    @property
    def stdinchi(self):
        if pd.isnull(self.smiles):
            return None

    @property
    def preferred_name(self):
        name = self.data.loc[self.data_index, 'PT']
        return UNIIMolecule.label_converter(name) if pd.notnull(name) else None

    @property
    def smiles(self):
        smiles = self.data.loc[self.data_index, 'SMILES']
        return smiles if pd.notnull(smiles) else None

    @property
    def molecule_type(self):
        molecule_type = self.data.loc[self.data_index, 'UNII_TYPE']
        return molecule_type if pd.notnull(molecule_type) else None

    @property
    def unii(self):
        return self.data.loc[self.data_index, 'UNII']

    @property
    def cas(self):
        cas = self.data.loc[self.data_index, 'RN']
        return cas if pd.notnull(cas) else None

    @property
    def einecs(self):
        einecs = self.data.loc[self.data_index, 'EC']
        return einecs if pd.notnull(einecs) else None

    @property
    def rxnorm(self):
        rxnorm = self.data.loc[self.data_index, 'RXCUI']
        return rxnorm if pd.notnull(rxnorm) else None

    @property
    def nci(self):
        nci = self.data.loc[self.data_index, 'NCIT']
        return nci if pd.notnull(nci) else None

    @property
    def umls(self):
        umls_cui = self.data.loc[self.data_index, 'UMLS_CUI']
        return umls_cui if pd.notnull(umls_cui) else None

    @property
    def pubchem(self):
        pubchem = self.data.loc[self.data_index, 'PUBCHEM']
        return pubchem if pd.notnull(pubchem) else None

    @property
    def label(self):
        item_label = self.preferred_name if self.preferred_name else self.unii
        return item_label

    def to_wikidata(self):
        refs = [[
            wdi_core.WDItemID(value='Q6593799', prop_nr='P248', is_reference=True),  # stated in
            wdi_core.WDExternalID(value=self.unii, prop_nr='P652', is_reference=True),  # source element
            wdi_core.WDTime(time=time.strftime('+%Y-%m-%dT00:00:00Z'), prop_nr='P813', is_reference=True)  # retrieved
        ]]

        print('UNII Main label is', self.label)

        elements = {
            'P652': self.unii,
            'P2017': self.smiles,
            'P235': self.stdinchikey,
            'P231': self.cas,
            'P232': self.einecs,
            'P1748': self.nci,
            'P3345': self.rxnorm
        }

        if self.smiles and len(self.smiles) > 400:
            del elements['P2017']

        data = []

        for k, v in elements.items():
            if not v:
                continue

            print('{}:'.format(k), v)

            if isinstance(v, list) or isinstance(v, set):
                for x in v:
                    data.append(wdi_core.WDString(prop_nr=k, value=x, references=refs))
            else:
                data.append(wdi_core.WDString(prop_nr=k, value=v, references=refs))

        return data

    @staticmethod
    def label_converter(label):
        label = label.lower()

        greek_codes = {
            '.alpha.': '\u03B1',
            '.beta.': '\u03B2',
            '.gamma.': '\u03B3',
            '.delta.': '\u03B4',
            '.epsilon.': '\u03B5',
            '.zeta.': '\u03B6 ',
            '.eta.': '\u03B7',
            '.theta.': '\u03B8',
            '.iota.': '\u03B9',
            '.kappa.': '\u03BA',
            '.lambda.': '\u03BB',
            '.mu.': '\u03BC',
            '.nu.': '\u03BD',
            '.xi.': '\u03BE',
            '.omicron.': '\u03BF',
            '.pi.': '\u03C0',
            '.rho.': '\u03C1',
            '.sigma.': '\u03C3',
            '.tau.': '\u03C4',
            '.upsilon.': '\u03C5',
            '.phi.': '\u03C6',
            '.chi.': '\u03C7',
            '.psi.': '\u03C8',
            '.omega.': '\u03C9',
        }

        for greek_letter, unicode in greek_codes.items():
            if greek_letter in label:
                label = label.replace(greek_letter, unicode)

        match = re.compile('(^|[^a-z])([ezdlnhros]{1}|dl{1})[^a-z]{1}')
        while True:
            if re.search(match, label):
                replacement = label[re.search(match, label).start():
                                    re.search(match, label).end()].upper()
                label = re.sub(match, repl=replacement, string=label, count=1)
            else:
                break

        splits = label.split(', ')
        splits.reverse()
        return ''.join(splits)
```
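A short, hedged usage sketch for `UNIIMolecule` above. It assumes the UNII data file has already been fetched by `load_unii()` (importing the class triggers the download otherwise); `'R16CO5Y76E'` is offered as an illustrative UNII (aspirin's), not a tested fixture:

```python
# Hedged sketch: look up one UNII record and build Wikidata statements.
mol = UNIIMolecule(unii='R16CO5Y76E', verbose=False)  # illustrative UNII
print(mol.label, mol.cas, mol.stdinchikey)

statements = mol.to_wikidata()  # list of wdi_core.WDString objects with references
print(len(statements))
```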
[ { "content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom personas.models import *\nfrom territo...
[ { "content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom personas.models import...
```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
from personas.models import *
from territorios.models import *
from establecimientos.models import *
from mediosdetransporte.models import *
from django.utils import timezone
from django.core.urlresolvers import reverse
from django import forms
#from multiselectfield import MultiSelectField

# Create your models here.

OPCIONES_CONVOCADOPOR = (
    ('NR','Nivel Regional'),
    ('NC','Nivel Central'),
    ('OT','Otro')
)

class Financiagastosde(models.Model):
    nombre = models.CharField("Financia Gastos de", max_length=60, blank=False, null=False)

    def __str__(self):
        return self.nombre

    class Meta:
        ordering = ['nombre']

class Tipodecometido(models.Model):
    nombre = models.CharField("Tipo de Cometido", max_length=60, blank=False, null=False)

    def __str__(self):
        return self.nombre

    class Meta:
        ordering = ['nombre']
        verbose_name_plural = 'Tipos de Cometidos'

class Tramo(models.Model):
    tramo = models.CharField("Tramo", max_length=10, blank=False, null=False)
    porcentaje = models.CharField("Ponderación", max_length=3,choices=OPCIONES_PONDERACION)
    inicio = models.ForeignKey(Grado, related_name="inicial")
    fin = models.ForeignKey(Grado, related_name="final")
    monto = models.PositiveIntegerField("Monto", default=0)

    def __str__(self):
        return u'El monto es %s '%(self.monto)

    class Meta:
        ordering = ['tramo']
        verbose_name_plural = 'Tramos'

class Cometido(models.Model):
    rut = models.CharField("Rut",max_length=12,blank=False,null=False)
    nombre = models.CharField("Nombre Completo", max_length=180, blank=False, null=False)
    persona = models.ForeignKey(settings.AUTH_USER_MODEL, default=User)
    grado = models.CharField("Grado",max_length=60,blank=False,null=False)
    escalafon = models.CharField("Escalafon",max_length=60,blank=False,null=False)
    estamento = models.CharField("Estamento",max_length=60,blank=False,null=False)
    unidad = models.CharField("Unidad",max_length=60,blank=False,null=False)
    region = models.CharField("Región",max_length=60,blank=False,null=False)
    convocadopor = models.CharField("Convocado por", max_length=2, choices=OPCIONES_CONVOCADOPOR,blank=True, null=True)
    tipofinanciamiento = models.ManyToManyField(Financiagastosde,blank=True,verbose_name='Financia Gastos de')
    al100 = models.PositiveIntegerField("Días al 100%", default=0)
    al60 = models.PositiveIntegerField("Días al 60%", default=0)
    al50 = models.PositiveIntegerField("Días al 50%", default=0)
    al40 = models.PositiveIntegerField("Días al 40%", default=0)
    derechoaviatico = models.BooleanField("Con derecho a viático",default=False)
    diadesalida = models.DateField("Día de salida",blank=True, null=True)
    horadesalida = models.TimeField("Hora de salida",blank=True, null=True)
    diadellegada = models.DateField("Día de llegada",blank=True, null=True)
    horadellegada = models.TimeField("Hora de llegada",blank=True, null=True)
    viaaerea = models.BooleanField("Vía Aérea",default=False)
    lineaaerea = models.ForeignKey(LineaAerea,blank=True, null=True, verbose_name="Línea Aérea")
    viaffcc = models.BooleanField("Vía FFCC",default=False)
    viabus = models.BooleanField("Vía Bus",default=False)
    lineabus = models.ForeignKey(LineaBus,blank=True, null=True, verbose_name='Empresa de Bus')
    viavehiculofiscal = models.BooleanField("Vía Vehículo Fiscal",default=False)
    vehiculofiscal = models.ForeignKey(VehiculoFiscal,blank=True, null=True, verbose_name='Vehículo Fiscal')
    viavehiculoparticular = models.BooleanField("Vía Vehículo Particular",default=False)
    placapatente = models.CharField("Placa Patente",max_length=7,blank=True,null=True)
    viataxitransfers = models.BooleanField("Vía Taxi o Transfers",default=False)
    viamaritima = models.BooleanField("Vía Marítima",default=False)
    kminicial = models.PositiveIntegerField("Kilometraje Inicial Real", blank=True,null=True)
    kmfinal = models.PositiveIntegerField("Kilometraje Final Estimado", blank=True,null=True)
    gastoscombustiblepeaje = models.BooleanField("Combustible - Peajes",default=False)
    gastosmovilizacion = models.BooleanField("Gastos de Movilización",default=False)
    gastosenvehiculoparticular = models.BooleanField("Vehiculo Particular",default=False)
    observaciones = models.TextField("Observaciones",blank=True,null=True)
    draft = models.BooleanField("Borrador",default=True)
    tipocometido = models.ForeignKey(Tipodecometido,blank=True,null=True,verbose_name='Tipo de Cometido')
    actualizado = models.DateTimeField('Actualizado',auto_now=True, auto_now_add=False)
    creado = models.DateField('Creado',auto_now = False, auto_now_add=True)

    def __str__(self):
        return u'%s '%(self.creado)

    class Meta:
        ordering = ['-actualizado']
        verbose_name_plural = 'Cometidos'

    def get_absolute_url(self):
        return reverse("cometidos:detail", kwargs={"id": self.id})

class Resolucion(models.Model):
    numero = models.PositiveIntegerField("Número de Resolución", default=0)

class Destino(models.Model):
    fecha = models.DateField("Fecha", blank=False,null=False)
    establecimiento = models.ForeignKey(Establecimiento)
    objetivo = models.CharField("Objetivo", max_length=250, blank=False, null=False)
    pernoctar = models.BooleanField("Con Pernoctar",default=False)
    cometido = models.ForeignKey(Cometido)
```
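A hedged queryset sketch for the models above; the app import path `cometidos.models` is an assumption, and the printed fields are illustrative:

```python
# Hedged sketch: typical ORM usage for Cometido/Destino above.
from cometidos.models import Cometido, Destino  # hypothetical app label

# Draft trips, newest first (matches the model's Meta ordering).
borradores = Cometido.objects.filter(draft=True).order_by('-actualizado')
for c in borradores:
    destinos = Destino.objects.filter(cometido=c)
    print(c.rut, c.nombre, c.get_absolute_url(), destinos.count())
```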
[ { "content": "Reconstruct the code file line-for-line, unmodified:\n```python\n#!/usr/bin/env python3\n\n#Copyright 2018 OSIsoft, LLC\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License a...
[ { "content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n#!/usr/bin/env python3\n\n#Copyright 2018 OSIsoft, LLC\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy ...
```python
#!/usr/bin/env python3

#Copyright 2018 OSIsoft, LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#<http://www.apache.org/licenses/LICENSE-2.0>
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

# NOTE: this script was designed using the v1.0
# version of the OMF specification, as outlined here:
# http://omf-docs.readthedocs.io/en/v1.0/index.html

# NOTE: this script was designed to run on BeagleBone Blue.  To learn
# more about the BeagleBone Blue, visit https://beagleboard.org/blue, where
# you can also find purchasing links

# ************************************************************************
# Import necessary packages
# ************************************************************************

# Import packages
import json
import time
import platform
import socket
import datetime
#import random  # Used to generate sample data; comment out this line if real data is used
import requests
import urllib3  # Used to disable warnings about insecure SSL (optional)

# Import any special packages needed for a particular hardware platform,
# for example, for a Raspberry PI,
# import RPi.GPIO as GPIO
# This automatically initializes the robotics cape; to install this library, see
# https://github.com/mcdeoliveira/rcpy
import rcpy

# ************************************************************************
# Specify constant values (names, target URLS, et cetera) needed by the script
# ************************************************************************

# Specify the name of this device, or simply use the hostname; this is the name
# of the PI AF Element that will be created, and it'll be included in the names
# of PI Points that get created as well
#DEVICE_NAME = (socket.gethostname()) + ""
DEVICE_NAME = "BBBlue Robot Controller 01"

# Specify a device location (optional); this will be added as a static
# string attribute to the AF Element that is created
DEVICE_LOCATION = "IoT Test Lab"

# Specify the name of the Assets type message; this will also end up becoming
# part of the name of the PI AF Element template that is created; for example, this could be
# "AssetsType_RaspberryPI" or "AssetsType_Dragonboard"
# You will want to make this different for each general class of IoT module that you use
ASSETS_MESSAGE_TYPE_NAME = DEVICE_NAME + "_assets_type"
#ASSETS_MESSAGE_TYPE_NAME = "assets_type" + "IoT Device Model 74656"  # An example

# Similarly, specify the name of for the data values type; this should likewise be unique
# for each general class of IoT device--for example, if you were running this
# script on two different devices, each with different numbers and kinds of sensors,
# you'd specify a different data values message type name
# when running the script on each device.  If both devices were the same,
# you could use the same DATA_VALUES_MESSAGE_TYPE_NAME
DATA_VALUES_MESSAGE_TYPE_NAME = DEVICE_NAME + "_data_values_type"
#DATA_VALUES_MESSAGE_TYPE_NAME = "data_values_type" + "IoT Device Model 74656"  # An example

# Store the id of the container that will be used to receive live data values
DATA_VALUES_CONTAINER_ID = DEVICE_NAME + "_data_values_container"

# Specify the number of seconds to sleep in between value messages
NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES = 2

# Specify whether you're sending data to OSIsoft cloud services or not
SEND_DATA_TO_OSISOFT_CLOUD_SERVICES = False

# Specify the address of the destination endpoint; it should be of the form
# http://<host/ip>:<port>/ingress/messages
# For example, "https://myservername:8118/ingress/messages"
TARGET_URL = "https://lopezpiserver:777/ingress/messages"
# !!! Note: if sending data to OSIsoft cloud services,
# uncomment the below line in order to set the target URL to the OCS OMF endpoint:
#TARGET_URL = "https://qi-data.osisoft.com/api/omf"

# Specify the producer token, a unique token used to identify and authorize a given
# OMF producer. Consult the OSIsoft Cloud Services or PI Connector Relay
# documentation for further information.
PRODUCER_TOKEN = "OMFv1"
#PRODUCER_TOKEN = "778408"  # An example
# !!! Note: if sending data to OSIsoft cloud services, the producer token should be the
# security token obtained for a particular Tenant and Publisher; see
# http://qi-docs.readthedocs.io/en/latest/OMF_Ingress_Specification.html#headers
#PRODUCER_TOKEN = ""

# ************************************************************************
# Specify options for sending web requests to the target
# ************************************************************************

# If self-signed certificates are used (true by default),
# do not verify HTTPS SSL certificates; normally, leave this as is
VERIFY_SSL = False

# Specify the timeout, in seconds, for sending web requests
# (if it takes longer than this to send a message, an error will be thrown)
WEB_REQUEST_TIMEOUT_SECONDS = 30

# ************************************************************************
# Helper function: run any code needed to initialize local sensors, if necessary for this hardware
# ************************************************************************

# Below is where you can initialize any global variables that are needed by your application;
# certain sensors, for example, will require global interface or sensor variables
# myExampleInterfaceKitGlobalVar = None

# The following function is where you can insert specific initialization code to set up
# sensors for a particular IoT module or platform
def initialize_sensors():
    print("\n--- Sensors initializing...")
    try:
        #For a raspberry pi, for example, to set up pins 4 and 5, you would add
        #GPIO.setmode(GPIO.BCM)
        #GPIO.setup(4, GPIO.IN)
        #GPIO.setup(5, GPIO.IN)
        # Set state to rcpy.RUNNING
        rcpy.set_state(rcpy.RUNNING)
        # Activate the magnetometer on the BeagleBone Blue
        rcpy.mpu9250.initialize(enable_magnetometer = True)
        print("--- Sensors initialized!")
        # In short, in this example, by default,
        # this function is called but doesn't do anything (it's just a placeholder)
    except Exception as ex:
        # Log any error, if it occurs
        print(str(datetime.datetime.now()) + " Error when initializing sensors: " + str(ex))

# ************************************************************************
# Helper function: REQUIRED: create a JSON message that contains sensor data values
# ************************************************************************

# The following function you can customize to allow this script to send along any
# number of different data values, so long as the values that you send here match
# up with the values defined in the "DataValuesType" OMF message type (see the next section)
# In this example, this function simply generates two random values for the sensor values,
# but here is where you could change this function to reference a library that actually
# reads from sensors attached to the device that's running the script
def create_data_values_message():
    # Read data from the BeagleBone Blue's built-in sensors
    boardTemperature = rcpy.mpu9250.read_imu_temp() * 9/5 + 32
    accelRotationAndMagneticData = rcpy.mpu9250.read()
    # Get the current timestamp in ISO format
    timestamp = datetime.datetime.utcnow().isoformat() + 'Z'
    # Assemble a JSON object containing the streamId and any data values
    return [
        {
            "containerid": DATA_VALUES_CONTAINER_ID,
            "values": [
                {
                    "Time": timestamp,
                    # Again, in this example,
                    # we're just sending along random values for these two "sensors"
                    #"Raw Sensor Reading 1": 100*random.random(),
                    #"Raw Sensor Reading 2": 100*random.random()
                    # For the BeagleBone Blue, indexes 0, 1, and 2 correspond to X, Y, and Z
                    # Moreover, we're dividing acceleration by 9.80665 to convert it to units of Gs
                    "X-acceleration": accelRotationAndMagneticData['accel'][0]/9.80665,
                    "Y-acceleration": accelRotationAndMagneticData['accel'][1]/9.80665,
                    "Z-acceleration": accelRotationAndMagneticData['accel'][2]/9.80665,
                    "X-rotation": accelRotationAndMagneticData['gyro'][0],
                    "Y-rotation": accelRotationAndMagneticData['gyro'][1],
                    "Z-rotation": accelRotationAndMagneticData['gyro'][2],
                    "X-magnetic field": accelRotationAndMagneticData['mag'][0],
                    "Y-magnetic field": accelRotationAndMagneticData['mag'][1],
                    "Z-magnetic field": accelRotationAndMagneticData['mag'][2],
                    "Board Temperature": boardTemperature
                    # If you wanted to read, for example, the digital GPIO pins
                    # 4 and 5 on a Raspberry PI,
                    # you would add to the earlier package import section:
                    # import RPi.GPIO as GPIO
                    # then add the below 3 lines to the above initialize_sensors
                    # function to set up the GPIO pins:
                    # GPIO.setmode(GPIO.BCM)
                    # GPIO.setup(4, GPIO.IN)
                    # GPIO.setup(5, GPIO.IN)
                    # and then lastly, you would change the two Raw Sensor reading lines above to
                    # "Raw Sensor Reading 1": GPIO.input(4),
                    # "Raw Sensor Reading 2": GPIO.input(5)
                }
            ]
        }
    ]

# ************************************************************************
# Helper function: REQUIRED: wrapper function for sending an HTTPS message
# ************************************************************************

# Define a helper function to allow easily sending web request messages;
# this function can later be customized to allow you to port this script to other languages.
# All it does is take in a data object and a message type, and it sends an HTTPS
# request to the target OMF endpoint
def send_omf_message_to_endpoint(action, message_type, message_json):
    try:
        # Assemble headers that contain the producer token and message type
        # Note: in this example, the only action that is used is "create",
        # which will work totally fine;
        # to expand this application, you could modify it to use the "update"
        # action to, for example, modify existing AF element template types
        web_request_header = {
            'producertoken': PRODUCER_TOKEN,
            'messagetype': message_type,
            'action': action,
            'messageformat': 'JSON',
            'omfversion': '1.0'
        }
        # !!! Note: if desired, uncomment the below line to print the outgoing message
        print('\nOutgoing message: ' + json.dumps(message_json));
        # Send the request, and collect the response; json.dumps is used to
        # properly format the message JSON so that it can be sent as a web request
        response = requests.post(
            TARGET_URL,
            headers=web_request_header,
            data=json.dumps(message_json),
            verify=VERIFY_SSL,
            timeout=WEB_REQUEST_TIMEOUT_SECONDS
        )
        # Print a debug message, if desired; note: you should receive a
        # response code 200 or 202 if the request was successful!
        print(
            'Response from sending a message of type '
            '"{0}" with action "{1}": {2} {3}'.format(
                message_type,
                action,
                response.status_code,
                response.text
            )
        )
    except Exception as ex:
        # Log any error, if it occurs
        print(str(datetime.datetime.now()) + " Error during web request: " + str(ex))

# ************************************************************************
# Turn off HTTPS warnings, if desired
# (if the default certificate configuration was used by the PI Connector)
# ************************************************************************

# Suppress insecure HTTPS warnings, if an untrusted certificate is used by the target endpoint
# Remove if targeting trusted targets
try:
    if not VERIFY_SSL:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        requests.packages.urllib3.disable_warnings()
except Exception as ex:
    # Log any error, if it occurs
    print(str(datetime.datetime.now()) + " Possible non-fatal error when disabling SSL validation: " + str(ex))

print(
    '\n--- Setup: targeting endpoint "' + TARGET_URL + '"...' +
    '\n--- Now sending types, defining containers, and creating assets and links...' +
    '\n--- (Note: a successful message will return a 20X response code.)\n'
)

# ************************************************************************
# Create a JSON packet to define the types of streams that will be sent
# ************************************************************************

DYNAMIC_TYPES_MESSAGE_JSON = [
    # ************************************************************************
    # There are several different message types that will be used by this script, but
    # you can customize this script for your own needs by modifying the types:
    # First, you can modify the "AssetsType", which will allow you to customize which static
    # attributes are added to the new PI AF Element that will be created, and second,
    # you can modify the "DataValuesType", which will allow you to customize this script to send
    # additional sensor values, in addition to (or instead of) the two shown here

    # This values type is going to be used to send real-time values; feel free to rename the
    # values from "Raw Sensor Reading 1" to, say, "Temperature", or "Pressure"
    # Note:
    # all keywords ("id", "type", "classification", etc. are case sensitive!)
    # For a list of the specific keywords used in these messages,
    # see http://omf-docs.readthedocs.io/
    {
        "id": DATA_VALUES_MESSAGE_TYPE_NAME,
        "type": "object",
        "classification": "dynamic",
        "properties": {
            "Time": {
                "format": "date-time",
                "type": "string",
                "isindex": True
            },
            #"Raw Sensor Reading 1": {"type": "number"},
            #"Raw Sensor Reading 2": {"type": "number"}
            "X-acceleration": {"type": "number", "description": "in Gs"},
            "Y-acceleration": {"type": "number", "description": "in Gs"},
            "Z-acceleration": {"type": "number", "description": "in Gs"},
            "X-rotation": {"type": "number", "description": "in degrees per second"},
            "Y-rotation": {"type": "number", "description": "in degrees per second"},
            "Z-rotation": {"type": "number", "description": "in degrees per second"},
            "X-magnetic field": {"type": "number", "description": "in microteslas"},
            "Y-magnetic field": {"type": "number", "description": "in microteslas"},
            "Z-magnetic field": {"type": "number", "description": "in microteslas"},
            "Board Temperature": {"type": "number", "description": "in Fahrenheit"}
            # For example, to allow you to send a string-type live data value,
            # such as "Status", you would add
            #"Status": {
            #   "type": "string"
            #}
        }
    }
]

# ************************************************************************
# Send the DYNAMIC types message, so that these types can be referenced in all later messages
# ************************************************************************

send_omf_message_to_endpoint("create", "Type", DYNAMIC_TYPES_MESSAGE_JSON)

# !!! Note: if sending data to OCS, static types are not included!
if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES:
    STATIC_TYPES_MESSAGE_JSON = [
        # This asset type is used to define a PI AF Element that will be created;
        # this type also defines two static string attributes that will be created
        # as well; feel free to rename these or add additional
        # static attributes for each Element (PI Point attributes will be added later)
        # The name of this type will also end up being part of the name of the PI AF
        # Element template that is automatically created
        {
            "id": ASSETS_MESSAGE_TYPE_NAME,
            "type": "object",
            "classification": "static",
            "properties": {
                "Name": {
                    "type": "string",
                    "isindex": True
                },
                "Device Type": {
                    "type": "string"
                },
                "Location": {
                    "type": "string"
                },
                "Data Ingress Method": {
                    "type": "string"
                }
                # For example, to add a number-type static
                # attribute for the device model, you would add
                # "Model": {
                #   "type": "number"
                #}
            }
        }
    ]

    # ************************************************************************
    # Send the STATIC types message, so that these types can be referenced in all later messages
    # ************************************************************************

    send_omf_message_to_endpoint("create", "Type", STATIC_TYPES_MESSAGE_JSON)

# ************************************************************************
# Create a JSON packet to define containerids and the type
# (using the types listed above) for each new data events container
# ************************************************************************

# The device name that you specified earlier will be used as the AF Element name!
NEW_AF_ELEMENT_NAME = DEVICE_NAME CONTAINERS_MESSAGE_JSON = [ { "id": DATA_VALUES_CONTAINER_ID, "typeid": DATA_VALUES_MESSAGE_TYPE_NAME } ] # ************************************************************************ # Send the container message, to instantiate this particular container; # we can now directly start sending data to it using its Id # ************************************************************************ send_omf_message_to_endpoint("create", "Container", CONTAINERS_MESSAGE_JSON) # !!! Note: if sending data to OCS, static types are not included! if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES: # ************************************************************************ # Create a JSON packet to containing the asset and # linking data for the PI AF asset that will be made # ************************************************************************ # Here is where you can specify values for the static PI AF attributes; # in this case, we're auto-populating the Device Type, # but you can manually hard-code in values if you wish # we also add the LINKS to be made, which will both position the new PI AF # Element, so it will show up in AF, and will associate the PI Points # that will be created with that Element ASSETS_AND_LINKS_MESSAGE_JSON = [ { # This will end up creating a new PI AF Element with # this specific name and static attribute values "typeid": ASSETS_MESSAGE_TYPE_NAME, "values": [ { "Name": NEW_AF_ELEMENT_NAME, "Device Type": ( platform.machine() + " - " + platform.platform() + " - " + platform.processor() ), "Location": DEVICE_LOCATION, "Data Ingress Method": "OMF" } ] }, { "typeid": "__Link", "values": [ # This first link will locate such a newly created AF Element under # the root PI element targeted by the PI Connector in your target AF database # This was specfied in the Connector Relay Admin page; note that a new # parent element, with the same name as the PRODUCER_TOKEN, will also be made { "Source": { "typeid": ASSETS_MESSAGE_TYPE_NAME, "index": "_ROOT" }, "Target": { "typeid": ASSETS_MESSAGE_TYPE_NAME, "index": NEW_AF_ELEMENT_NAME } }, # This second link will map new PI Points (created by messages # sent to the data values container) to a newly create element { "Source": { "typeid": ASSETS_MESSAGE_TYPE_NAME, "index": NEW_AF_ELEMENT_NAME }, "Target": { "containerid": DATA_VALUES_CONTAINER_ID } } ] } ] # ************************************************************************ # Send the message to create the PI AF asset; it won't appear in PI AF, # though, because it hasn't yet been positioned... # ************************************************************************ send_omf_message_to_endpoint("create", "Data", ASSETS_AND_LINKS_MESSAGE_JSON) # ************************************************************************ # Initialize sensors prior to sending data (if needed), using the function defined earlier # ************************************************************************ initialize_sensors() # ************************************************************************ # Finally, loop indefinitely, sending random events # conforming to the value type that we defined earlier # ************************************************************************ print( '\n--- Now sending live data every ' + str(NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES) + ' second(s) for device "' + NEW_AF_ELEMENT_NAME + '"... 
(press CTRL+C to quit at any time)\n' ) if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES: print( '--- (Look for a new AF Element named "' + NEW_AF_ELEMENT_NAME + '".)\n' ) while True: # Call the custom function that builds a JSON object that # contains new data values; see the beginning of this script VALUES_MESSAGE_JSON = create_data_values_message() # Send the JSON message to the target URL send_omf_message_to_endpoint("create", "Data", VALUES_MESSAGE_JSON) # Send the next message after the required interval time.sleep(NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES) ```
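Before pointing the script above at a live Connector Relay, it can help to see exactly what it posts. Below is a minimal sketch, not part of the original sample, of a throwaway local endpoint that accepts the POSTs and echoes the OMF headers and JSON body; the port and handler name are made up for illustration.

```python
# Hypothetical local test endpoint for the OMF script above; run it separately
# and point TARGET_URL at http://localhost:8118/ingress/messages (illustrative port).
from http.server import BaseHTTPRequestHandler, HTTPServer

class OmfEchoHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        body = self.rfile.read(int(self.headers.get("Content-Length", 0)))
        # Echo the OMF-specific headers and the raw JSON payload
        print("messagetype:", self.headers.get("messagetype"))
        print("action:", self.headers.get("action"))
        print(body.decode("utf-8", errors="replace"))
        self.send_response(202)  # the script treats 200/202 as success
        self.end_headers()

if __name__ == "__main__":
    HTTPServer(("localhost", 8118), OmfEchoHandler).serve_forever()
```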
[ { "content": "Here is a code file:\n```python\nimport time\nimport functools\n\nclass TimeCost(object):\n def __init__(self, unit='s', precision=4, logger=None):\n self.start = None\n self.end = None\n self.total = 0\n self.unit = unit\n self.precision = precision\n ...
[ { "content": "Here is a code file:\n<|memory_start|>```python\nimport time\nimport functools\n\nclass TimeCost(object):\n def __init__(self, unit='s', precision=4, logger=None):\n self.start = None\n self.end = None\n self.total = 0\n self.unit = unit\n self.precision = pre...
```python
import time
import functools

class TimeCost(object):
    def __init__(self, unit='s', precision=4, logger=None):
        self.start = None
        self.end = None
        self.total = 0
        self.unit = unit
        self.precision = precision
        self.__unitfactor = {'s': 1, 'ms': 1000, 'us': 1000000}
        self.logger = logger

    def __call__(self, f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            with self:
                return f(*args, **kwargs)
        return wrapped

    def __enter__(self):
        if self.unit not in self.__unitfactor:
            raise KeyError('Unsupported time unit: expected one of s/ms/us.')
        if self.precision < 0:
            raise ValueError('precision must be >= 0')
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.end = time.time()
        self.total = (self.end - self.start) * self.__unitfactor[self.unit]
        if self.precision != 0:
            self.total = round(self.total, self.precision)
        else:
            self.total = int(self.total)
        if self.logger:
            self.logger.info('this cost {0}{1}'.format(self.total, self.unit))

    def __str__(self):
        return 'this cost {0}{1}'.format(self.total, self.unit)
```
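A short usage sketch for the `TimeCost` class above, assuming the class is in scope, showing both the decorator and the context-manager forms; the logger name is illustrative.

```python
import logging
import time

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("timecost-demo")  # illustrative name

# Decorator form: each call runs inside the timing context manager
@TimeCost(unit='ms', precision=2, logger=log)
def slow_add(a, b):
    time.sleep(0.05)
    return a + b

slow_add(1, 2)  # the logger reports something like "this cost 50.12ms"

# Context-manager form: the result is available on the instance afterwards
with TimeCost(unit='us') as tc:
    sum(range(10000))
print(tc)  # e.g. "this cost 312.5us"
```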
[ { "content": "```python\n\"\"\"\n.. _tut_evoked_objects:\n\nThe :class:`Evoked <mne.Evoked>` data structure: evoked/averaged data\n=====================================================================\n\nThe :class:`Evoked <mne.Evoked>` data structure is mainly used for storing\naveraged data over trials. In MN...
[ { "content": "<|memory_start|>```python\n\"\"\"\n.. _tut_evoked_objects:\n\nThe :class:`Evoked <mne.Evoked>` data structure: evoked/averaged data\n=====================================================================\n\nThe :class:`Evoked <mne.Evoked>` data structure is mainly used for storing\naveraged data ov...
```python
"""
.. _tut_evoked_objects:

The :class:`Evoked <mne.Evoked>` data structure: evoked/averaged data
=====================================================================

The :class:`Evoked <mne.Evoked>` data structure is mainly used for storing
averaged data over trials. In MNE the evoked objects are usually created by
averaging epochs data with :func:`mne.Epochs.average`.
"""

import os.path as op

import mne

###############################################################################
# Here for convenience we read the evoked dataset from a file.
data_path = mne.datasets.sample.data_path()
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
evokeds = mne.read_evokeds(fname, baseline=(None, 0), proj=True)
print(evokeds)

###############################################################################
# Notice that the reader function returned a list of evoked instances. This is
# because you can store multiple categories into a single file. Here we have
# categories of
# ``['Left Auditory', 'Right Auditory', 'Left Visual', 'Right Visual']``.
# We can also use the ``condition`` parameter to read in only one category.
evoked = mne.read_evokeds(fname, condition='Left Auditory')
evoked.apply_baseline((None, 0)).apply_proj()
print(evoked)

###############################################################################
# If you've gone through the tutorials of raw and epochs datasets, you're
# probably already familiar with the :class:`Info <mne.Info>` attribute.
# There is nothing new or special with the ``evoked.info``. All the relevant
# info is still there.
print(evoked.info)
print(evoked.times)

###############################################################################
# The evoked data structure also contains some new attributes easily
# accessible:
print(evoked.nave)  # Number of averaged epochs.
print(evoked.first)  # First time sample.
print(evoked.last)  # Last time sample.
print(evoked.comment)  # Comment on dataset. Usually the condition.
print(evoked.kind)  # Type of data, either average or standard_error.

###############################################################################
# The data is also easily accessible. Since the evoked data arrays are usually
# much smaller than raw or epochs datasets, they are preloaded into the memory
# when the evoked object is constructed. You can access the data as a numpy
# array.
data = evoked.data
print(data.shape)

###############################################################################
# The data is arranged in an array of shape `(n_channels, n_times)`. Notice
# that unlike epochs, the evoked object does not support indexing. This means
# that to access the data of a specific channel you must use the data array
# directly.
print('Data from channel {0}:'.format(evoked.ch_names[10]))
print(data[10])

###############################################################################
# If you want to import evoked data from some other system and you have it in
# a numpy array, you can use :class:`mne.EvokedArray` for that. All you need
# is the data and some info about the evoked data. For more information, see
# :ref:`tut_creating_data_structures`.
evoked = mne.EvokedArray(data, evoked.info, tmin=evoked.times[0])
evoked.plot()

###############################################################################
# To write an evoked dataset to a file, use the :meth:`mne.Evoked.save`
# method. To save multiple categories to a single file, see
# :func:`mne.write_evokeds`.
```
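Since the tutorial points out that `Evoked` does not support indexing, a small helper makes the look-up-by-channel-name pattern explicit; the function name is illustrative, not part of the MNE API.

```python
import mne

def channel_time_series(evoked, ch_name):
    """Return one channel's data from an Evoked, which has no [] indexing."""
    idx = evoked.ch_names.index(ch_name)  # raises ValueError if the name is absent
    return evoked.data[idx]

# e.g., with the tutorial's object: channel_time_series(evoked, evoked.ch_names[10])
```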
[ { "content": "```python\nimport sim, sim_items, main\nfrom util import *\nimport random\n\nclass ReinforcedPrefix(sim_items.Prefix):\n \"\"\"\n \"reinforced\" prefix boosts armor by increasing its PV or EV (whichever would\n normally be greater) by 1-3 points. \n \"\"\"\n def __init__(self):\n ...
[ { "content": "<|memory_start|>```python\nimport sim, sim_items, main\nfrom util import *\nimport random\n\nclass ReinforcedPrefix(sim_items.Prefix):\n \"\"\"\n \"reinforced\" prefix boosts armor by increasing its PV or EV (whichever would\n normally be greater) by 1-3 points. \n \"\"\"\n def __in...
```python
import sim, sim_items, main
from util import *
import random

class ReinforcedPrefix(sim_items.Prefix):
    """
    "reinforced" prefix boosts armor by increasing its PV or EV (whichever would
    normally be greater) by 1-3 points.
    """
    def __init__(self):
        super(ReinforcedPrefix, self).__init__(
            "reinforced", # name
            "Whoever crafted this %(wearable)s evidently used above-average "
            "material and paid great attention to reinforcing standard "
            "weak points."
        )

    def initialize(self, wearable):
        if not isinstance(wearable, sim_items.Armor):
            raise ValueError('Tried to apply %r to non-armor item: %r'
                             % (self, wearable))
        super(ReinforcedPrefix, self).initialize(wearable)
        self.bonus = random.randint(1, 3)
        if self.wearable.bonuses['evMod'] > self.wearable.bonuses['pvMod']:
            self.affectedMod = 'evMod'
        else:
            self.affectedMod = 'pvMod'
        self.wearable.bonuses.update({
            self.affectedMod: self.wearable.bonuses[self.affectedMod] + self.bonus
        })

    def remove(self, wearable):
        super(ReinforcedPrefix, self).remove(wearable)
        # Use the passed-in wearable consistently; self.wearable may have been
        # cleared by the superclass remove()
        wearable.bonuses.update({
            self.affectedMod: wearable.bonuses[self.affectedMod] - self.bonus
        })

#========================================
thingClass = ReinforcedPrefix
```
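The prefix relies on apply/remove symmetry: the rolled bonus and the chosen stat are stored on the prefix instance, so removal subtracts exactly what initialization added. A standalone sketch of that invariant, with illustrative names and independent of the `sim_items` framework:

```python
import random

class StatModifier(object):
    """Minimal stand-in for the prefix pattern above."""
    def apply(self, bonuses):
        self.bonus = random.randint(1, 3)
        # boost whichever defense is already the item's strong suit
        self.affected = 'evMod' if bonuses['evMod'] > bonuses['pvMod'] else 'pvMod'
        bonuses[self.affected] += self.bonus

    def remove(self, bonuses):
        # subtract the exact bonus recorded at apply time
        bonuses[self.affected] -= self.bonus

bonuses = {'pvMod': 4, 'evMod': 2}
mod = StatModifier()
mod.apply(bonuses)
mod.remove(bonuses)
assert bonuses == {'pvMod': 4, 'evMod': 2}  # the round trip restores the stats
```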
[ { "content": "Reconstruct the code file line-for-line, unmodified:\n```python\n# Copyright 2021 Nisaba Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://...
[ { "content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n# Copyright 2021 Nisaba Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n...
```python
# Copyright 2021 Nisaba Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests that the sigma FST has the expected set of FST properties."""

import itertools

import pynini
from absl.testing import absltest
from absl.testing import parameterized
from nisaba.brahmic import util as u
from nisaba.utils import test_util


class FstPropertiesTest(parameterized.TestCase,
                        test_util.FstPropertiesTestCase):

  @parameterized.parameters(
      itertools.product(
          u.SCRIPTS,
          (pynini.ACYCLIC, pynini.UNWEIGHTED, pynini.I_DETERMINISTIC,
           pynini.NO_EPSILONS, pynini.ACCESSIBLE, pynini.COACCESSIBLE,
           pynini.ACCEPTOR)))
  def test_sigma_utf8(self, script: str, prop: pynini.FstProperties):
    fst = u.OpenFstFromBrahmicFar('sigma', script, token_type='utf8')
    self.assertFstCompliesWithProperties(fst, prop)

  @parameterized.parameters(
      itertools.product(
          u.SCRIPTS,
          (pynini.CYCLIC, pynini.UNWEIGHTED, pynini.I_DETERMINISTIC,
           pynini.NO_EPSILONS, pynini.ACCESSIBLE, pynini.COACCESSIBLE,
           pynini.ACCEPTOR),
          ('byte', 'utf8')))
  def test_wellformed(self, script: str, prop: pynini.FstProperties,
                      token_type: str):
    fst = u.OpenFstFromBrahmicFar('wellformed', script, token_type=token_type)
    self.assertFstCompliesWithProperties(fst, prop)

  @parameterized.parameters(
      itertools.product(u.SCRIPTS + ['Brahmic'],
                        (pynini.UNWEIGHTED, pynini.NO_EPSILONS, pynini.CYCLIC,
                         pynini.ACCESSIBLE, pynini.COACCESSIBLE),
                        ('byte', 'utf8')))
  def test_nfc(self, script: str, prop: pynini.FstProperties,
               token_type: str):
    fst = u.OpenFstFromBrahmicFar('nfc', script, token_type=token_type)
    self.assertFstCompliesWithProperties(fst, prop)

  @parameterized.parameters(
      itertools.product(u.SCRIPTS,
                        (pynini.UNWEIGHTED, pynini.NO_EPSILONS, pynini.CYCLIC,
                         pynini.ACCESSIBLE, pynini.COACCESSIBLE),
                        ('byte', 'utf8')))
  def test_visual_norm(self, script: str, prop: pynini.FstProperties,
                       token_type: str):
    fst = u.OpenFstFromBrahmicFar('visual_norm', script, token_type=token_type)
    self.assertFstCompliesWithProperties(fst, prop)

  @parameterized.parameters(
      itertools.product(u.FIXED_RULE_SCRIPTS,
                        (pynini.ACCESSIBLE, pynini.COACCESSIBLE, pynini.CYCLIC,
                         # TODO: Investigate why it is not deterministic
                         # pynini.I_DETERMINISTIC,
                         pynini.NO_EPSILONS, pynini.UNWEIGHTED),
                        ('byte', 'utf8')))
  def test_fixed(self, script: str, prop: pynini.FstProperties,
                 token_type: str):
    fst = u.OpenFstFromBrahmicFar('fixed', script, token_type=token_type)
    self.assertFstCompliesWithProperties(fst, prop)


if __name__ == '__main__':
  absltest.main()
```
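For readers unfamiliar with the property masks these tests assert, the same checks can be run directly on any FST. A minimal sketch, assuming a recent pynini where the acceptor constructor is `pynini.accep` (`acceptor` in older releases):

```python
import pynini

fst = pynini.accep("abc")  # a tiny linear acceptor
for prop in (pynini.ACYCLIC, pynini.UNWEIGHTED, pynini.NO_EPSILONS):
    # properties(mask, True) computes the property bits on demand;
    # a property holds iff its bit survives the mask
    print(prop, fst.properties(prop, True) == prop)
```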
[ { "content": "Here is some code:\n```python\n#!/usr/bin/env python2\nimport threading\nimport time\n# HTTP-related\nimport SimpleHTTPServer\nimport SocketServer\nimport os\n# websocket-related\nfrom gevent import monkey; monkey.patch_all()\nfrom ws4py.server.geventserver import WebSocketServer\nfrom ws4py.webso...
[ { "content": "Here is some code:\n<|memory_start|>```python\n#!/usr/bin/env python2\nimport threading\nimport time\n# HTTP-related\nimport SimpleHTTPServer\nimport SocketServer\nimport os\n# websocket-related\nfrom gevent import monkey; monkey.patch_all()\nfrom ws4py.server.geventserver import WebSocketServer\n...
```python
#!/usr/bin/env python2
import threading
import time
# HTTP-related
import SimpleHTTPServer
import SocketServer
import os
# websocket-related
from gevent import monkey; monkey.patch_all()
from ws4py.server.geventserver import WebSocketServer
from ws4py.websocket import WebSocket
import random

class HTTPThread(threading.Thread):
    daemon = True
    def run(self):
        os.chdir('data')
        class MyTCPServer(SocketServer.TCPServer):
            allow_reuse_address = True
        server = MyTCPServer(('', 8000), SimpleHTTPServer.SimpleHTTPRequestHandler)
        server.serve_forever()

class PrintWebSocket(WebSocket):
    def received_message(self, message):
        if random.random() < 0.2:
            self.send(r'{"output": "\r\nHere is some output!\r\n"}')
        print message

class WebsocketThread(threading.Thread):
    daemon = True
    def run(self):
        server = WebSocketServer(('127.0.0.1', 8001), websocket_class=PrintWebSocket)
        server.serve_forever()

HTTPThread().start()
WebsocketThread().start()
while True:
    time.sleep(1)
```
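A hypothetical companion client for the server above, sketched with ws4py's threaded client; the message contents are illustrative, and roughly one in five sends triggers the server's canned `{"output": ...}` reply.

```python
from ws4py.client.threadedclient import WebSocketClient

class DemoClient(WebSocketClient):
    def opened(self):
        # send something as soon as the handshake completes
        self.send('{"input": "hello"}')

    def received_message(self, message):
        print('server said: %s' % message)

if __name__ == '__main__':
    client = DemoClient('ws://127.0.0.1:8001/')
    client.connect()
    client.run_forever()
```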
[ { "content": "```python\nfrom __future__ import with_statement\nfrom cms.api import create_page\nfrom cms.toolbar.toolbar import CMSToolbar\nfrom cms.middleware.toolbar import ToolbarMiddleware\nfrom cms.test_utils.testcases import SettingsOverrideTestCase\nfrom cms.test_utils.util.context_managers import Setti...
[ { "content": "<|memory_start|>```python\nfrom __future__ import with_statement\nfrom cms.api import create_page\nfrom cms.toolbar.toolbar import CMSToolbar\nfrom cms.middleware.toolbar import ToolbarMiddleware\nfrom cms.test_utils.testcases import SettingsOverrideTestCase\nfrom cms.test_utils.util.context_manag...
```python
from __future__ import with_statement
from cms.api import create_page
from cms.toolbar.toolbar import CMSToolbar
from cms.middleware.toolbar import ToolbarMiddleware
from cms.test_utils.testcases import SettingsOverrideTestCase
from cms.test_utils.util.context_managers import SettingsOverride
from django.contrib.auth.models import AnonymousUser, User, Permission
from django.test.client import RequestFactory


class ToolbarTestBase(SettingsOverrideTestCase):

    def get_page_request(self, page, user, path=None, edit=False):
        path = page and page.get_absolute_url() or path
        if edit:
            path += '?edit'
        request = RequestFactory().get(path)
        request.session = {}
        request.user = user
        request.LANGUAGE_CODE = "en"
        if edit:
            request.GET = {'edit': None}
        else:
            request.GET = {'edit_off': None}
        request.current_page = page
        mid = ToolbarMiddleware()
        mid.process_request(request)
        return request

    def get_anon(self):
        return AnonymousUser()

    def get_staff(self):
        staff = User(
            username='staff',
            email='staff@staff.org',
            is_active=True,
            is_staff=True,
        )
        staff.set_password('staff')
        staff.save()
        staff.user_permissions.add(Permission.objects.get(codename='change_page'))
        return staff

    def get_nonstaff(self):
        nonstaff = User(
            username='nonstaff',
            email='nonstaff@staff.org',
            is_active=True,
            is_staff=False,
        )
        nonstaff.set_password('nonstaff')
        nonstaff.save()
        nonstaff.user_permissions.add(Permission.objects.get(codename='change_page'))
        return nonstaff

    def get_superuser(self):
        superuser = User(
            username='superuser',
            email='superuser@superuser.org',
            is_active=True,
            is_staff=True,
            is_superuser=True,
        )
        superuser.set_password('superuser')
        superuser.save()
        return superuser


class ToolbarTests(ToolbarTestBase):
    settings_overrides = {'CMS_PERMISSION': False}

    def test_toolbar_no_page_anon(self):
        request = self.get_page_request(None, self.get_anon(), '/')
        toolbar = CMSToolbar(request)
        items = toolbar.get_items()
        self.assertEqual(len(items), 0)

    def test_toolbar_no_page_staff(self):
        request = self.get_page_request(None, self.get_staff(), '/')
        toolbar = CMSToolbar(request)
        items = toolbar.get_items()
        # Logo + edit-mode + admin-menu + logout
        self.assertEqual(len(items), 2)
        self.assertEqual(len(items[0].get_context()['items']), 6)

    def test_toolbar_no_page_superuser(self):
        request = self.get_page_request(None, self.get_superuser(), '/')
        toolbar = CMSToolbar(request)
        items = toolbar.get_items()
        # Logo + edit-mode + admin-menu + logout
        self.assertEqual(len(items), 2)
        self.assertEqual(len(items[0].get_context()['items']), 7)

    def test_toolbar_anon(self):
        page = create_page('test', 'nav_playground.html', 'en')
        request = self.get_page_request(page, self.get_anon())
        toolbar = CMSToolbar(request)
        items = toolbar.get_items()
        self.assertEqual(len(items), 0)

    def test_toolbar_nonstaff(self):
        page = create_page('test', 'nav_playground.html', 'en', published=True)
        request = self.get_page_request(page, self.get_nonstaff())
        toolbar = CMSToolbar(request)
        items = toolbar.get_items()
        # Logo + edit-mode + logout
        self.assertEqual(len(items), 0)

    def test_toolbar_template_change_permission(self):
        with SettingsOverride(CMS_PERMISSIONS=True):
            page = create_page('test', 'nav_playground.html', 'en', published=True)
            request = self.get_page_request(page, self.get_nonstaff())
            toolbar = CMSToolbar(request)
            items = toolbar.get_items()
            self.assertEqual([item for item in items if item.css_class_suffix == 'templates'], [])

    def test_toolbar_markup(self):
        create_page("toolbar-page", "nav_playground.html", "en", published=True)
        superuser = self.get_superuser()
        with self.login_user_context(superuser):
            response = self.client.get('/en/?edit')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nav_playground.html')
        self.assertContains(response, '<div id="cms_toolbar"')
        self.assertContains(response, 'cms.placeholders.js')
        self.assertContains(response, 'cms.placeholders.css')

    def test_show_toolbar_to_staff(self):
        page = create_page("toolbar-page", "nav_playground.html", "en", published=True)
        request = self.get_page_request(page, self.get_staff(), '/')
        toolbar = CMSToolbar(request)
        self.assertTrue(toolbar.show_toolbar)

    def test_show_toolbar_with_edit(self):
        page = create_page("toolbar-page", "nav_playground.html", "en", published=True)
        request = self.get_page_request(page, AnonymousUser(), edit=True)
        toolbar = CMSToolbar(request)
        self.assertTrue(toolbar.show_toolbar)

    def test_show_toolbar_without_edit(self):
        page = create_page("toolbar-page", "nav_playground.html", "en", published=True)
        request = self.get_page_request(page, AnonymousUser(), edit=False)
        toolbar = CMSToolbar(request)
        self.assertFalse(toolbar.show_toolbar)

    def test_toolbar_publish_button(self):
        page = create_page('test', 'nav_playground.html', 'en', published=True)
        request = self.get_page_request(page, self.get_superuser(), edit=True)
        toolbar = CMSToolbar(request)
        self.assertTrue(toolbar.edit_mode)
        items = toolbar.get_items()
        self.assertEqual(len(items), 6)

    def test_toolbar_no_publish_button(self):
        page = create_page('test', 'nav_playground.html', 'en', published=True)
        request = self.get_page_request(page, self.get_staff(), edit=True)
        toolbar = CMSToolbar(request)
        self.assertTrue(page.has_change_permission(request))
        self.assertFalse(page.has_publish_permission(request))
        self.assertTrue(toolbar.edit_mode)
        items = toolbar.get_items()
        # Logo + edit-mode + templates + page-menu + admin-menu + logout
        self.assertEqual(len(items), 5)

    def test_toolbar_no_change_button(self):
        page = create_page('test', 'nav_playground.html', 'en', published=True)
        user = self.get_staff()
        user.user_permissions.all().delete()
        request = self.get_page_request(page, user, edit=True)
        toolbar = CMSToolbar(request)
        self.assertFalse(page.has_change_permission(request))
        self.assertFalse(page.has_publish_permission(request))
        items = toolbar.get_items()
        # Logo + page-menu + admin-menu + logout
        self.assertEqual(len(items), 2)
        self.assertEqual(len(items[0].get_context()['items']), 6)
```
[ { "content": "Here is a code snippet:\n```python\nimport time\nfrom struct import pack\nfrom typing import Optional\n\nfrom electrum import ecc\nfrom electrum.i18n import _\nfrom electrum.util import UserCancelled\nfrom electrum.keystore import bip39_normalize_passphrase\nfrom electrum.bip32 import BIP32Node, c...
[ { "content": "Here is a code snippet:\n<|memory_start|>```python\nimport time\nfrom struct import pack\nfrom typing import Optional\n\nfrom electrum import ecc\nfrom electrum.i18n import _\nfrom electrum.util import UserCancelled\nfrom electrum.keystore import bip39_normalize_passphrase\nfrom electrum.bip32 imp...
```python
import time
from struct import pack
from typing import Optional

from electrum import ecc
from electrum.i18n import _
from electrum.util import UserCancelled
from electrum.keystore import bip39_normalize_passphrase
from electrum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32
from electrum.logging import Logger
from electrum.plugin import runs_in_hwd_thread
from electrum.plugins.hw_wallet.plugin import HardwareClientBase, HardwareHandlerBase


class GuiMixin(object):
    # Requires: self.proto, self.device
    handler: Optional[HardwareHandlerBase]

    # ref: https://github.com/trezor/trezor-common/blob/44dfb07cfaafffada4b2ce0d15ba1d90d17cf35e/protob/types.proto#L89
    messages = {
        3: _("Confirm the transaction output on your {} device"),
        4: _("Confirm internal entropy on your {} device to begin"),
        5: _("Write down the seed word shown on your {}"),
        6: _("Confirm on your {} that you want to wipe it clean"),
        7: _("Confirm on your {} device the message to sign"),
        8: _("Confirm the total amount spent and the transaction fee on your "
             "{} device"),
        10: _("Confirm wallet address on your {} device"),
        14: _("Choose on your {} device where to enter your passphrase"),
        'default': _("Check your {} device to continue"),
    }

    def callback_Failure(self, msg):
        # BaseClient's unfortunate call() implementation forces us to
        # raise exceptions on failure in order to unwind the stack.
        # However, making the user acknowledge they cancelled
        # gets old very quickly, so we suppress those. The NotInitialized
        # one is misnamed and indicates a passphrase request was cancelled.
        if msg.code in (self.types.FailureType.PinCancelled,
                        self.types.FailureType.ActionCancelled,
                        self.types.FailureType.NotInitialized):
            raise UserCancelled()
        raise RuntimeError(msg.message)

    def callback_ButtonRequest(self, msg):
        message = self.msg
        if not message:
            message = self.messages.get(msg.code, self.messages['default'])
        self.handler.show_message(message.format(self.device), self.cancel)
        return self.proto.ButtonAck()

    def callback_PinMatrixRequest(self, msg):
        show_strength = True
        if msg.type == 2:
            msg = _("Enter a new PIN for your {}:")
        elif msg.type == 3:
            msg = (_("Re-enter the new PIN for your {}.\n\n"
                     "NOTE: the positions of the numbers have changed!"))
        else:
            msg = _("Enter your current {} PIN:")
            show_strength = False
        pin = self.handler.get_pin(msg.format(self.device),
                                   show_strength=show_strength)
        if len(pin) > 9:
            self.handler.show_error(_('The PIN cannot be longer than 9 characters.'))
            pin = ''  # to cancel below
        if not pin:
            return self.proto.Cancel()
        return self.proto.PinMatrixAck(pin=pin)

    def callback_PassphraseRequest(self, req):
        if req and hasattr(req, 'on_device') and req.on_device is True:
            return self.proto.PassphraseAck()

        if self.creating_wallet:
            msg = _("Enter a passphrase to generate this wallet. Each time "
                    "you use this wallet your {} will prompt you for the "
                    "passphrase. If you forget the passphrase you cannot "
                    "access the bitcoins in the wallet.").format(self.device)
        else:
            msg = _("Enter the passphrase to unlock this wallet:")
        passphrase = self.handler.get_passphrase(msg, self.creating_wallet)
        if passphrase is None:
            return self.proto.Cancel()
        passphrase = bip39_normalize_passphrase(passphrase)

        ack = self.proto.PassphraseAck(passphrase=passphrase)
        length = len(ack.passphrase)
        if length > 50:
            self.handler.show_error(_("Too long passphrase ({} > 50 chars).").format(length))
            return self.proto.Cancel()
        return ack

    def callback_PassphraseStateRequest(self, msg):
        return self.proto.PassphraseStateAck()

    def callback_WordRequest(self, msg):
        self.step += 1
        msg = _("Step {}/24. Enter seed word as explained on "
                "your {}:").format(self.step, self.device)
        word = self.handler.get_word(msg)
        # Unfortunately the device can't handle self.proto.Cancel()
        return self.proto.WordAck(word=word)


class SafeTClientBase(HardwareClientBase, GuiMixin, Logger):

    def __init__(self, handler, plugin, proto):
        assert hasattr(self, 'tx_api')  # ProtocolMixin already constructed?
        HardwareClientBase.__init__(self, plugin=plugin)
        self.proto = proto
        self.device = plugin.device
        self.handler = handler
        self.tx_api = plugin
        self.types = plugin.types
        self.msg = None
        self.creating_wallet = False
        Logger.__init__(self)
        self.used()

    def __str__(self):
        return "%s/%s" % (self.label(), self.features.device_id)

    def label(self):
        return self.features.label

    def get_soft_device_id(self):
        return self.features.device_id

    def is_initialized(self):
        return self.features.initialized

    def is_pairable(self):
        return not self.features.bootloader_mode

    @runs_in_hwd_thread
    def has_usable_connection_with_device(self):
        try:
            res = self.ping("electrum pinging device")
            assert res == "electrum pinging device"
        except BaseException:
            return False
        return True

    def used(self):
        self.last_operation = time.time()

    def prevent_timeouts(self):
        self.last_operation = float('inf')

    @runs_in_hwd_thread
    def timeout(self, cutoff):
        '''Time out the client if the last operation was before cutoff.'''
        if self.last_operation < cutoff:
            self.logger.info("timed out")
            self.clear_session()

    @staticmethod
    def expand_path(n):
        return convert_bip32_path_to_list_of_uint32(n)

    @runs_in_hwd_thread
    def cancel(self):
        '''Provided here as in keepkeylib but not safetlib.'''
        self.transport.write(self.proto.Cancel())

    def i4b(self, x):
        return pack('>I', x)

    @runs_in_hwd_thread
    def get_xpub(self, bip32_path, xtype):
        address_n = self.expand_path(bip32_path)
        creating = False
        node = self.get_public_node(address_n, creating).node
        return BIP32Node(xtype=xtype,
                         eckey=ecc.ECPubkey(node.public_key),
                         chaincode=node.chain_code,
                         depth=node.depth,
                         fingerprint=self.i4b(node.fingerprint),
                         child_number=self.i4b(node.child_num)).to_xpub()

    @runs_in_hwd_thread
    def toggle_passphrase(self):
        if self.features.passphrase_protection:
            self.msg = _("Confirm on your {} device to disable passphrases")
        else:
            self.msg = _("Confirm on your {} device to enable passphrases")
        enabled = not self.features.passphrase_protection
        self.apply_settings(use_passphrase=enabled)

    @runs_in_hwd_thread
    def change_label(self, label):
        self.msg = _("Confirm the new label on your {} device")
        self.apply_settings(label=label)

    @runs_in_hwd_thread
    def change_homescreen(self, homescreen):
        self.msg = _("Confirm on your {} device to change your home screen")
        self.apply_settings(homescreen=homescreen)

    @runs_in_hwd_thread
    def set_pin(self, remove):
        if remove:
            self.msg = _("Confirm on your {} device to disable PIN protection")
        elif self.features.pin_protection:
            self.msg = _("Confirm on your {} device to change your PIN")
        else:
            self.msg = _("Confirm on your {} device to set a PIN")
        self.change_pin(remove)

    @runs_in_hwd_thread
    def clear_session(self):
        '''Clear the session to force pin (and passphrase if enabled)
        re-entry. Does not leak exceptions.'''
        self.logger.info(f"clear session: {self}")
        self.prevent_timeouts()
        try:
            super(SafeTClientBase, self).clear_session()
        except BaseException as e:
            # If the device was removed it has the same effect...
            self.logger.info(f"clear_session: ignoring error {e}")

    @runs_in_hwd_thread
    def get_public_node(self, address_n, creating):
        self.creating_wallet = creating
        return super(SafeTClientBase, self).get_public_node(address_n)

    @runs_in_hwd_thread
    def close(self):
        '''Called when our wallet was closed or the device removed.'''
        self.logger.info("closing client")
        self.clear_session()
        # Release the device
        self.transport.close()

    def firmware_version(self):
        f = self.features
        return (f.major_version, f.minor_version, f.patch_version)

    def atleast_version(self, major, minor=0, patch=0):
        return self.firmware_version() >= (major, minor, patch)

    @staticmethod
    def wrapper(func):
        '''Wrap methods to clear any message box they opened.'''

        def wrapped(self, *args, **kwargs):
            try:
                self.prevent_timeouts()
                return func(self, *args, **kwargs)
            finally:
                self.used()
                self.handler.finished()
                self.creating_wallet = False
                self.msg = None

        return wrapped

    @staticmethod
    def wrap_methods(cls):
        for method in ['apply_settings', 'change_pin',
                       'get_address', 'get_public_node',
                       'load_device_by_mnemonic', 'load_device_by_xprv',
                       'recovery_device', 'reset_device', 'sign_message',
                       'sign_tx', 'wipe_device']:
            setattr(cls, method, cls.wrapper(getattr(cls, method)))
```
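The `wrapper`/`wrap_methods` pair at the bottom of the class is a general pattern: every listed method gets try/finally cleanup without touching its body. A stripped-down standalone sketch, with illustrative names:

```python
def wrapper(func):
    def wrapped(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        finally:
            self.cleanup()  # always runs, even if func raises
    return wrapped

def wrap_methods(cls, names):
    # rebind each named method to its wrapped version on the class itself
    for name in names:
        setattr(cls, name, wrapper(getattr(cls, name)))
    return cls

class Device(object):
    def cleanup(self):
        print("cleaned up")
    def sign(self):
        print("signing")

wrap_methods(Device, ['sign'])
Device().sign()  # prints "signing" then "cleaned up"
```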
[ { "content": "```python\n# (c) 2014, Chris Church <chris@ninemoreminutes.com>\n# Copyright (c) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMEN...
[ { "content": "<|memory_start|>```python\n# (c) 2014, Chris Church <chris@ninemoreminutes.com>\n# Copyright (c) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ =...
```python # (c) 2014, Chris Church <chris@ninemoreminutes.com> # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ author: Ansible Core Team connection: winrm short_description: Run tasks over Microsoft's WinRM description: - Run commands or put/fetch on a target via WinRM - This plugin allows extra arguments to be passed that are supported by the protocol but not explicitly defined here. They should take the form of variables declared with the following pattern `ansible_winrm_<option>`. version_added: "2.0" requirements: - pywinrm (python library) options: # figure out more elegant 'delegation' remote_addr: description: - Address of the windows machine default: inventory_hostname vars: - name: ansible_host - name: ansible_winrm_host remote_user: keywords: - name: user - name: remote_user description: - The user to log in as to the Windows machine vars: - name: ansible_user - name: ansible_winrm_user port: description: - port for winrm to connect on remote target - The default is the https (5896) port, if using http it should be 5895 vars: - name: ansible_port - name: ansible_winrm_port default: 5986 keywords: - name: port type: integer scheme: description: - URI scheme to use - If not set, then will default to C(https) or C(http) if I(port) is C(5985). choices: [http, https] vars: - name: ansible_winrm_scheme path: description: URI path to connect to default: '/wsman' vars: - name: ansible_winrm_path transport: description: - List of winrm transports to attempt to to use (ssl, plaintext, kerberos, etc) - If None (the default) the plugin will try to automatically guess the correct list - The choices avialable depend on your version of pywinrm type: list vars: - name: ansible_winrm_transport kerberos_command: description: kerberos command to use to request a authentication ticket default: kinit vars: - name: ansible_winrm_kinit_cmd kerberos_mode: description: - kerberos usage mode. - The managed option means Ansible will obtain kerberos ticket. - While the manual one means a ticket must already have been obtained by the user. - If having issues with Ansible freezing when trying to obtain the Kerberos ticket, you can either set this to C(manual) and obtain it outside Ansible or install C(pexpect) through pip and try again. choices: [managed, manual] vars: - name: ansible_winrm_kinit_mode connection_timeout: description: - Sets the operation and read timeout settings for the WinRM connection. - Corresponds to the C(operation_timeout_sec) and C(read_timeout_sec) args in pywinrm so avoid setting these vars with this one. - The default value is whatever is set in the installed version of pywinrm. 
vars: - name: ansible_winrm_connection_timeout """ import base64 import inspect import os import re import traceback import json import tempfile import subprocess HAVE_KERBEROS = False try: import kerberos HAVE_KERBEROS = True except ImportError: pass from ansible.errors import AnsibleError, AnsibleConnectionFailure from ansible.errors import AnsibleFileNotFound from ansible.module_utils.parsing.convert_bool import boolean from ansible.module_utils.six.moves.urllib.parse import urlunsplit from ansible.module_utils._text import to_bytes, to_native, to_text from ansible.module_utils.six import binary_type from ansible.plugins.connection import ConnectionBase from ansible.plugins.shell.powershell import leaf_exec from ansible.utils.hashing import secure_hash from ansible.utils.path import makedirs_safe try: import winrm from winrm import Response from winrm.protocol import Protocol HAS_WINRM = True except ImportError as e: HAS_WINRM = False WINRM_IMPORT_ERR = e try: import xmltodict HAS_XMLTODICT = True except ImportError as e: HAS_XMLTODICT = False XMLTODICT_IMPORT_ERR = e try: import pexpect HAS_PEXPECT = True except ImportError as e: HAS_PEXPECT = False # used to try and parse the hostname and detect if IPv6 is being used try: import ipaddress HAS_IPADDRESS = True except ImportError: HAS_IPADDRESS = False try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class Connection(ConnectionBase): '''WinRM connections over HTTP/HTTPS.''' transport = 'winrm' module_implementation_preferences = ('.ps1', '.exe', '') become_methods = ['runas'] allow_executable = False has_pipelining = True allow_extras = True def __init__(self, *args, **kwargs): self.always_pipeline_modules = True self.has_native_async = True self.protocol = None self.shell_id = None self.delegate = None self._shell_type = 'powershell' super(Connection, self).__init__(*args, **kwargs) def set_options(self, task_keys=None, var_options=None, direct=None): if not HAS_WINRM: return super(Connection, self).set_options(task_keys=None, var_options=var_options, direct=direct) self._winrm_host = self.get_option('remote_addr') self._winrm_user = self.get_option('remote_user') self._winrm_pass = self._play_context.password self._become_method = self._play_context.become_method self._become_user = self._play_context.become_user self._become_pass = self._play_context.become_pass self._winrm_port = self.get_option('port') self._winrm_scheme = self.get_option('scheme') # old behaviour, scheme should default to http if not set and the port # is 5985 otherwise https if self._winrm_scheme is None: self._winrm_scheme = 'http' if self._winrm_port == 5985 else 'https' self._winrm_path = self.get_option('path') self._kinit_cmd = self.get_option('kerberos_command') self._winrm_transport = self.get_option('transport') self._winrm_connection_timeout = self.get_option('connection_timeout') if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'): self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES) else: # for legacy versions of pywinrm, use the values we know are supported self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos']) # calculate transport if needed if self._winrm_transport is None or self._winrm_transport[0] is None: # TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic transport_selector = ['ssl'] if self._winrm_scheme == 'https' else ['plaintext'] if HAVE_KERBEROS and ((self._winrm_user and '@' in 
self._winrm_user)): self._winrm_transport = ['kerberos'] + transport_selector else: self._winrm_transport = transport_selector unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes) if unsupported_transports: raise AnsibleError('The installed version of WinRM does not support transport(s) %s' % to_native(list(unsupported_transports), nonstring='simplerepr')) # if kerberos is among our transports and there's a password specified, we're managing the tickets kinit_mode = self.get_option('kerberos_mode') if kinit_mode is None: # HACK: ideally, remove multi-transport stuff self._kerb_managed = "kerberos" in self._winrm_transport and (self._winrm_pass is not None and self._winrm_pass != "") elif kinit_mode == "managed": self._kerb_managed = True elif kinit_mode == "manual": self._kerb_managed = False # arg names we're going passing directly internal_kwarg_mask = set(['self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd']) self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass) argspec = inspect.getargspec(Protocol.__init__) supported_winrm_args = set(argspec.args) supported_winrm_args.update(internal_kwarg_mask) passed_winrm_args = set([v.replace('ansible_winrm_', '') for v in self.get_option('_extras')]) unsupported_args = passed_winrm_args.difference(supported_winrm_args) # warn for kwargs unsupported by the installed version of pywinrm for arg in unsupported_args: display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg)) # pass through matching extras, excluding the list we want to treat specially for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args): self._winrm_kwargs[arg] = self.get_option('_extras')['ansible_winrm_%s' % arg] # Until pykerberos has enough goodies to implement a rudimentary kinit/klist, simplest way is to let each connection # auth itself with a private CCACHE. def _kerb_auth(self, principal, password): if password is None: password = "" self._kerb_ccache = tempfile.NamedTemporaryFile() display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name) krb5ccname = "FILE:%s" % self._kerb_ccache.name os.environ["KRB5CCNAME"] = krb5ccname krb5env = dict(KRB5CCNAME=krb5ccname) # stores various flags to call with kinit, we currently only use this # to set -f so we can get a forward-able ticket (cred delegation) kinit_flags = [] if boolean(self.get_option('_extras').get('ansible_winrm_kerberos_delegation', False)): kinit_flags.append('-f') kinit_cmdline = [self._kinit_cmd] kinit_cmdline.extend(kinit_flags) kinit_cmdline.append(principal) # pexpect runs the process in its own pty so it can correctly send # the password as input even on MacOS which blocks subprocess from # doing so. 
Unfortunately it is not available on the built in Python # so we can only use it if someone has installed it if HAS_PEXPECT: proc_mechanism = "pexpect" command = kinit_cmdline.pop(0) password = to_text(password, encoding='utf-8', errors='surrogate_or_strict') display.vvvv("calling kinit with pexpect for principal %s" % principal) try: child = pexpect.spawn(command, kinit_cmdline, timeout=60, env=krb5env) except pexpect.ExceptionPexpect as err: err_msg = "Kerberos auth failure when calling kinit cmd " \ "'%s': %s" % (command, to_native(err)) raise AnsibleConnectionFailure(err_msg) try: child.expect(".*:") child.sendline(password) except OSError as err: # child exited before the pass was sent, Ansible will raise # error based on the rc below, just display the error here display.vvvv("kinit with pexpect raised OSError: %s" % to_native(err)) # technically this is the stdout + stderr but to match the # subprocess error checking behaviour, we will call it stderr stderr = child.read() child.wait() rc = child.exitstatus else: proc_mechanism = "subprocess" password = to_bytes(password, encoding='utf-8', errors='surrogate_or_strict') display.vvvv("calling kinit with subprocess for principal %s" % principal) try: p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=krb5env) except OSError as err: err_msg = "Kerberos auth failure when calling kinit cmd " \ "'%s': %s" % (self._kinit_cmd, to_native(err)) raise AnsibleConnectionFailure(err_msg) stdout, stderr = p.communicate(password + b'\n') rc = p.returncode != 0 if rc != 0: err_msg = "Kerberos auth failure for principal %s with %s: %s" \ % (principal, proc_mechanism, to_native(stderr.strip())) raise AnsibleConnectionFailure(err_msg) display.vvvvv("kinit succeeded for principal %s" % principal) def _winrm_connect(self): ''' Establish a WinRM connection over HTTP/HTTPS. 
        '''
        display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
                    (self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)

        winrm_host = self._winrm_host
        if HAS_IPADDRESS:
            display.vvvv("checking if winrm_host %s is an IPv6 address" % winrm_host)
            try:
                ipaddress.IPv6Address(winrm_host)
            except ipaddress.AddressValueError:
                pass
            else:
                winrm_host = "[%s]" % winrm_host

        netloc = '%s:%d' % (winrm_host, self._winrm_port)
        endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
        errors = []
        for transport in self._winrm_transport:
            if transport == 'kerberos':
                if not HAVE_KERBEROS:
                    errors.append('kerberos: the python kerberos library is not installed')
                    continue
                if self._kerb_managed:
                    self._kerb_auth(self._winrm_user, self._winrm_pass)
            display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
            try:
                winrm_kwargs = self._winrm_kwargs.copy()
                if self._winrm_connection_timeout:
                    winrm_kwargs['operation_timeout_sec'] = self._winrm_connection_timeout
                    winrm_kwargs['read_timeout_sec'] = self._winrm_connection_timeout + 1
                protocol = Protocol(endpoint, transport=transport, **winrm_kwargs)

                # open the shell from connect so we know we're able to talk to the server
                if not self.shell_id:
                    self.shell_id = protocol.open_shell(codepage=65001)  # UTF-8
                    display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)

                return protocol
            except Exception as e:
                err_msg = to_text(e).strip()
                if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
                    raise AnsibleError('the connection attempt timed out')
                m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
                if m:
                    code = int(m.groups()[0])
                    if code == 401:
                        err_msg = 'the specified credentials were rejected by the server'
                    elif code == 411:
                        return protocol
                errors.append(u'%s: %s' % (transport, err_msg))
                display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
        if errors:
            raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
        else:
            raise AnsibleError('No transport found for WinRM connection')

    def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False):
        rq = {'env:Envelope': protocol._get_soap_header(
            resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
            action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send',
            shell_id=shell_id)}
        stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\
            .setdefault('rsp:Stream', {})
        stream['@Name'] = 'stdin'
        stream['@CommandId'] = command_id
        stream['#text'] = base64.b64encode(to_bytes(stdin))
        if eof:
            stream['@End'] = 'true'
        protocol.send_message(xmltodict.unparse(rq))

    def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None):
        if not self.protocol:
            self.protocol = self._winrm_connect()
            self._connected = True
        if from_exec:
            display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
        else:
            display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
        command_id = None
        try:
            stdin_push_failed = False
            command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args),
                                                   console_mode_stdin=(stdin_iterator is None))

            try:
                if stdin_iterator:
                    for (data, is_last) in stdin_iterator:
                        self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
            except Exception as ex:
                display.warning("FATAL ERROR DURING FILE TRANSFER: %s" % to_text(ex))
                stdin_push_failed = True

            # NB: this can hang if the receiver is still running (eg, network failed a Send request but the
            # server's still happy).
            # FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still
            # running after a failure.
            resptuple = self.protocol.get_command_output(self.shell_id, command_id)
            # ensure stdout/stderr are text for py3
            # FUTURE: this should probably be done internally by pywinrm
            response = Response(tuple(to_text(v) if isinstance(v, binary_type) else v for v in resptuple))

            # TODO: check result from response and set stdin_push_failed if we have nonzero
            if from_exec:
                display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
            else:
                display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)

            display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host)
            display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host)

            if stdin_push_failed:
                stderr = to_bytes(response.std_err, encoding='utf-8')
                if self.is_clixml(stderr):
                    stderr = self.parse_clixml_stream(stderr)

                raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s'
                                   % (to_native(response.std_out), to_native(stderr)))

            return response
        finally:
            if command_id:
                self.protocol.cleanup_command(self.shell_id, command_id)

    def _connect(self):

        if not HAS_WINRM:
            raise AnsibleError("winrm or requests is not installed: %s" % to_native(WINRM_IMPORT_ERR))
        elif not HAS_XMLTODICT:
            raise AnsibleError("xmltodict is not installed: %s" % to_native(XMLTODICT_IMPORT_ERR))

        super(Connection, self)._connect()
        if not self.protocol:
            self.protocol = self._winrm_connect()
            self._connected = True
        return self

    def _reset(self):
        # used by win_reboot (and any other action that might need to bounce the state)
        self.protocol = None
        self.shell_id = None
        self._connect()

    def _create_raw_wrapper_payload(self, cmd, environment=None):
        environment = {} if environment is None else environment

        payload = {
            'module_entry': to_text(base64.b64encode(to_bytes(cmd))),
            'powershell_modules': {},
            'actions': ['exec'],
            'exec': to_text(base64.b64encode(to_bytes(leaf_exec))),
            'environment': environment,
            'min_ps_version': None,
            'min_os_version': None
        }

        return json.dumps(payload)

    def _wrapper_payload_stream(self, payload, buffer_size=200000):
        payload_bytes = to_bytes(payload)
        byte_count = len(payload_bytes)
        for i in range(0, byte_count, buffer_size):
            yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count

    def exec_command(self, cmd, in_data=None, sudoable=True):
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
        cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)

        # TODO: display something meaningful here
        display.vvv("EXEC (via pipeline wrapper)")

        stdin_iterator = None

        if in_data:
            stdin_iterator = self._wrapper_payload_stream(in_data)

        result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator)

        result.std_out = to_bytes(result.std_out)
        result.std_err = to_bytes(result.std_err)

        # parse just stderr from CLIXML output
        if self.is_clixml(result.std_err):
            try:
                result.std_err = self.parse_clixml_stream(result.std_err)
            except Exception:
                # unsure if we're guaranteed a valid xml doc- use raw output in case of error
                pass

        return (result.status_code, result.std_out, result.std_err)

    def is_clixml(self, value):
        return value.startswith(b"#< CLIXML")

    # hacky way to get just stdout- not always sure of doc framing here, so use with care
    def parse_clixml_stream(self, clixml_doc, stream_name='Error'):
        clear_xml = clixml_doc.replace(b'#< CLIXML\r\n', b'')
        doc = xmltodict.parse(clear_xml)
        lines = [l.get('#text', '').replace('_x000D__x000A_', '')
                 for l in doc.get('Objs', {}).get('S', {}) if l.get('@S') == stream_name]
        return '\r\n'.join(lines)

    # FUTURE: determine buffer size at runtime via remote winrm config?
    def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000):
        in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict'))
        offset = 0
        with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
            for out_data in iter((lambda: in_file.read(buffer_size)), b''):
                offset += len(out_data)
                self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' %
                                    (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
                # yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped
                # to the end PS pipeline is still b64-encoded
                b64_data = base64.b64encode(out_data) + b'\r\n'
                # cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set
                # the End signal
                yield b64_data, (in_file.tell() == in_size)

            if offset == 0:  # empty file, return an empty buffer + eof to close it
                yield "", True

    def put_file(self, in_path, out_path):
        super(Connection, self).put_file(in_path, out_path)
        out_path = self._shell._unquote(out_path)
        display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
        if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))

        script_template = u'''
            begin {{
                $path = '{0}'

                $DebugPreference = "Continue"
                $ErrorActionPreference = "Stop"
                Set-StrictMode -Version 2

                $fd = [System.IO.File]::Create($path)

                $sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()

                $bytes = @() #initialize for empty file case
            }}
            process {{
                $bytes = [System.Convert]::FromBase64String($input)
                $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
                $fd.Write($bytes, 0, $bytes.Length)
            }}
            end {{
                $sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null

                $hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()

                $fd.Close()

                Write-Output "{{""sha1"":""$hash""}}"
            }}
        '''

        script = script_template.format(self._shell._escape(out_path))
        cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False)

        result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path))
        # TODO: improve error handling
        if result.status_code != 0:
            raise AnsibleError(to_native(result.std_err))

        put_output = json.loads(result.std_out)
        remote_sha1 = put_output.get("sha1")

        if not remote_sha1:
            raise AnsibleError("Remote sha1 was not returned")

        local_sha1 = secure_hash(in_path)

        if not remote_sha1 == local_sha1:
            raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1)))

    def fetch_file(self, in_path, out_path):
        super(Connection, self).fetch_file(in_path, out_path)
        in_path = self._shell._unquote(in_path)
        out_path = out_path.replace('\\', '/')
        display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
        buffer_size = 2**19  # 0.5MB chunks
        makedirs_safe(os.path.dirname(out_path))
        out_file = None
        try:
            offset = 0
            while True:
                try:
                    script = '''
                        $path = "%(path)s"
                        If (Test-Path -Path $path -PathType Leaf)
                        {
                            $buffer_size = %(buffer_size)d
                            $offset = %(offset)d

                            $stream = New-Object -TypeName IO.FileStream($path, [IO.FileMode]::Open, [IO.FileAccess]::Read, [IO.FileShare]::ReadWrite)
                            $stream.Seek($offset, [System.IO.SeekOrigin]::Begin) > $null
                            $buffer = New-Object -TypeName byte[] $buffer_size
                            $bytes_read = $stream.Read($buffer, 0, $buffer_size)
                            if ($bytes_read -gt 0) {
                                $bytes = $buffer[0..($bytes_read - 1)]
                                [System.Convert]::ToBase64String($bytes)
                            }
                            $stream.Close() > $null
                        }
                        ElseIf (Test-Path -Path $path -PathType Container)
                        {
                            Write-Host "[DIR]";
                        }
                        Else
                        {
                            Write-Error "$path does not exist";
                            Exit 1;
                        }
                    ''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
                    display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
                    cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False)
                    result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
                    if result.status_code != 0:
                        raise IOError(to_native(result.std_err))
                    if result.std_out.strip() == '[DIR]':
                        data = None
                    else:
                        data = base64.b64decode(result.std_out.strip())
                    if data is None:
                        makedirs_safe(out_path)
                        break
                    else:
                        if not out_file:
                            # If out_path is a directory and we're expecting a file, bail out now.
                            if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')):
                                break
                            out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb')
                        out_file.write(data)
                        if len(data) < buffer_size:
                            break
                        offset += len(data)
                except Exception:
                    traceback.print_exc()
                    raise AnsibleError('failed to transfer file to "%s"' % to_native(out_path))
        finally:
            if out_file:
                out_file.close()

    def close(self):
        if self.protocol and self.shell_id:
            display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host)
            self.protocol.close_shell(self.shell_id)
        self.shell_id = None
        self.protocol = None
        self._connected = False
```
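The chunked stdin streaming above (`_wrapper_payload_stream` and `_put_file_stdin_iterator`) is what lets the plugin push arbitrarily large payloads through WinRM Send messages: each chunk carries an `is_last` flag so the consumer knows when to set the protocol's End signal. A minimal, self-contained sketch of the same generator pattern, plain Python with no pywinrm (names are illustrative, not from the plugin):

```python
import base64

def chunked_b64(payload: bytes, buffer_size: int = 200000):
    """Yield (chunk, is_last) pairs; the consumer sets the End flag on the
    final chunk, mirroring how the WinRM plugin terminates the stdin stream."""
    for i in range(0, len(payload), buffer_size):
        chunk = base64.b64encode(payload[i:i + buffer_size])
        yield chunk, i + buffer_size >= len(payload)

# Only the last chunk is flagged, so the receiver can close the stream.
for data, is_last in chunked_b64(b"x" * 450000):
    print(len(data), is_last)
```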
[ { "content": "Reconstruct the code exactly:\n```python\n# -*- coding: UTF-8 -*-\n#\n# Copyright (c) 2009 Ars Aperta, Itaapy, Pierlis, Talend.\n#\n# Authors: Hervé Cauwelier <herve@itaapy.com>\n# Luis Belmar-Letelier <luis@itaapy.com>\n# David Versmisse <david.versmisse@itaapy.com>\n#\n# This f...
[ { "content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# -*- coding: UTF-8 -*-\n#\n# Copyright (c) 2009 Ars Aperta, Itaapy, Pierlis, Talend.\n#\n# Authors: Hervé Cauwelier <herve@itaapy.com>\n# Luis Belmar-Letelier <luis@itaapy.com>\n# David Versmisse <david.versmisse@itaapy.c...
```python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2009 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: Hervé Cauwelier <herve@itaapy.com>
#          Luis Belmar-Letelier <luis@itaapy.com>
#          David Versmisse <david.versmisse@itaapy.com>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
#    Foundation, either version 3 of the License, or (at your option)
#    any later version.
#    Lpod is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#    You should have received a copy of the GNU General Public License
#    along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#    http://www.apache.org/licenses/LICENSE-2.0
#

# Import from the Standard Library
from unittest import TestCase, main

# Import from lpod
from lpod.container import ODF_EXTENSIONS
from lpod.container import odf_get_container
from lpod.container import odf_new_container_from_type
from lpod.container import odf_new_container_from_template
from lpod.vfs import vfs


class NewContainerFromTemplateTestCase(TestCase):

    def test_bad_template(self):
        self.assertRaises(ValueError, odf_new_container_from_template,
                          '../templates/notexisting')

    def test_text_template(self):
        uri = '../templates/text.ott'
        self.assert_(odf_new_container_from_template(uri))

    def test_spreadsheet_template(self):
        uri = '../templates/spreadsheet.ots'
        self.assert_(odf_new_container_from_template(uri))

    def test_presentation_template(self):
        uri = '../templates/presentation.otp'
        self.assert_(odf_new_container_from_template(uri))

    def test_drawing_template(self):
        uri = '../templates/drawing.otg'
        self.assert_(odf_new_container_from_template(uri))


class NewContainerFromTypeTestCase(TestCase):

    def test_bad_type(self):
        self.assertRaises(ValueError, odf_new_container_from_type, 'foobar')

    def test_text_type(self):
        self.assert_(odf_new_container_from_type('text'))

    def test_spreadsheet_type(self):
        self.assert_(odf_new_container_from_type('spreadsheet'))

    def test_presentation_type(self):
        self.assert_(odf_new_container_from_type('presentation'))

    def test_drawing_type(self):
        self.assert_(odf_new_container_from_type('drawing'))


class GetContainerTestCase(TestCase):

    def test_filesystem(self):
        path = 'samples/example.odt'
        self.assert_(odf_get_container(path))

    def test_odf_xml(self):
        path = 'samples/example.xml'
        self.assert_(odf_get_container(path))

    def test_http(self):
        uri = 'http://ftp.lpod-project.org/example.odt'
        self.assert_(odf_get_container(uri))

    def test_ftp(self):
        uri = 'ftp://ftp.lpod-project.org/example.odt'
        self.assert_(odf_get_container(uri))


class ContainerTestCase(TestCase):

    def test_clone(self):
        container = odf_new_container_from_type('text')
        clone = container.clone()
        self.assertEqual(clone.uri, None)
        self.assertNotEqual(clone._odf_container__data, None)

    def test_get_part_xml(self):
        container = odf_get_container('samples/example.odt')
        content = container.get_part('content')
        xml_decl = '<?xml version="1.0" encoding="UTF-8"?>'
        self.assert_(content.startswith(xml_decl))

    def test_get_part_mimetype(self):
        container = odf_get_container('samples/example.odt')
        mimetype = container.get_part('mimetype')
        self.assertEqual(mimetype, ODF_EXTENSIONS['odt'])

    def test_odf_xml_bad_part(self):
        container = odf_get_container('samples/example.xml')
        self.assertRaises(ValueError, container.get_part, 'Pictures/a.jpg')

    def test_odf_xml_part_xml(self):
        container = odf_get_container('samples/example.xml')
        meta = container.get_part('meta')
        self.assert_(meta.startswith('<office:document-meta>'))

    def test_set_part(self):
        container = odf_get_container('samples/example.odt')
        path = 'Pictures/a.jpg'
        data = 'JFIFIThinkImAnImage'
        container.set_part(path, data)
        self.assertEqual(container.get_part(path), data)

    def test_del_part(self):
        container = odf_get_container('samples/example.odt')
        # Not a realistic test
        path = 'content'
        container.del_part(path)
        self.assertRaises(ValueError, container.get_part, path)


class ContainerSaveTestCase(TestCase):

    def setUp(self):
        vfs.make_folder('trash')

    def tearDown(self):
        vfs.remove('trash')

    def test_save_zip(self):
        """TODO: 2 cases
           1. from "zip" to "zip"
           2. from "flat" to "zip"
        """
        container = odf_get_container('samples/example.odt')
        container.save('trash/example.odt')
        # TODO FINISH ME

    # XXX We must implement the flat xml part
    def xtest_save_flat(self):
        """TODO: 2 cases
           1. from "zip" to "flat"
           2. from "flat" to "flat"
        """
        raise NotImplementedError


if __name__ == '__main__':
    main()
```
[ { "content": "Produce an exact reconstruction of the code:\n```python\n# Bulletproof Arma Launcher\n# Copyright (C) 2017 Lukasz Taczuk\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software...
[ { "content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\n# Bulletproof Arma Launcher\n# Copyright (C) 2017 Lukasz Taczuk\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by t...
```python
# Bulletproof Arma Launcher
# Copyright (C) 2017 Lukasz Taczuk
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

from __future__ import unicode_literals

import errno
import hashlib
import os

from utils import paths
from utils import context


def get_cache_directory():
    return paths.get_launcher_directory('filecache')


def map_file(url):
    """Get the path where the file should be stored in the cache."""
    file_name = hashlib.sha256(url).hexdigest()
    return os.path.join(get_cache_directory(), file_name)


def get_file(url):
    """Get the file contents from the cache or None if the file is not
    present in the cache.
    """
    path = map_file(url)

    f = None
    try:
        f = open(path, 'rb')
        return f.read()

    except IOError as ex:
        if ex.errno == errno.ENOENT:  # No such file
            return None
        raise

    finally:
        if f:
            f.close()


def save_file(url, data):
    """Save the file contents to the cache.
    The contents of the file are saved to a temporary file and then moved
    to ensure that no truncated file is present in the cache.
    """
    # Ensure the directory exists
    paths.mkdir_p(get_cache_directory())

    path = map_file(url)
    tmp_path = path + '_tmp'

    f = open(tmp_path, 'wb')
    f.write(data)
    f.close()

    # Ensure the file does not exist (would raise an exception on Windows)
    with context.ignore_nosuchfile_exception():
        os.unlink(path)

    os.rename(tmp_path, path)
```
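The save path above uses the classic write-to-temp-then-rename idiom so a reader can never observe a truncated cache entry. A self-contained sketch of the same idiom using only the standard library (function and variable names here are illustrative, not from the launcher):

```python
import hashlib
import os

def save_atomically(directory, url, data):
    # Key the cache entry by a digest of the URL, as the launcher does.
    path = os.path.join(directory, hashlib.sha256(url.encode()).hexdigest())
    tmp_path = path + '_tmp'
    with open(tmp_path, 'wb') as f:
        f.write(data)
    # os.replace is atomic on POSIX and also overwrites an existing target
    # on Windows, the case the original handles with an explicit unlink().
    os.replace(tmp_path, path)
    return path
```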
[ { "content": "```python\nimport gzip\nimport pytest\n\nimport spindrift.http as http\nimport spindrift.network as network\n\n\nPORT = 12345\n\n\nclass Context(object):\n\n def __init__(self):\n self.server = 0\n self.client = 0\n\n\nclass Server(http.HTTPHandler):\n\n def on_http_data(self):...
[ { "content": "<|memory_start|>```python\nimport gzip\nimport pytest\n\nimport spindrift.http as http\nimport spindrift.network as network\n\n\nPORT = 12345\n\n\nclass Context(object):\n\n def __init__(self):\n self.server = 0\n self.client = 0\n\n\nclass Server(http.HTTPHandler):\n\n def on_...
```python
import gzip
import pytest

import spindrift.http as http
import spindrift.network as network


PORT = 12345


class Context(object):

    def __init__(self):
        self.server = 0
        self.client = 0


class Server(http.HTTPHandler):

    def on_http_data(self):
        self.context.server += 1
        self.http_send_server()


class Client(http.HTTPHandler):

    def on_ready(self):
        self.http_send()

    def on_http_data(self):
        self.context.client += 1
        self.close('done')


@pytest.fixture
def ctx():
    return Context()


@pytest.fixture
def net(ctx):
    n = network.Network()
    n.add_server(PORT, Server, context=ctx)
    yield n
    n.close()


def test_basic(ctx, net):
    c = net.add_connection('localhost', PORT, Client, context=ctx)
    while c.is_open:
        net.service()
    assert ctx.server == 1
    assert ctx.client == 1


class PipelineClient(http.HTTPHandler):

    def on_ready(self):
        self.http_send()
        self.http_send()
        self.http_send()

    def on_http_data(self):
        self.context.client += 1
        if self.context.client == 3:
            self.close()


def test_pipeline(ctx, net):
    c = net.add_connection('localhost', PORT, PipelineClient, context=ctx)
    while c.is_open:
        net.service()
    assert ctx.server == 3
    assert ctx.client == 3


def test_gzip():
    handler = http.HTTPHandler(0, network.Network())
    data = b'This Is A Test'
    zdata = gzip.compress(data)

    handler.http_content = zdata
    handler._on_http_data()
    assert handler.http_content == zdata

    handler.http_headers = {'content-encoding': 'gzip'}
    handler._on_http_data()
    assert handler.http_content == data

    handler.http_headers['content-type'] = 'text/html; charset=utf-8'
    handler.http_content = zdata
    handler._on_http_data()
    assert handler.http_content == data.decode()


def test_server_compress():
    data = 'This is a TeSt'

    class _handler(http.HTTPHandler):
        def _send(self, headers, content):
            print(headers)
            self.tested = True
            assert content == gzip.compress(data.encode())

    handler = _handler(0, network.Network())
    handler.tested = False
    handler.http_send_server(data, gzip=True)
    assert handler.tested
```
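The gzip tests above hinge on two behaviors: `gzip.compress`/`gzip.decompress` being exact inverses, and the handler additionally decoding text bodies when `content-type` carries a charset. A quick stdlib-only illustration of what the handler is expected to do in each case:

```python
import gzip

body = gzip.compress(b'This Is A Test')

# content-encoding: gzip -> inflate back to the original bytes
assert gzip.decompress(body) == b'This Is A Test'

# content-type: text/...; charset=utf-8 -> additionally decode bytes to str
assert gzip.decompress(body).decode('utf-8') == 'This Is A Test'
```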
[ { "content": "Here is the source code:\n```python\n\"\"\"\nOperations on wrappers\n\"\"\"\n\nimport array\nimport __builtin__\nimport ctypes\nimport collections\nimport functools\nfrom ROOT import THStack, TGraphAsymmErrors\n\nimport history\nimport wrappers\n\n\nclass OperationError(Exception): pass\nclass Too...
[ { "content": "Here is the source code:\n<|memory_start|>```python\n\"\"\"\nOperations on wrappers\n\"\"\"\n\nimport array\nimport __builtin__\nimport ctypes\nimport collections\nimport functools\nfrom ROOT import THStack, TGraphAsymmErrors\n\nimport history\nimport wrappers\n\n\nclass OperationError(Exception):...
```python """ Operations on wrappers """ import array import __builtin__ import ctypes import collections import functools from ROOT import THStack, TGraphAsymmErrors import history import wrappers class OperationError(Exception): pass class TooFewWrpsError(OperationError): pass class TooManyWrpsError(OperationError): pass class WrongInputError(OperationError): pass class NoLumiMatchError(OperationError): pass def iterableize(obj): if isinstance(obj, collections.Iterable): return obj else: return [obj] def add_wrp_kws(func): """Pops 'wrp_kws' from given keywords and updates returned wrapper.""" @functools.wraps(func) def catch_wrp_kws(*args, **kws): wrp_kws = kws.pop('wrp_kws', {}) ret = func(*args, **kws) ret.__dict__.update(wrp_kws) return ret return catch_wrp_kws @add_wrp_kws @history.track_history def stack(wrps): """ Applies only to HistoWrappers. Returns StackWrapper. Checks lumi to be equal among all wrappers. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(1,4) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> h2 = TH1I("h2", "", 2, .5, 4.5) >>> h2.Fill(1,3) 1 >>> h2.Fill(3,6) 2 >>> w2 = wrappers.HistoWrapper(h2, lumi=2.) >>> w3 = stack([w1, w2]) >>> w3.histo.Integral() 13.0 >>> w3.lumi 2.0 """ wrps = iterableize(wrps) stk_wrp = None lumi = 0. info = None sample = "" for wrp in wrps: if not isinstance(wrp, wrappers.HistoWrapper): # histo check raise WrongInputError( "stack accepts only HistoWrappers. wrp: " + str(wrp) ) if not stk_wrp: # stack init stk_wrp = THStack(wrp.name, wrp.title) lumi = wrp.lumi info = wrp.all_info() sample = wrp.sample elif lumi != wrp.lumi: # lumi check raise NoLumiMatchError( "stack needs lumis to match. (%f != %f)" % (lumi, wrp.lumi) ) if sample != wrp.sample: # add to stack sample = "" stk_wrp.Add(wrp.histo) if not info: raise TooFewWrpsError( "At least one Wrapper must be provided." ) if not sample: del info["sample"] return wrappers.StackWrapper(stk_wrp, **info) @add_wrp_kws @history.track_history def sum(wrps): """ Applies only to HistoWrappers. Returns HistoWrapper. Adds lumi up. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(1) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> h2 = TH1I("h2", "", 2, .5, 4.5) >>> h2.Fill(1) 1 >>> h2.Fill(3) 2 >>> w2 = wrappers.HistoWrapper(h2, lumi=3.) >>> w3 = sum([w1, w2]) >>> w3.histo.Integral() 3.0 >>> w3.lumi 5.0 """ wrps = iterableize(wrps) histo = None lumi = 0. info = None for wrp in wrps: if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "sum accepts only HistoWrappers. wrp: " + str(wrp) ) if histo: histo.Add(wrp.histo) else: histo = wrp.histo.Clone() info = wrp.all_info() lumi += wrp.lumi if not info: raise TooFewWrpsError( "At least one Wrapper must be provided." ) info["lumi"] = lumi return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def diff(wrps): """ Applies only to HistoWrappers. Returns HistoWrapper. Takes lumi from first. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(1, 2) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> h2 = TH1I("h2", "", 2, .5, 4.5) >>> h2.Fill(1) 1 >>> w2 = wrappers.HistoWrapper(h2, lumi=3.) >>> w3 = diff([w1, w2]) >>> w3.histo.Integral() 1.0 >>> w3.lumi 2.0 """ wrps = iterableize(wrps) histo = None lumi = 0. info = None for wrp in wrps: if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "sum accepts only HistoWrappers. wrp: " + str(wrp) ) if histo: histo.Add(wrp.histo, -1.) 
else: histo = wrp.histo.Clone() info = wrp.all_info() lumi = wrp.lumi if not info: raise TooFewWrpsError( "At least one Wrapper must be provided." ) info["lumi"] = lumi return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def merge(wrps): """ Applies only to HistoWrapper. Returns HistoWrapper. Normalizes histos to lumi. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1,4) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> h2 = TH1I("h2", "", 2, .5, 2.5) >>> h2.Fill(1,3) 1 >>> h2.Fill(2,6) 2 >>> w2 = wrappers.HistoWrapper(h2, lumi=3.) >>> w3 = merge([w1, w2]) >>> w3.histo.Integral() 5.0 >>> w3.lumi 1.0 """ wrps = iterableize(wrps) histo = None info = None for wrp in wrps: if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "merge accepts only HistoWrappers. wrp: " + str(wrp) ) if histo: histo.Add(wrp.histo, 1. / wrp.lumi) else: histo = wrp.histo.Clone() histo.Scale(1. / wrp.lumi) info = wrp.all_info() if not info: raise TooFewWrpsError( "At least one Wrapper must be provided." ) info["lumi"] = 1. return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def prod(wrps): """ Applies to HistoWrapper and FloatWrapper. Returns HistoWrapper. Takes lumi from first. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2, history="w1") >>> h2 = TH1I("h2", "", 2, .5, 2.5) >>> h2.Fill(1) 1 >>> h2.Fill(2) 2 >>> w2 = wrappers.HistoWrapper(h2, lumi=3) >>> w3 = prod([w1, w2]) >>> w3.histo.Integral() 1.0 >>> w3.lumi 1.0 >>> w4 = wrappers.FloatWrapper(2.) >>> w5 = prod([w1, w4]) >>> w5.histo.Integral() 2.0 """ wrps = iterableize(wrps) histo = None info = None lumi = 1. for wrp in wrps: if histo: if isinstance(wrp, wrappers.HistoWrapper): histo.Multiply(wrp.histo) lumi = 1. elif not isinstance(wrp, wrappers.FloatWrapper): raise WrongInputError( "prod accepts only HistoWrappers and FloatWrappers. wrp: " + str(wrp) ) else: histo.Scale(wrp.float) lumi *= wrp.float else: if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "prod expects first argument to be of type HistoWrapper. wrp: " + str(wrp) ) histo = wrp.histo.Clone() info = wrp.all_info() lumi = wrp.lumi if not info: raise TooFewWrpsError( "At least one Wrapper must be provided." ) info["lumi"] = lumi return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def div(wrps): """ Applies to HistoWrapper and FloatWrapper. Returns HistoWrapper. Takes lumi from first. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1,4) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2) >>> h2 = TH1I("h2", "", 2, .5, 2.5) >>> h2.Fill(1,2) 1 >>> w2 = wrappers.HistoWrapper(h2, lumi=3) >>> w3 = div([w1, w2]) >>> w3.histo.Integral() 2.0 >>> w4 = wrappers.FloatWrapper(2., history="w4") >>> w5 = div([w1, w4]) >>> w5.histo.Integral() 2.0 """ wrps = iterableize(wrps) wrps = iter(wrps) try: nominator = next(wrps) denominator = next(wrps) except StopIteration: raise TooFewWrpsError("div needs exactly two Wrappers.") try: wrps.next() raise TooManyWrpsError("div needs exactly two Wrappers.") except StopIteration: pass if not isinstance(nominator, wrappers.HistoWrapper): raise WrongInputError( "div needs nominator to be of type HistoWrapper. nominator: " + str(nominator) ) if not (isinstance(denominator, wrappers.HistoWrapper) or isinstance(denominator, wrappers.FloatWrapper)): raise WrongInputError( "div needs denominator to be of type HistoWrapper or FloatWrapper. 
denominator: " + str(denominator) ) histo = nominator.histo.Clone() lumi = nominator.lumi if isinstance(denominator, wrappers.HistoWrapper): histo.Divide(denominator.histo) lumi = 1. else: histo.Scale(1. / denominator.float) lumi /= denominator.float info = nominator.all_info() info["lumi"] = lumi return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def lumi(wrp): """ Applies to HistoWrapper. Returns FloatWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> w2 = lumi(w1) >>> w2.float 2.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "lumi needs argument of type HistoWrapper. histo: " + str(wrp) ) info = wrp.all_info() return wrappers.FloatWrapper(wrp.lumi, **info) @add_wrp_kws @history.track_history def norm_to_lumi(wrp): """ Applies to HistoWrapper. Returns HistoWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1, 4) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> w1.histo.Integral() 4.0 >>> w2 = norm_to_lumi(w1) >>> w2.histo.Integral() 2.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "norm_to_lumi needs argument of type HistoWrapper. histo: " + str(wrp) ) histo = wrp.histo.Clone() histo.Scale(1. / wrp.lumi) info = wrp.all_info() info["lumi"] = 1. return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def norm_to_integral(wrp, use_bin_width=False): """ Applies to HistoWrapper. Returns HistoWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1, 4) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> w1.histo.Integral() 4.0 >>> w2 = norm_to_integral(w1) >>> w2.histo.Integral() 1.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "norm_to_integral needs argument of type HistoWrapper. histo: " + str(wrp) ) histo = wrp.histo.Clone() option = "width" if use_bin_width else "" integr = wrp.histo.Integral(option) or 1. histo.Scale(1. / integr) info = wrp.all_info() info["lumi"] /= integr return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def copy(wrp): """ Applies to HistoWrapper. Returns HistoWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1, 4) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> w2=copy(w1) >>> w2.histo.GetName() 'h1' >>> w1.name == w2.name True >>> w1.histo.Integral() == w2.histo.Integral() True >>> w1.histo != w2.histo True """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "copy needs argument of type HistoWrapper. histo: " + str(wrp) ) histo = wrp.histo.Clone() info = wrp.all_info() return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def rebin(wrp, bin_bounds, norm_by_bin_width=False): """ Applies to HistoWrapper. Returns Histowrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 4, .5, 4.5) >>> h1.Fill(1) 1 >>> h1.Fill(2) 2 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> w2=rebin(w1, [.5, 2.5, 4.5]) >>> w1.histo.GetNbinsX() 4 >>> w2.histo.GetNbinsX() 2 >>> w2.histo.GetBinContent(1) 2.0 >>> w2.histo.GetBinContent(2) 0.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "rebin needs argument of type HistoWrapper. histo: " + str(wrp) ) if len(bin_bounds) < 2: raise OperationError( "Number of bins < 2, must include at least one bin!" 
) bin_bounds = array.array("d", bin_bounds) orig_bin_width = wrp.histo.GetBinWidth(1) histo = wrp.histo.Rebin( len(bin_bounds) - 1, wrp.name, bin_bounds ) if norm_by_bin_width: for i in xrange(histo.GetNbinsX()+1): factor = histo.GetBinWidth(i) / orig_bin_width histo.SetBinContent(i, histo.GetBinContent(i) / factor) histo.SetBinError(i, histo.GetBinError(i) / factor) info = wrp.all_info() return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def trim(wrp, left=True, right=True): """ Applies to HistoWrapper. Returns Histowrapper. If left / right are set to values, these are applied. Otherwise empty bins are cut off. >>> from ROOT import TH1I >>> w1 = wrappers.HistoWrapper(TH1I("h1", "", 10, .5, 10.5)) >>> w1.histo.Fill(5) 5 >>> w2 = trim(w1) >>> w2.histo.GetNbinsX() 1 >>> w2.histo.GetXaxis().GetXmin() 4.5 >>> w2.histo.GetXaxis().GetXmax() 5.5 >>> w2 = trim(w1, 3.5, 7.5) >>> w2.histo.GetNbinsX() 4 >>> w2.histo.GetXaxis().GetXmin() 3.5 >>> w2.histo.GetXaxis().GetXmax() 7.5 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "trim needs argument of type HistoWrapper. histo: " + str(wrp) ) # find left / right values if not given histo = wrp.histo axis = histo.GetXaxis() n_bins = histo.GetNbinsX() if type(left) == bool: if left: for i in xrange(n_bins+1): if histo.GetBinContent(i): left = axis.GetBinLowEdge(i) break else: left = axis.GetXmin() if type(right) == bool: if right: for i in xrange(n_bins+1, 0, -1): if histo.GetBinContent(i): right = axis.GetBinUpEdge(i) break else: right = axis.GetXmax() if left > right: raise OperationError("bounds: left > right") # create new bin_bounds index = 0 while axis.GetBinLowEdge(index) < left: index += 1 bin_bounds = [axis.GetBinLowEdge(index)] while axis.GetBinUpEdge(index) <= right: bin_bounds.append(axis.GetBinUpEdge(index)) index += 1 return rebin(wrp, bin_bounds) @add_wrp_kws @history.track_history def mv_in(wrp, overflow=True, underflow=True): """ Moves under- and/or overflow bin into first/last bin. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(0) -1 >>> h1.Fill(5,3) -1 >>> w1 = wrappers.HistoWrapper(h1) >>> w1.histo.Integral() 0.0 >>> w2 = mv_in(w1, False, False) >>> w2.histo.Integral() 0.0 >>> w3 = mv_in(w1, True, False) >>> w3.histo.Integral() 3.0 >>> w4 = mv_in(w1, False, True) >>> w4.histo.Integral() 1.0 >>> w5 = mv_in(w1, True, True) >>> w5.histo.Integral() 4.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "mv_bin needs argument of type HistoWrapper. histo: " + str(wrp) ) histo = wrp.histo.Clone() nbins = histo.GetNbinsX() if underflow: firstbin = histo.GetBinContent(0) firstbin += histo.GetBinContent(1) histo.SetBinContent(1, firstbin) histo.SetBinContent(0, 0.) if overflow: lastbin = histo.GetBinContent(nbins + 1) lastbin += histo.GetBinContent(nbins) histo.SetBinContent(nbins, lastbin) histo.SetBinContent(histo.GetNbinsX() + 1, 0.) return wrappers.HistoWrapper(histo, **wrp.all_info()) @add_wrp_kws @history.track_history def integral(wrp, use_bin_width=False): """ Integral. Applies to HistoWrapper. Returns FloatWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(1) 1 >>> h1.Fill(3,3) 2 >>> w1 = wrappers.HistoWrapper(h1) >>> w2 = integral(w1) >>> w2.float 4.0 >>> w3 = integral(w1, True) >>> w3.float 8.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "int needs argument of type HistoWrapper. 
histo: " + str(wrp) ) option = "width" if use_bin_width else "" info = wrp.all_info() return wrappers.FloatWrapper(wrp.histo.Integral(option), **info) @add_wrp_kws @history.track_history def int_l(wrp, use_bin_width=False): """ Left-sided integral. Applies to HistoWrapper. Returns HistoWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(1) 1 >>> h1.Fill(3,2) 2 >>> w1 = wrappers.HistoWrapper(h1) >>> w2 = int_l(w1) >>> w2.histo.GetBinContent(1) 1.0 >>> w2.histo.GetBinContent(2) 3.0 >>> w2 = int_l(w1, True) >>> w2.histo.GetBinContent(1) 2.0 >>> w2.histo.GetBinContent(2) 6.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "int_l needs argument of type HistoWrapper. histo: " + str(wrp) ) int_histo = wrp.histo.Clone() option = "width" if use_bin_width else "" for i in xrange(int_histo.GetNbinsX(), 0, -1): error = ctypes.c_double() value = int_histo.IntegralAndError(1, i, error, option) int_histo.SetBinContent(i, value) int_histo.SetBinError(i, error.value) info = wrp.all_info() return wrappers.HistoWrapper(int_histo, **info) @add_wrp_kws @history.track_history def int_r(wrp, use_bin_width=False): """ Applies to HistoWrapper. Returns HistoWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(1) 1 >>> h1.Fill(3,2) 2 >>> w1 = wrappers.HistoWrapper(h1) >>> w2 = int_r(w1) >>> w2.histo.GetBinContent(1) 3.0 >>> w2.histo.GetBinContent(2) 2.0 >>> w2 = int_r(w1, True) >>> w2.histo.GetBinContent(1) 6.0 >>> w2.histo.GetBinContent(2) 4.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "int_r needs argument of type HistoWrapper. histo: " + str(wrp) ) int_histo = wrp.histo.Clone() option = "width" if use_bin_width else "" n_bins = int_histo.GetNbinsX() for i in xrange(1, 1 + n_bins): error = ctypes.c_double() value = int_histo.IntegralAndError(i, n_bins, error, option) int_histo.SetBinContent(i, value) int_histo.SetBinError(i, error.value) info = wrp.all_info() return wrappers.HistoWrapper(int_histo, **info) @add_wrp_kws @history.track_history def chi2(wrps, x_min=0, x_max=0): """ Expects two Histowrappers. Returns FloatWrapper. """ wrps = iterableize(wrps) wrps = iter(wrps) try: first, second = next(wrps), next(wrps) except StopIteration: raise TooFewWrpsError("chi2 needs exactly two HistoWrappers.") try: wrps.next() raise TooManyWrpsError("chi2 needs exactly two HistoWrappers.") except StopIteration: pass for w in (first, second): if not isinstance(w, wrappers.HistoWrapper): raise WrongInputError( "chi2 needs type HistoWrapper. w: " + str(w) ) if not first.histo.GetNbinsX() == second.histo.GetNbinsX(): raise WrongInputError( "chi2 needs histos with same number of bins." ) if not x_max: x_max = int(first.histo.GetNbinsX() - 1) def get_weight_for_bin(i): val = (first.histo.GetBinContent(i+1) - second.histo.GetBinContent(i+1))**2 err1 = first.histo.GetBinError(i+1) err2 = second.histo.GetBinError(i+1) if err1 and err2: return val / (err1**2 + err2**2) else: return 0. chi2_val = __builtin__.sum( get_weight_for_bin(i) for i in xrange(x_min, x_max) ) info = second.all_info() info.update(first.all_info()) return wrappers.FloatWrapper( chi2_val, **info ) @add_wrp_kws @history.track_history def eff(wrps, option=''): """ Applies to HistoWrappers only. Returns GraphWrapper. Takes lumi from first. 
>>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1) 1 >>> h1.Fill(1) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2) >>> h2 = TH1I("h2", "", 2, .5, 2.5) >>> h2.Sumw2() >>> h2.Fill(1) 1 >>> h2.Fill(1) 1 >>> h2.Fill(1) 1 >>> h2.Fill(2) 2 >>> w2 = wrappers.HistoWrapper(h2, lumi=3) >>> w3 = eff([w1, w2]) >>> w3.graph.GetN() 2 >>> hi = w3.graph.GetErrorYhigh(0) >>> lo = w3.graph.GetErrorYlow(0) >>> abs(hi - 0.277375360987) < 1e-10 True >>> abs(lo - 0.414534706284) < 1e-10 True """ wrps = iterableize(wrps) wrps = iter(wrps) try: nominator = next(wrps) denominator = next(wrps) except StopIteration: raise TooFewWrpsError("eff needs exactly two Wrappers.") try: wrps.next() raise TooManyWrpsError("eff needs exactly two Wrappers.") except StopIteration: pass if not isinstance(nominator, wrappers.HistoWrapper): raise WrongInputError( "eff needs nominator to be of type HistoWrapper. nominator: " + str(nominator) ) if not (isinstance(denominator, wrappers.HistoWrapper)): raise WrongInputError( "eff needs denominator to be of type HistoWrapper. denominator: " + str(denominator) ) graph = TGraphAsymmErrors(nominator.histo, denominator.histo, option) graph.GetXaxis().SetTitle(nominator.histo.GetXaxis().GetTitle()) info = nominator.all_info() return wrappers.GraphWrapper(graph, **info) if __name__ == "__main__": import ROOT ROOT.TH1.AddDirectory(False) import doctest doctest.testmod() ```
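The `add_wrp_kws` decorator above is a small, reusable pattern: it strips a reserved keyword argument before the call and applies it to the returned object afterwards. A self-contained Python 3 sketch of the same pattern, with no ROOT dependency (the `Result` class and names here are invented for illustration):

```python
import functools

def add_extra_attrs(func):
    """Pop 'extra' from the keywords and copy it onto the returned object."""
    @functools.wraps(func)
    def wrapper(*args, **kws):
        extra = kws.pop('extra', {})
        result = func(*args, **kws)
        result.__dict__.update(extra)  # same move as ret.__dict__.update(wrp_kws)
        return result
    return wrapper

class Result(object):
    pass

@add_extra_attrs
def make_result(value):
    r = Result()
    r.value = value
    return r

r = make_result(42, extra={'label': 'ttbar'})
assert (r.value, r.label) == (42, 'ttbar')
```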
[ { "content": "Here is a code file:\n```python\n# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apac...
[ { "content": "Here is a code file:\n<|memory_start|>```python\n# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# ...
```python
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for agents.dqn.categorical_dqn_agent."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf  # pylint: disable=g-explicit-tensorflow-version-import

from tf_agents.agents.categorical_dqn import categorical_dqn_agent
from tf_agents.networks import categorical_q_network
from tf_agents.networks import network
from tf_agents.networks import q_rnn_network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import test_utils
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.utils import common


class DummyCategoricalNet(network.Network):

  def __init__(self,
               input_tensor_spec,
               num_atoms=51,
               num_actions=2,
               name=None):
    self._num_atoms = num_atoms
    self._num_actions = num_actions
    super(DummyCategoricalNet, self).__init__(
        input_tensor_spec=input_tensor_spec,
        state_spec=(),
        name=name)

    # In CategoricalDQN we are dealing with a distribution over Q-values, which
    # are represented as num_atoms bins, ranging from min_q_value to
    # max_q_value. In order to replicate the setup in the non-categorical
    # network (namely, [[2, 1], [1, 1]]), we use the following "logits":
    # [[0, 1, ..., num_atoms-1, num_atoms, 1, ..., 1],
    #  [1, ......................................, 1]]
    # The important bit is that the first half of the first list (which
    # corresponds to the logits for the first action) place more weight on the
    # higher q_values than on the lower ones, thereby resulting in a higher
    # value for the first action.
    weights_initializer = np.array([
        np.concatenate((np.arange(num_atoms), np.ones(num_atoms))),
        np.concatenate((np.ones(num_atoms), np.ones(num_atoms)))])
    kernel_initializer = tf.constant_initializer(weights_initializer)
    bias_initializer = tf.keras.initializers.Ones()

    # Store custom layers that can be serialized through the Checkpointable API.
    self._dummy_layers = []
    self._dummy_layers.append(
        tf.keras.layers.Dense(
            num_actions * num_atoms,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer))

  @property
  def num_atoms(self):
    return self._num_atoms

  def call(self, inputs, step_type=None, network_state=()):
    del step_type
    inputs = tf.cast(inputs, tf.float32)
    for layer in self._dummy_layers:
      inputs = layer(inputs)
    logits = tf.reshape(inputs, [-1, self._num_actions, self._num_atoms])
    return logits, network_state


class KerasLayersNet(network.Network):

  def __init__(self, observation_spec, action_spec, layer, num_atoms=5,
               name=None):
    super(KerasLayersNet, self).__init__(observation_spec, state_spec=(),
                                         name=name)
    self._layer = layer
    self.num_atoms = num_atoms  # Dummy, this doesn't match the layer output.

  def call(self, inputs, step_type=None, network_state=()):
    del step_type
    return self._layer(inputs), network_state

  def create_variables(self, input_spec=None):
    output_spec = network.create_variables(
        self._layer, input_spec or self._input_tensor_spec)
    self._network_output_spec = output_spec
    self.built = True
    return output_spec


class DummyCategoricalQRnnNetwork(q_rnn_network.QRnnNetwork):

  def __init__(self, input_tensor_spec, action_spec, num_atoms=51, **kwargs):
    if not isinstance(action_spec, tensor_spec.BoundedTensorSpec):
      raise TypeError('action_spec must be a BoundedTensorSpec. Got: %s' % (
          action_spec,))

    self._num_actions = action_spec.maximum - action_spec.minimum + 1
    self._num_atoms = num_atoms
    q_network_action_spec = tensor_spec.BoundedTensorSpec(
        (), tf.int32, minimum=0, maximum=self._num_actions * num_atoms - 1)

    super(DummyCategoricalQRnnNetwork, self).__init__(
        input_tensor_spec=input_tensor_spec,
        action_spec=q_network_action_spec,
        **kwargs)

  @property
  def num_atoms(self):
    return self._num_atoms

  def call(self, observations, step_type=None, network_state=()):
    logits, network_state = super(DummyCategoricalQRnnNetwork, self).call(
        observations, step_type, network_state)
    shape = logits.shape.as_list()
    assert shape[-1] == self._num_actions * self._num_atoms
    new_shape = shape[:-1] + [self._num_actions, self._num_atoms]
    logits = tf.reshape(logits, new_shape)
    return logits, network_state


class CategoricalDqnAgentTest(tf.test.TestCase):

  def setUp(self):
    super(CategoricalDqnAgentTest, self).setUp()
    tf.compat.v1.enable_resource_variables()
    self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)
    self._time_step_spec = ts.time_step_spec(self._obs_spec)
    self._action_spec = tensor_spec.BoundedTensorSpec((), tf.int32, 0, 1)
    self._categorical_net = categorical_q_network.CategoricalQNetwork(
        self._obs_spec,
        self._action_spec,
        fc_layer_params=[4])
    self._dummy_categorical_net = DummyCategoricalNet(self._obs_spec)
    self._optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.01)

  def testCreateAgentNestSizeChecks(self):
    action_spec = [
        tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1),
        tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)
    ]

    with self.assertRaisesRegex(ValueError, 'Only scalar actions'):
      categorical_dqn_agent.CategoricalDqnAgent(
          self._time_step_spec,
          action_spec,
          self._dummy_categorical_net,
          self._optimizer)

  def testCreateAgentDimChecks(self):
    action_spec = tensor_spec.BoundedTensorSpec([1, 2], tf.int32, 0, 1)
    with self.assertRaisesRegex(ValueError, 'Only scalar actions'):
      categorical_dqn_agent.CategoricalDqnAgent(
          self._time_step_spec,
          action_spec,
          self._dummy_categorical_net,
          self._optimizer)

  def testCreateAgentDefaultNetwork(self):
    categorical_dqn_agent.CategoricalDqnAgent(
        self._time_step_spec,
        self._action_spec,
        self._categorical_net,
        self._optimizer)

  def testCreateAgentWithPrebuiltPreprocessingLayers(self):
    dense_layer = tf.keras.Sequential([
        tf.keras.layers.Dense(10),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Reshape([2, 5]),
    ])
    q_net = KerasLayersNet(
        self._time_step_spec.observation, self._action_spec, dense_layer)
    with self.assertRaisesRegexp(
        ValueError, 'shares weights with the original network'):
      categorical_dqn_agent.CategoricalDqnAgent(
          self._time_step_spec,
          self._action_spec,
          categorical_q_network=q_net,
          optimizer=None)

    # Explicitly share weights between q and target networks.
    # This would be an unusual setup so we check that an error is thrown.
    q_target_net = KerasLayersNet(
        self._time_step_spec.observation, self._action_spec, dense_layer)
    with self.assertRaisesRegexp(
        ValueError, 'shares weights with the original network'):
      categorical_dqn_agent.CategoricalDqnAgent(
          self._time_step_spec,
          self._action_spec,
          categorical_q_network=q_net,
          optimizer=None,
          target_categorical_q_network=q_target_net)

  def testCreateAgentWithPrebuiltPreprocessingLayersDiffAtoms(self):
    dense_layer = tf.keras.Sequential([
        tf.keras.layers.Dense(10),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Reshape([2, 5]),
    ])
    q_net = KerasLayersNet(
        self._time_step_spec.observation, self._action_spec, dense_layer)
    dense_layer_target = tf.keras.Sequential([
        tf.keras.layers.Dense(10),
        tf.keras.layers.Reshape([2, 5]),
    ])
    q_bad_target_net = KerasLayersNet(
        self._time_step_spec.observation, self._action_spec,
        dense_layer_target, num_atoms=3)

    with self.assertRaisesRegexp(ValueError, 'have different numbers of atoms'):
      categorical_dqn_agent.CategoricalDqnAgent(
          self._time_step_spec,
          self._action_spec,
          categorical_q_network=q_net,
          optimizer=None,
          target_categorical_q_network=q_bad_target_net)

  def testCriticLoss(self):
    agent = categorical_dqn_agent.CategoricalDqnAgent(
        self._time_step_spec,
        self._action_spec,
        self._dummy_categorical_net,
        self._optimizer)

    observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
    time_steps = ts.restart(observations, batch_size=2)

    actions = tf.constant([0, 1], dtype=tf.int32)
    action_steps = policy_step.PolicyStep(actions)

    rewards = tf.constant([10, 20], dtype=tf.float32)
    discounts = tf.constant([0.9, 0.9], dtype=tf.float32)
    next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32)
    next_time_steps = ts.transition(next_observations, rewards, discounts)

    experience = test_utils.stacked_trajectory_from_transition(
        time_steps, action_steps, next_time_steps)

    # Due to the constant initialization of the DummyCategoricalNet, we can
    # expect the same loss every time.
    expected_loss = 2.19525
    loss_info = agent._loss(experience)

    self.evaluate(tf.compat.v1.global_variables_initializer())
    evaluated_loss = self.evaluate(loss_info).loss
    self.assertAllClose(evaluated_loss, expected_loss, atol=1e-4)

  def testCriticLossWithMaskedActions(self):
    # Observations are now a tuple of the usual observation and an action mask.
    observation_spec_with_mask = (
        self._obs_spec,
        tensor_spec.BoundedTensorSpec([2], tf.int32, 0, 1))
    time_step_spec = ts.time_step_spec(observation_spec_with_mask)
    dummy_categorical_net = DummyCategoricalNet(self._obs_spec)
    agent = categorical_dqn_agent.CategoricalDqnAgent(
        time_step_spec,
        self._action_spec,
        dummy_categorical_net,
        self._optimizer,
        observation_and_action_constraint_splitter=lambda x: (x[0], x[1]))

    # For `observations`, the masks are set up so that only one action is valid
    # for each element in the batch.
    observations = (tf.constant([[1, 2], [3, 4]], dtype=tf.float32),
                    tf.constant([[1, 0], [0, 1]], dtype=tf.int32))
    time_steps = ts.restart(observations, batch_size=2)

    actions = tf.constant([0, 1], dtype=tf.int32)
    action_steps = policy_step.PolicyStep(actions)

    rewards = tf.constant([10, 20], dtype=tf.float32)
    discounts = tf.constant([0.9, 0.9], dtype=tf.float32)

    # For `next_observations`, the masks are set up so the opposite actions as
    # before are valid.
    next_observations = (tf.constant([[5, 6], [7, 8]], dtype=tf.float32),
                         tf.constant([[0, 1], [1, 0]], dtype=tf.int32))
    next_time_steps = ts.transition(next_observations, rewards, discounts)

    experience = test_utils.stacked_trajectory_from_transition(
        time_steps, action_steps, next_time_steps)

    # Due to the constant initialization of the DummyCategoricalNet, we can
    # expect the same loss every time. Note this is different from the loss in
    # testCriticLoss above due to previously optimal actions being masked out.
    expected_loss = 5.062895
    loss_info = agent._loss(experience)

    self.evaluate(tf.compat.v1.global_variables_initializer())
    evaluated_loss = self.evaluate(loss_info).loss
    self.assertAllClose(evaluated_loss, expected_loss, atol=1e-4)

  def testCriticLossNStep(self):
    agent = categorical_dqn_agent.CategoricalDqnAgent(
        self._time_step_spec,
        self._action_spec,
        self._dummy_categorical_net,
        self._optimizer,
        n_step_update=2)

    observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
    time_steps = ts.restart(observations, batch_size=2)

    actions = tf.constant([0, 1], dtype=tf.int32)
    action_steps = policy_step.PolicyStep(actions)

    rewards = tf.constant([10, 20], dtype=tf.float32)
    discounts = tf.constant([0.9, 0.9], dtype=tf.float32)
    next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32)
    next_time_steps = ts.transition(next_observations, rewards, discounts)

    third_observations = tf.constant([[9, 10], [11, 12]], dtype=tf.float32)
    third_time_steps = ts.transition(third_observations, rewards, discounts)

    experience1 = trajectory.from_transition(
        time_steps, action_steps, next_time_steps)
    experience2 = trajectory.from_transition(
        next_time_steps, action_steps, third_time_steps)
    experience3 = trajectory.from_transition(
        third_time_steps, action_steps, third_time_steps)

    experience = tf.nest.map_structure(
        lambda x, y, z: tf.stack([x, y, z], axis=1),
        experience1, experience2, experience3)

    loss_info = agent._loss(experience)

    # discounted_returns should evaluate to 10 + 0.9 * 10 = 19 and
    # 20 + 0.9 * 20 = 38.
    evaluated_discounted_returns = self.evaluate(agent._discounted_returns)
    self.assertAllClose(evaluated_discounted_returns, [[19], [38]], atol=1e-4)

    # Both final_value_discount values should be 0.9 * 0.9 = 0.81.
    evaluated_final_value_discount = self.evaluate(agent._final_value_discount)
    self.assertAllClose(evaluated_final_value_discount, [[0.81], [0.81]],
                        atol=1e-4)

    # Due to the constant initialization of the DummyCategoricalNet, we can
    # expect the same loss every time.
    expected_loss = 2.19525
    self.evaluate(tf.compat.v1.global_variables_initializer())
    evaluated_loss = self.evaluate(loss_info).loss
    self.assertAllClose(evaluated_loss, expected_loss, atol=1e-4)

  def testPolicy(self):
    agent = categorical_dqn_agent.CategoricalDqnAgent(
        self._time_step_spec,
        self._action_spec,
        self._categorical_net,
        self._optimizer)
    observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
    time_steps = ts.restart(observations, batch_size=2)
    actions, _, _ = agent.policy.action(time_steps)
    self.assertEqual(actions.shape, [2])
    self.evaluate(tf.compat.v1.global_variables_initializer())
    actions_ = self.evaluate(actions)
    self.assertTrue(all(actions_ <= self._action_spec.maximum))
    self.assertTrue(all(actions_ >= self._action_spec.minimum))

  def testInitialize(self):
    agent = categorical_dqn_agent.CategoricalDqnAgent(
        self._time_step_spec,
        self._action_spec,
        self._categorical_net,
        self._optimizer)

    observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
    time_steps = ts.restart(observations, batch_size=2)

    actions = tf.constant([0, 1], dtype=tf.int32)
    action_steps = policy_step.PolicyStep(actions)

    rewards = tf.constant([10, 20], dtype=tf.float32)
    discounts = tf.constant([0.9, 0.9], dtype=tf.float32)
    next_time_steps = ts.transition(observations, rewards, discounts)

    experience = test_utils.stacked_trajectory_from_transition(
        time_steps, action_steps, next_time_steps)

    loss_info = agent._loss(experience)
    initialize = agent.initialize()

    self.evaluate(tf.compat.v1.global_variables_initializer())
    losses = self.evaluate(loss_info).loss
    self.assertGreater(losses, 0.0)

    critic_variables = agent._q_network.variables
    target_critic_variables = agent._target_q_network.variables
    self.assertTrue(critic_variables)
    self.assertTrue(target_critic_variables)
    self.evaluate(initialize)
    for s, t in zip(critic_variables, target_critic_variables):
      self.assertAllClose(self.evaluate(s), self.evaluate(t))

  def testUpdateTarget(self):
    agent = categorical_dqn_agent.CategoricalDqnAgent(
        self._time_step_spec,
        self._action_spec,
        self._categorical_net,
        self._optimizer)

    observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
    time_steps = ts.restart(observations, batch_size=2)
    actions = tf.constant([0, 1], dtype=tf.int32)
    action_steps = policy_step.PolicyStep(actions)

    experience = test_utils.stacked_trajectory_from_transition(
        time_steps, action_steps, time_steps)
    loss_info = agent._loss(experience)
    update_targets = agent._update_target()

    self.evaluate(tf.compat.v1.global_variables_initializer())
    losses = self.evaluate(loss_info).loss
    self.assertGreater(losses, 0.0)
    self.evaluate(update_targets)

  def testTrain(self):
    agent = categorical_dqn_agent.CategoricalDqnAgent(
        self._time_step_spec,
        self._action_spec,
        self._dummy_categorical_net,
        self._optimizer)

    observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
    time_steps = ts.restart(observations, batch_size=2)

    actions = tf.constant([0, 1], dtype=tf.int32)
    action_steps = policy_step.PolicyStep(actions)

    rewards = tf.constant([10, 20], dtype=tf.float32)
    discounts = tf.constant([0.9, 0.9], dtype=tf.float32)
    next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32)
    next_time_steps = ts.transition(next_observations, rewards, discounts)

    experience = test_utils.stacked_trajectory_from_transition(
        time_steps, action_steps, next_time_steps)

    train_step = agent.train(experience, weights=None)

    # Due to the constant initialization of the DummyCategoricalNet, we can
    # expect the same loss every time.
    expected_loss = 2.19525
    self.evaluate(tf.compat.v1.global_variables_initializer())
    evaluated_loss, _ = self.evaluate(train_step)
    self.assertAllClose(evaluated_loss, expected_loss, atol=1e-4)

  def testTrainWithRnn(self):
    action_spec = tensor_spec.BoundedTensorSpec((), tf.int32, 0, 1)

    batch_size = 5
    observations = tf.constant(
        [[[1, 2], [3, 4], [5, 6]]] * batch_size, dtype=tf.float32)
    actions = tf.constant([[0, 1, 1]] * batch_size, dtype=tf.int32)
    time_steps = ts.TimeStep(
        step_type=tf.constant([[1] * 3] * batch_size, dtype=tf.int32),
        reward=tf.constant([[1] * 3] * batch_size, dtype=tf.float32),
        discount=tf.constant([[1] * 3] * batch_size, dtype=tf.float32),
        observation=[observations])

    experience = trajectory.Trajectory(
        step_type=time_steps.step_type,
        observation=observations,
        action=actions,
        policy_info=(),
        next_step_type=time_steps.step_type,
        reward=time_steps.reward,
        discount=time_steps.discount)

    categorical_q_rnn_network = DummyCategoricalQRnnNetwork(
        self._obs_spec,
        action_spec,
        conv_layer_params=None,
        input_fc_layer_params=(16,),
        preprocessing_combiner=None,
        lstm_size=(40,),
        output_fc_layer_params=(16,),
    )

    counter = common.create_variable('test_train_counter')

    agent = categorical_dqn_agent.CategoricalDqnAgent(
        self._time_step_spec,
        action_spec,
        categorical_q_rnn_network,
        optimizer=tf.compat.v1.train.AdamOptimizer(0.001),
    )

    # Force variable creation.
    agent.policy.variables()

    if tf.executing_eagerly():
      loss = lambda: agent.train(experience)
    else:
      loss = agent.train(experience)

    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertEqual(self.evaluate(counter), 0)
    self.evaluate(loss)


if __name__ == '__main__':
  tf.test.main()
```
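Both dummy networks in the test above reduce to the same contract: a flat `[batch, num_actions * num_atoms]` layer output reshaped into `[batch, num_actions, num_atoms]` logits over the Q-value atoms. A numpy-only sketch of that reshape, plus reading greedy Q-values off the atom distribution (the support values below are illustrative, not the test's configuration):

```python
import numpy as np

batch, num_actions, num_atoms = 2, 2, 5
flat = np.arange(batch * num_actions * num_atoms, dtype=np.float32)
flat = flat.reshape(batch, num_actions * num_atoms)

# Same reshape the dummy networks perform in call().
logits = flat.reshape(batch, num_actions, num_atoms)

# Q(s, a) = sum_z p(z | s, a) * z over the atom support.
support = np.linspace(-10.0, 10.0, num_atoms)
probs = np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True)
q_values = (probs * support).sum(axis=-1)
greedy_actions = q_values.argmax(axis=-1)
print(q_values.shape, greedy_actions)  # (2, 2) and one action per batch item
```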
[ { "content": "Repeat the code precisely:\n```python\nimport os\nimport logging\n\nfrom droptopus import config, settings\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import (\n QDialog,\n QDialogButtonBox,\n QFileDialog,\n QFormLayout,\n QHBoxLayout,\n QLabel,\n QLineEdit,\n QMes...
[ { "content": "Repeat the code precisely:\n<|memory_start|>```python\nimport os\nimport logging\n\nfrom droptopus import config, settings\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import (\n QDialog,\n QDialogButtonBox,\n QFileDialog,\n QFormLayout,\n QHBoxLayout,\n QLabel,\n QLin...
```python
import os
import logging

from droptopus import config, settings

from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
    QDialog,
    QDialogButtonBox,
    QFileDialog,
    QFormLayout,
    QHBoxLayout,
    QLabel,
    QLineEdit,
    QMessageBox,
    QPushButton,
)
from PyQt5.QtGui import QPixmap


class EditItemForm(QDialog):
    def __init__(self, item, parent=None):
        super(EditItemForm, self).__init__(parent)

        l1 = QLabel("Name:")
        name = QLineEdit()

        l2 = QLabel("Description:")
        desc = QLineEdit()

        l3 = QLabel("Icon:")
        icon = QLabel()
        btn_icon = QPushButton("...")
        btn_icon.setFixedWidth(50)
        btn_icon.clicked.connect(self.onChangeIcon)

        l4 = QLabel("Target Path:")
        path = QLineEdit()
        path.setReadOnly(True)
        btn_path = QPushButton("...")
        btn_path.setFixedWidth(50)
        btn_path.clicked.connect(self.onChangePath)

        layout = QFormLayout(self)
        layout.addRow(l1, name)
        layout.addRow(l2, desc)
        row = QHBoxLayout()
        row.addWidget(icon)
        row.addWidget(btn_icon)
        layout.addRow(l3, row)
        row = QHBoxLayout()
        row.addWidget(path)
        row.addWidget(btn_path)
        layout.addRow(l4, row)

        buttons = QDialogButtonBox(
            QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal, self
        )
        buttons.accepted.connect(self.accept)
        buttons.rejected.connect(self.reject)
        layout.addRow(buttons)

        self.icon = icon
        self.name = name
        self.path = path
        self.desc = desc
        self.loadItem(item)

    def loadItem(self, item):
        self.icon.setPixmap(
            QPixmap(item["icon"]).scaled(
                50, 50, Qt.KeepAspectRatio, Qt.SmoothTransformation
            )
        )
        self.name.setText(item["name"])
        self.desc.setText(item["desc"])
        self.path.setText(item["path"])
        self.item = item

    def onChangeIcon(self):
        icon_filepath, _filter = QFileDialog.getOpenFileName(
            self, "Choose Icon", os.path.dirname(self.item["icon"])
        )
        if icon_filepath:
            icon_size = 15
            self.icon.setPixmap(
                QPixmap(icon_filepath).scaled(
                    icon_size, icon_size, Qt.KeepAspectRatio, Qt.SmoothTransformation
                )
            )
            self.item["icon"] = icon_filepath

    def onChangePath(self):
        path = self.item["path"] if len(self.item["path"]) else os.path.expanduser("~")
        if self.item["type"] == "dir":
            path = QFileDialog.getExistingDirectory(self, "Choose a directory", path)
            if path:
                self.path.setText(path)
                self.item["path"] = path
        else:
            path, _filter = QFileDialog.getOpenFileName(self, "Open file", path)
            if path:
                self.path.setText(path)
                self.item["path"] = path

    def validate(self):
        return True

    def accept(self):
        if not self.validate():
            QMessageBox.critical(
                self, "Error", "\n".join(self.validation_errors), QMessageBox.Ok
            )
            return

        self.item["name"] = self.name.text()
        self.item["desc"] = self.desc.text()
        settings.writeItem(self.item)
        self.close()
```
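The dialog above follows the standard `QDialogButtonBox` wiring: its `accepted` and `rejected` signals are connected to the dialog's `accept()` and `reject()` slots. A minimal runnable sketch of just that wiring, assuming PyQt5 and a display are available (widgets here are illustrative):

```python
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QApplication, QDialog, QDialogButtonBox,
                             QFormLayout, QLineEdit)

app = QApplication(sys.argv)
dialog = QDialog()
layout = QFormLayout(dialog)
layout.addRow("Name:", QLineEdit())
buttons = QDialogButtonBox(
    QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal, dialog)
buttons.accepted.connect(dialog.accept)  # Ok -> QDialog.accept()
buttons.rejected.connect(dialog.reject)  # Cancel -> QDialog.reject()
print(dialog.exec_())  # 1 if accepted, 0 if rejected
```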
[ { "content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\nimport argparse\nfrom fractions import Fraction\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport re\nimport time\n\nimport parse_ctcp_output\nimport parse_tcptrace\nimport anal...
[ { "content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\nimport argparse\nfrom fractions import Fraction\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport re\nimport time\n\nimport parse_ctcp_output\nimport parse_tcptr...
```python
# NOTE: this script uses Python 2 syntax (print statements, sort(cmp=...)).
import argparse
from fractions import Fraction
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import re
import time

import parse_ctcp_output
import parse_tcptrace
import analyse_data

# Format:-
#  key: cc_type eg. 10x, 1000x, cubic, cubicsfqCodel etc.
#  value: dictionary: key - (vary_type, value), value - [filenames]
#   vary_type: one of 'link' or 'delay', depending on which is varied.
#   value: value of the link speed (Mbps) or delay (ms).
#   filenames: list of filenames satisfying the criteria. They will
#     be different runs of the same configuration.
ns_files = {}

def preprocess_ns_directory(directory):
    """ Parse all filenames in directory containing ns2 simulation
    results and store it so that the appropriate filenames can be
    rapidly retrieved """
    global ns_files
    file_names = [
        os.path.join(directory, f)
        for f in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, f))
    ]
    re_ns_filename = re.compile(r"""
        (?P<cc_type>\w+)-
        (?P<vary_type>link|delay)
        (?P<value>[\d.]+)
        run\d+.err
    """, re.VERBOSE)

    for filename in file_names:
        match = re_ns_filename.match(filename.split('/')[-1])
        if match is None:
            continue
        cc_type = match.group('cc_type')
        file_key = (match.group('vary_type'), float(match.group('value')))
        if cc_type not in ns_files:
            ns_files[cc_type] = {}
        if file_key not in ns_files[cc_type]:
            ns_files[cc_type][file_key] = []
        ns_files[cc_type][file_key].append( filename )

def get_nearest_ns_throughput_delay(cc_type, vary_type, value):
    """ Returns (throughput, delay) from ns2 simulation results given
    cctype (which can be a ratname or 'cubic' or 'cubicsfqCoDel'),
    vary_type (one of 'link' and 'delay') and value (link rate or delay
    for which closest match has to be found) """
    assert(type(cc_type) is str)
    assert(vary_type in ['link', 'delay'])
    assert(type(value) is float)
    global ns_files

    # canonicalize cc_type
    for x in ['2x', '10x', '100x', '1000x']:
        if x in cc_type:
            cc_type = x
            break
    if cc_type not in ['2x', '10x', '100x', '1000x']:
        assert(cc_type in ['cubic', 'cubicsfqCoDel'])

    # find closest match
    closest_value, closest_filenames = -1e9, []
    for x in ns_files:
        if x != cc_type:
            continue
        for y in ns_files[x]:
            if y[0] != vary_type:
                continue
            if abs(y[1] - value) < abs(closest_value - value):
                closest_value = y[1]
                closest_filenames = ns_files[x][y]
    assert(closest_value > 0)

    # parse the files
    re_ns_result_line = re.compile(r"""
        .*tp=(?P<throughput>[\d.]+) #mbps
        .*del=(?P<delay>[\d.]+)     #ms
        .*on=(?P<duration>[\d.]+)   #secs
        .*
    """, re.VERBOSE)
    delay = 0
    tot_time, tot_bytes = 0, 0  # for normalizing throughput and delay resp.
    for filename in closest_filenames:
        infile = open(filename, 'r')
        for line in infile.readlines():
            match = re_ns_result_line.match(line)
            if match is None:
                continue
            duration = float(match.group('duration'))
            throughput = float(match.group('throughput'))
            delay += float(match.group('delay')) * throughput * duration
            tot_time += duration
            tot_bytes += throughput * duration  # exact units don't matter

    if tot_time == 0 or tot_bytes == 0:
        print "Warning: Nothing found for ", (cc_type, vary_type, value)
        return (1, 1)
    throughput = tot_bytes / tot_time
    delay /= tot_bytes
    return (throughput, delay)

def plot_data(mahimahi_directory, remysim_directory, vary_link, delta=1):
    """ Plot the utility function w.r.t link rate or minrtt
    mahimahi_directory -- directory where raw results from conduct_mahimahi_expt are stored
    remysim_directory -- directory where results from the remy simulator are stored
    vary_link -- if True, it is assumed that the link rate is to be varied. Else minrtt is varied
    delta -- adjustable factor in computing utility """
    assert(type(mahimahi_directory) is str)
    assert(type(remysim_directory) is str)
    assert(type(vary_link) is bool)

    re_mahimahi_name = re.compile(r"""
        rawout-(?P<ratname>.*)-
        (?P<linkrate>[0-9.]+)-
        (?P<minrtt>[0-9.]+)-
        (?P<numsenders>[0-9]+)$
    """, re.VERBOSE)
    re_mahimahi_kernel_name = re.compile(r"""
        rawout-(?P<ratname>cubic)-
        (?P<linkrate>[0-9.]+)-
        (?P<minrtt>[0-9.]+)-
        (?P<numsenders>[0-9]+)-tcptrace$
    """, re.VERBOSE)
    re_mahimahi_nash_name = re.compile(r"""
        rawout-(?P<ratname>nash[0-9.]*)-
        (?P<linkrate>[0-9.]+)-
        (?P<minrtt>[0-9.]+)-
        (?P<numsenders>[0-9]+)$
    """, re.VERBOSE)
    re_remysim_name = re.compile(r"""
        rawout-remysim-(?P<ratname>.*)-
        (?P<linkrate>[0-9.]+)-
        (?P<minrtt>[0-9.]+)-
        (?P<numsenders>[0-9]+)$
    """, re.VERBOSE)
    re_remysim_data = re.compile(r"sender: \[tp=(?P<throughput>[0-9.]+), del=(?P<delay>[0-9.]+)\]")

    file_names = [
        os.path.join(mahimahi_directory, f)
        for f in os.listdir(mahimahi_directory)
        if os.path.isfile(os.path.join(mahimahi_directory, f))
    ]

    # if link speed is varied, fixed value the rtt, else it is the link speed
    fixed_value = -1
    # format: key - ratname, value - [(linkrate or rtt, utility), ...]
    values = {}
    tmp_values = []
    for filename in file_names:
        if filename.find('remysim') != -1:
            match = re_remysim_name.match(filename.split('/')[-1])
            remysim = True
        elif filename.find('cubic') != -1:
            match = re_mahimahi_kernel_name.match(filename.split('/')[-1])
            remysim = False
        elif filename.find('nash') != -1:
            match = re_mahimahi_nash_name.match(filename.split('/')[-1])
            remysim = False
        else:
            match = re_mahimahi_name.match(filename.split('/')[-1])
            remysim = False
        if match is None:
            continue

        linkrate, minrtt = match.group('linkrate'), match.group('minrtt')
        ratname = match.group('ratname') + ('', '-remysim')[remysim] + ' ' + match.group('numsenders') + 'senders'
        print ratname, linkrate, minrtt
        if filename.find('remysim') != -1 or filename.find('us-') != -1:
            print "Ignoring ", filename
            continue
        if ratname not in values:
            values[ratname] = []

        if remysim:
            infile = open(filename, 'r')
            throughput, delay, numsenders = 0, 0, 0
            for line in infile.readlines():
                line_match = re_remysim_data.match(line)
                if line_match == None:
                    continue
                throughput += float(line_match.group('throughput'))
                delay += float(line_match.group('delay'))
                numsenders += 1
            if numsenders < 2:
                print "Skipping ", filename
                continue
            assert( numsenders == 2 )
            throughput, delay = throughput/numsenders, delay/numsenders
            throughput /= (numsenders + 1)/2.0  # divide by E[ #senders ]
            throughput /= float(linkrate)       # normalize
            delay -= float(minrtt)
        else:
            if filename.split('/')[-1].find('cubic') != -1:
                data = parse_tcptrace.parse_file(filename, endpt_name="100.64.0.1", dst_endpt_name="100.64.0.1")
            else:
                data = parse_ctcp_output.parse_file(filename)
            throughput, delay = analyse_data.weighted_means(data)
            if throughput == None or delay == None:
                print "Warning: No data present in ", filename
                continue
            throughput /= 1e6 * float(linkrate)  # convert to MBps and normalize
            throughput *= 1500.0 / 1468.0        # compensate for differences in data sizes
            delay -= float(minrtt)
            #delay /= float(minrtt)

        utility = math.log(throughput, 10) - delta*math.log(delay, 10)
        if vary_link:
            fixed_value = minrtt
            value = float(linkrate)*8
        else:
            fixed_value = linkrate*8
            value = float(minrtt)
        values[ratname].append( (value, utility) )

        # ns_throughput, ns_delay = get_nearest_ns_throughput_delay(
        #     ratname,
        #     ('delay', 'link')[vary_link],
        #     value
        # )
        # ns_throughput /= float(linkrate) * 8
        # ns_utility = math.log(ns_throughput, 10) - delta*math.log(ns_delay, 10)
        # if ratname+'-ns2' not in values:
        #     values[ratname+'-ns2'] = []
        # values[ratname+'-ns2'].append( (value, ns_utility) )

        tmp_values.append( (value, throughput, delay, ('mahimahi', 'remysim')[remysim]) )
        # tmp_values.append( (value, ns_throughput, ns_delay, 'ns') )

    tmp_values.sort(cmp=lambda x,y: ((1, -1)[x[0] > y[0]], 0)[x[0] == y[0]])
    for x in tmp_values:
        print x

    colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', '0.75', '0.5', '0.25', '#663311', '#113366']
    color_ctr = 0
    print "For " + ('rtt', 'link rate')[vary_link] + " = " + str(fixed_value)
    for rat in values:
        if rat.find('ns2') != -1:
            continue
        print rat, '\t', colors[color_ctr]
        values[rat].sort(cmp=lambda x,y: ((-1, 1)[x[0] > y[0]], 0)[x[0]==y[0]])
        try:
            x, y = zip(*values[rat])
        except ValueError:
            print "Warning: No data for rat '" + rat + "'"
            continue
        plt.plot(x, y, colors[color_ctr], alpha=0.8, label=rat)
        color_ctr += 1
    if vary_link:
        plt.xlabel('Link rate (Mbps)')
        plt.xscale('log')
    else:
        plt.xlabel('Min. RTT (ms)')
    plt.ylabel('Utility (mahimahi)')
    plt.legend(loc='lower center', bbox_to_anchor=(0.5, 0))
    plt.show()

    color_ctr = 0
    for rat in values:
        if rat.find('ns2') == -1:
            continue
        print rat, '\t', colors[color_ctr]
        values[rat].sort(cmp=lambda x,y: ((-1, 1)[x[0] > y[0]], 0)[x[0]==y[0]])
        try:
            x, y = zip(*values[rat])
        except ValueError:
            print "Warning: No data for rat '" + rat + "'"
            continue
        plt.plot(x, y, colors[color_ctr], alpha=0.8, label=rat)
        color_ctr += 1
    if vary_link:
        plt.xlabel('Link rate (Mbps)')
        plt.xscale('log')
    else:
        plt.xlabel('Min. RTT (ms)')
    plt.ylabel('Utility (ns2)')
    plt.legend(loc='lower center', bbox_to_anchor=(0.5, 0))
    plt.show()

if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='Plot raw results obtained from automatic-tests_vary-mahimahi-configs')
    argparser.add_argument('--input_directory', type=str, default="", help="Directory where results are stored")
    argparser.add_argument('--vary_link', type=bool, default="", help="If specified, assumes link rate is varied. Else min rtt is assumed to be varied")
    argparser.add_argument('--ns_results', type=str, help="Directory where results from ns simulations are stored")
    cmd_line_args = argparser.parse_args()

    print "Preprocessing ns results directory"
    preprocess_ns_directory(cmd_line_args.ns_results)
    plot_data(cmd_line_args.input_directory, "", cmd_line_args.vary_link)
```
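A worked example of the utility metric the script plots (the sample numbers here are invented; the formula itself is the one in `plot_data` above):

```python
# utility = log10(normalized throughput) - delta * log10(delay above min RTT)
import math

throughput = 0.8   # fraction of the link rate achieved
delay = 20.0       # ms of queueing delay beyond the minimum RTT
delta = 1          # default weighting in plot_data

utility = math.log(throughput, 10) - delta * math.log(delay, 10)
print(utility)     # ==> about -1.398; higher is better, delay is penalized
```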
[ { "content": "Repeat the code precisely as written (spacing intact):\n```python\npeforth\n\n[x] 13:59 2017-07-31 找到 JavaScript eval() equivalent in Python\n https://stackoverflow.com/questions/701802/how-do-i-execute-a-string-containing-python-code-in-python\n 成功了!!\n >>> mycode = 'print (\"hello world...
[ { "content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\npeforth\n\n[x] 13:59 2017-07-31 找到 JavaScript eval() equivalent in Python\n https://stackoverflow.com/questions/701802/how-do-i-execute-a-string-containing-python-code-in-python\n 成功了!!\n >>> mycode = 'prin...
```python
peforth

[x] 13:59 2017-07-31 Found the JavaScript eval() equivalent in Python
    https://stackoverflow.com/questions/701802/how-do-i-execute-a-string-containing-python-code-in-python
    It works!!
        >>> mycode = 'print ("hello world")'
        >>> exec(mycode)
        hello world
        >>>
    The technique of returning a function from another function is known as currying:
    https://stackoverflow.com/questions/14261474/how-do-i-write-a-function-that-returns-another-function
    Python anonymous function lambda
    http://blog.csdn.net/majianfei1023/article/details/45269343
    https://www.zhihu.com/question/20125256
[x] Review project-k: should project-k support python too? That would be peforth.py.
    10:04 2019/11/26 it's projectk.py now.
[x] Ask the original pyforth author about the license terms directly ---> not needed anymore.
[x] Experiment: generate a function with exec()
        s = '''
        def show(s):
            print(s)
        '''
        exec(s)
        >>> show('abc')
        abc
        >>>
    It works!
[x] Try to define a python object
        s = '''
        class a():
            vm = None
            def b(self):  # self is must
                print(b)  # b unknown
                print(self)
                print(a)
                vm = self
        c = a()
        '''
        exec(s)
[x] File-reading examples peforth can borrow from
    # average5.py
    def main() :
        fileName = input ("What file are the numbers in? " )
        infile = open (fileName, ' r ')
        sum = 0
        count = 0
        for line in infile:
            sum = sum + eval (line)
            count = count + 1
        print ("\nThe average Of the numbers is", sum / count)
    main ( )

    # average6.py
    def main() :
        fileName = input ("What file are the numbers in? " )
        infile = open ( fileName
        sum = 0.0
        count = 0
        line = infile.readline()
        while line != ""
            sum = sum + eval(line)
            count = count + 1
            line = infile.readline()
        print("\nThe average Of the numbers is", sum / count)
    main()
[x] Figured out how modules work -- it's simple. peforth.py is the peforth VM.
    Unlike javascript, there is no need to wrap the whole VM in a function,
    see GitHub\peforth\projectile.py
    python can redefine functions and methods. Functions and methods are variables too.
    python objects, like javascript, can gain properties and methods by simply
    assigning values to them.
        >>> type(show)  # show is an object
        <class 'projectile.Projectile'>
        >>> show
        <projectile.Projectile object at 0x000001C6260D0438>
        >>> show.x = 0  # assign new property to show
        >>> show.y = 11
        >>> show.p = 22
        >>> dir(show)  # check it out
        ['__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__',
        '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__',
        '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__',
        '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__',
        '__str__', '__subclasshook__', '__weakref__', 'getHere', 'getX', 'getY',
        'p', 'update', 'x', 'xpos', 'xvel', 'y', 'ypos', 'yvel']
        >>>
[x] python can also "see" a function's source code
    https://stackoverflow.com/questions/427453/how-can-i-get-the-source-code-of-a-python-function
        def foo(a):
            x = 2  # how about a comment?
            return x + a
        import inspect
        # inspect.getsource(foo)
        # u'def foo(a):\n    x = 2\n    return x + a\n'
        print (inspect.getsource(foo))
    ==> Complete success -- even the comment is shown.
    ==> But functions composed by py> py: do not work:
        py> tick('test').cfa ==> 1
        py> dictionary[1:] ==> [.s, <function <lambda> at 0x0000024CE15810D0>, .s,
            <function <lambda> at 0x0000024CE1581158>, .s, None, None] OK
        py> inspect.getsource(dictionary[2]) .
        could not get source code <------------------- error message
        Debug? [y/N]
    The dis module introduced in the same stackoverflow article really works too!
        >>> import dis
        >>> def func(x):
        ...     print(x+1)
        ...
        >>> func(123)
        124
        >>> dis.dis(func)
          2           0 LOAD_GLOBAL              0 (print)
                      2 LOAD_FAST                0 (x)
                      4 LOAD_CONST               1 (1)
                      6 BINARY_ADD
                      8 CALL_FUNCTION            1
                     10 POP_TOP
                     12 LOAD_CONST               0 (None)
                     14 RETURN_VALUE
        >>>
    Wow! It shows the function's machine code -- excellent!!
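    (A minimal currying/closure sketch for the note above -- the names here are
    invented for illustration, they are not from peforth:)
        def adder(n):            # outer function returns an inner function
            def add(x):
                return x + n     # the closure remembers n
            return add
        add3 = adder(3)          # the "curried" add
        add3(4)                  # ==> 7
        add3b = lambda x: x + 3  # the anonymous-lambda form of the same idea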
[x] Python equivalent of:
    Word.prototype.toString = function(){return this.name + " " + this.help}; // every word introduces itself
    --> Got it: define the __str__ prototype of the class
    #------- ex2.py ---------------
    class d():
        def __str__(self):
            return "a __str__"
        def __repr__(self):
            return "a __repr__"
    class x():
        name = 'stella'
        feet = 'splender'
    #------------------------------
        >>> import ex2
        >>> x = ex2.x()
        >>> x
        <ex2.x object at 0x00000170D77202B0>  <---- default __repr__ printout
        >>> print(x)
        <ex2.x object at 0x00000170D77202B0>  <---- default __str__ return value
        >>> d = ex2.d()
        >>> d         # <--------- evaluating the obj prints __repr__()'s return value
        a __repr__    # it should execute the word instead
        >>> print(d)  # <---- the obj's own value is __str__()'s return value
        a __str__
        >>>
[x] Probe further into the properties of the future peforth.py kernel module
    Ynote: "搞懂 python 的 module files globals() locals()".note
[x] docode() needs to compose a function; refer to how anonymous functions are defined:
    https://stackoverflow.com/questions/6629876/how-to-make-an-anonymous-function-in-python-without-christening-it
    Study built-in function exec() https://docs.python.org/3/library/functions.html#exec
    Study built-in function compile() https://docs.python.org/3/library/functions.html#compile
[x] genxt() works now
[x] IDLE path working directory working folder
    import sys
    sys.path.append('c:/Users/hcche/Documents/GitHub/peforth')
[x] 12:50 2017/08/12 It's running now; debugging the compiling == 'code' problem
    --> maybe Word(newname,newxt) inside end-code failed --> no, it can't fail
    --> more likely docode itself: the structure is poor; what if reDef or genxt()
        fails? Many paths end halfway and leave compiling == 'code' behind.
    --> all tested, behavior acceptable now
[x] "import re" in peforth.py kernel is not a good choice. Simply letting the main
    program do that. The main program is eforth.3py
    --> Yeah! it works.
        c:\Users\hcche\Documents\GitHub\peforth>python eforth.3py
        hello eforth!!
    --> Wrong! Having every .py file import re, import pdb by itself is actually
        the right thing, see:
        https://stackoverflow.com/questions/8957859/python-child-cannot-use-a-module-the-parent-imported
        ... Generally if you're doing simple obvious things like importing a standard
        module, you should do it the simple and obvious way......
[x] reproduce the problem:
        import peforth as vm
        vm.dictate('code test end-code')  # Try this first
        vm.words['forth']
    This succeeds -- and after entering the forth command line, the same thing...
    still succeeds.
    --> Try instead: vm.dictate('code test3 print("hello test3!!") end-code')
        >>> vm.execute('test3') --> hello test3!!  very successful
    --> Enter the forth command line
        >>> vm.peforth()
        OK code test4 print("hello test4") end-code
        OK test4
        hello test4
        OK   still very successful
    --> It seems an error, e.g. word unknown, must occur first to reproduce the problem
        >>> code test5 end-code
          File "<stdin>", line 1
            code test5 end-code
                     ^
        SyntaxError: invalid syntax
        >>>
    Exactly so!!! now I've got the SRP
    --> It seems w.xt(w) fails when executing end-code; check end-code at that point.
    RI: outer() uses eval(token) to decide whether a token is [int, float]; that can
    raise an exception and must be handled with try - except. --> Fixed !!!
[x] why, after OK, typing 'words' gives no response <--- should be: Error! words unknown.
    --> It turns out all dir(vm) attributes behave this way!!
        (Pdb) eval('pop') ==> <function pop at 0x00000178A534A730>
        (Pdb) eval('dictionary') ==> [0]
        (Pdb) eval('stack') ==> [{'forth': [0, code, end-code, //, stop, *debug*]},
            {'forth': [0, code, end-code, //, stop, *debug*]},
            {'forth': [0, code, end-code, //, stop, *debug*]},
            <class 'peforth.Word'>, <function phaseA at 0x00000178A534A0D0>,
            <function phaseB at 0x00000178A534A158>]
    So outer() needs further improvement.
    --> adding + 0 to eval()'s result guarantees it is a number
[x] kernel project-k.py instead of peforth.py
[X] code word's help, not easy, keep the record.
    # stack diagram
    ntibwas, s = ntib, nextstring("\\(")
    if s['flag']:  # stack diagram is existing
        pdb.set_trace()
        newhelp = '( ' + nexttoken('\\)') + nexttoken() + ' '
    else:  # return s to tib
        ntib = ntibwas
    # word description
    ntibwas, s = ntib, nextstring("\\")
    if s['flag']:  # description is existing
        newhelp += nexttoken('\n|\r')
    else:  # return s to tib
        ntib = ntibwas

    code \ last().help += nexttoken('\n|\r'); end-code immediate
    // ( <comment> -- ) Give help message to the new word.
    code ( last().help = '( ' + nexttoken('\\)') + nexttoken() + ' ' end-code immediate
    // ( -- ) Get stack diagram to the last's help.
    --> as of v1.23 code words can get their help via # now.
[x] In jeforth, window.colonxt is dynamically created by the definition of ':'.
    Can peforth.f do that too in python? Yes!!!
        >>> def test():
        ...     globals()['cc'] = 123
        ...
        >>> cc
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        NameError: name 'cc' is not defined
        >>> test()
        >>> cc
        123
        >>>
[/] : test ; 'module' object does not support item assignment Debug? [y/N] y
    RI: last().xt = xt  # also vm['colonxt'] <------ [/] easy, deal with this later
[x] After the above error probably, after colon definition the compiling is still True!!!
    --> because I forgot to declare it global. B i n g o ! !  colon definition works now
[x] literal needs to use closure
        def gen(n):  # function generator
            def f():  # literal run time function
                print(n)
            f.description = "{} {}".format(type(n),n)
            return f
        f = gen([11,22,33])
        f()
        >>> f.description
        "<class 'list'> [11, 22, 33]"
    # functions are not shown by __str__ and __repr__ like dict
    # def str(self):  # return help message
    #     return f.description
    # def repr(self):  # execute xt and return help message
    #     return f.description
    # str.desc = "I am str"
    # repr.desc = "I am repr"
    # f.__str__ = str
    # f.__repr__ = repr
[x] py> and py: should both switch to compile(code,"")
    compile CN http://www.th7.cn/Program/Python/201608/923063.shtml
    A lambda cannot be used for assignments, for safety reasons, so py: cannot use
    lambda; it has to use compile.
    https://stackoverflow.com/questions/20695745/why-use-lambdas-vs-1-line-function-declarations
    --> [x] found that py: tick('//').immediate=True no longer works!!!
    --> rewrote py: and py> with <py> </py> </pyV> respectively, ok now
[x] pyExec pyEval are redundant --> removed
[x] (Pdb) execute("sdfsdf") (Pdb)  no error message at all; problems are invisible!!
    --> fixed, now it's a panic.
[x] Why doesn't an undefined 'compiling' trigger unknown?
    --> outer() used eval(token) to judge whether a token is a number; when
        token='compiling' no exception is raised -- it just returns the value,
        True or False!!
    --> switched to complex(token), works perfectly!
[x] t> >t t@
        >>> line = 'Cats are smarter than dogs\n\\ 1234\n\\ 2233'
        >>> matchObj = re.search( r'\n\\ (\d*)$', line)
        >>> matchObj.group()
        '\n\\ 2233'
        >>> matchObj.group(1)
        '2233'
        >>> len(matchObj.group())
        7
        >>> line[:-7]
        'Cats are smarter than dogs\n\\ 1234'
        >>>
[x] [/py] [/pyV] only fetch the exec-code and eval-code without running them;
    can execute run them?
[x] execute should also be able to run exec-code or eval-code ---> done
[x] Drop both; what's wanted is a compyle ( 'source' -- code object ) \ python compiler command
[x] Let execute() recognize code objects --> OK ' compyle .
        compyle ( "source" -- exec-code ) Python compile source to exec-code object __str__
        OK
    OK char print('hi') compyle
    OK execute
    hi
    OK   worked on the first try!!
[x] See whether comma can stuff a code object into a colon definition
    --> : test char print('hi') compyle execute ;   works
        : test2 [ char print('hi') compyle , ] ;    works too
        : cfa ' py> dictionary[pop().cfa:] . cr ;
        OK cfa test2
        [ /* test2 */ <code object <module> at 0x0000019B24E1F8A0, file "", line 1>,
        None, /* cfa */ ', <function xt.<locals>.<lambda> at 0x0000019B24E29C80>, ., cr,
        None, None] OK
[x] With compyle available, should <py> </py> </pyV> etc. be rewritten?
    --> only simplified </py> a little
[x] debug ::
    --> root cause: once again an assignment to ip inside branch missed the vm.
        prefix (vm.ip)
        OK 11 22 ' + :: xt() .s ==> [33] OK   so :: works in interpret mode
        OK : test :: xt() ;
        --Return--
        > <string>(2)xt()->None
        (Pdb) c
        OK see-cfa test
        [<code object <module> at 0x000001F1364F68A0, file "", line 1>, None, None] OK
        22 33 ' + test OK .s [55] OK
[x] constant needs vm.forth['varname']; a quick python syntax review.
    What constant must do --> 'push(vm["forth"]["x"])'
    At the beginning no word-list has its own space:
        (Pdb) vm['forth'] *** TypeError: 'module' object is not subscriptable
        (Pdb) vm.forth *** AttributeError: module 'projectk' has no attribute 'forth'
    init cannot be done like this:
        (Pdb) vm['forth']={} *** TypeError: 'module' object does not support item assignment
    init must be done like this:
        (Pdb) setattr(vm,'forth',{})
    an object's attribute cannot be accessed like this:
        (Pdb) vm['forth'] <--- this is the dict way
        *** TypeError: 'module' object is not subscriptable
    it must be accessed like this:
        (Pdb) vm.forth
        {}
        (Pdb) getattr(vm,'forth')
        {}
        (Pdb)
[x] A failed colon definition still occupies a slot
        OK 123 constant x
        OK 345 to x
        Error! Assigning to a none-value.
        Debug? [y/N]
        OK : test 44445555 to x ;
        Error! Assigning to a none-value. <--- triggers the error right away, good.
        Debug? [y/N]
        OK words
        0 code end-code // ...snip... to x test <--- test took a slot
        OK : ttt ;
        OK words
        0 code end-code // ...snip... to x test ttt <--- indeed took a slot
        OK test
        Error! test unknown. <---- the failed colon definition just wasn't revealed
        Debug? [y/N]
        OK rescan-word-hash <---- after a rescan it shows up!!
        OK test
        OK .s
        [44445555] OK
    --> jeforth behaves the same; never mind, the warning is enough.
    --> a (forget) can remove it
[x] tib sometimes gets corrupted
        OK char $ . rewind
        OK 11 22 33 *debug*  # <---- the simplest case
        (Pdb) tib
        '112233*debug*'  # <----- already wrong !!!
        (Pdb)
    The problem is in the kernel's nexttoken()
    --> Root cause 1: nexttoken() <--- rewrote the leading-whitespace skipping
        Root cause 2: tib and ntib are strange <-- ruled out ntib being too large first
[x] writeTextFile experiment
        OK <py> open("pathname.txt", "wt")</pyV> constant f
        reDef f
        OK f .
        <_io.TextIOWrapper name='pathname.txt' mode='wt' encoding='cp950'>
        OK f :> name --> pathname.txt OK
        OK f :: write("abc")
        OK f :: write("123")
        OK f :: write("中文")
        OK f :: close()
    encoding='utf-8'
[x] refill works now. Use refill to improve <text> first; let it accept multiple
    lines. ---> in the end, simply introduced accept2 with Ctrl-D toggling
    multiple-line mode. The study below is kept for the record.
    : <text>.interpret ( <multi-lines> -- "string" ) // get multi-lines string from terminal
        CR word ( s )
        begin
            accept if ( s line )
                \ get string to s, leave </text> and the rest in tib by adjusting ntib
                py> re.search("(.*)</text>(.*)",tos()) ( s line re )
                py> bool(tos()) if \ line has </text> ?
                    ( s line re ) py: vm.tib="</text>"+tos().group(2);vm.ntib=0;
                    \ s += re.group(1)
                    nip ( s re ) :> group(1) + ( s ) exit
                else ( s line re )
                    \ s += line
                    drop + ( s )
            else ( s )
                \ s += '\n'
                py> pop()+'\n'
            then refill
        again ;
    I found that bool(regEx) tells whether the re.search result was found.
[x] See MetaMoji for how to split the complicated <text>.interpret above into two
    simple parts: the single-line success path, and multi-line input. The multi-line
    input is a common routine.
[x] Experiment: does appending a suffix simplify things? It makes group(1) the
    common result.
    \ regular expression examples
    OK <py> re.search("(.*?)</text>(.*)","aa </text>bb</text>")</pyV> ( re ) constant re
    OK re bool . cr
          ^^^^^^ deliberately append the suffix so re.search always succeeds
    True <--- always succeeds
    OK re :> group() . cr
    aa </text>bb</text>
    OK re :> group(1) . cr
    aa  <----------------------------- group(1) is what we want
    OK re :> group(2) . cr
    bb</text> <-------------------- group(2) drops the suffix and goes back to tib
    OK <py> re.search("(.*?)</text>(.*)","aa bb</text>")</pyV> ( re ) constant re
    OK re bool . cr
    True
    OK re :> group() . cr
    aa bb</text>
    OK re :> group(1) . cr
    aa bb <------------ when bool group(2) is False, group(1) is still what we want,
                        so the suffix trick indeed simplifies things
    OK re :> group(2) . cr

    OK re :> group(2)=="" . cr
    True
    OK re :> group(2) bool .
    False OK
[x] multi-line input common routine
    [x] 19:46 2020/10/04 Review why ^D multiple-line input is needed: a colon
        definition can already span multiple lines in compiling state; the problem
        is that code ... end-code sections need ^D multiple-line input.
    : accepts ( "deli" <multiple lines> -- "string" ) // Get multiple lines from tib up to delimiter
        ( deli ) begin accept if ( s line )
            \ get string to s, leave </text> and the rest in tib by adjusting ntib
            py> re.search("(.*)</text>(.*)",tos()) ( s line re )
            py> bool(tos()) if \ line has </text> ?
                ( s line re ) py: vm.tib="</text>"+tos().group(2);vm.ntib=0;
                \ s += re.group(1)
                nip ( s re ) :> group(1) + ( s ) exit
            else ( s line re )
                \ s += line
                drop + ( s )
        else ( s )
            \ s += '\n'
            py> pop()+'\n'
        then refill again ;

    code accept2 # use Ctrl-D at the end to terminate the input.
        py> chr(4)=='^D' --> True
        result, s = "", input()
        while not chr(4) in s:
            result += s
            s = input()
        result += s.replace(chr(4),'\n')  # all ^D become \n
        push(result)
        push(True)
        end-code // ( -- str T|F ) Read a line from terminal.
[x] accept can be single line accept1 or multiple lines accept2, switched by Ctrl-D
        8: [EOT] (<class 'str'>) <---- the Ctrl-D from input()
        OK py> ord(tos()[0]) . cr
        4
        OK
    Demo of <accept> ... </accept> usage
        ------- clipboard ---------
        dropall
        <accept>
        11
        22
        33
        44 55
        </accept>
        66 77
        88
        99
        ----------------------------
        OK dropall  # what it looks like after pasting
        OK <accept>
        11
        22
        33
        44 55
        </accept>66  # this is the last line; note! 66 can stick right after,
                     # the delimiter is ignored entirely.
        OK 77
        OK 88
        OK 99
        ----------------------------
        OK .s  # check the result
        .......
        0: 11 22 33 44 55 66 (<class 'str'>)
        1: True (<class 'bool'>)
        2: 77 4Dh (<class 'int'>)
        3: 88 58h (<class 'int'>)
        4: 99 63h (<class 'int'>)
        OK
[x] .s in trouble when a cell is False, None ... etc
[x] peforth.py can be run directly: python peforth.py
    It can also be run from the python interpreter: >>> peforth.main()
    then exit returns to the python interpreter, while bye returns an errorlevel to DOS.
    # From the python interpreter, the globals inside the peforth.py module are visible
    >>> dir(peforth)
    ['__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__',
    '__package__', '__spec__', 'greeting', 'main', 'panic', 'readTextFile', 'vm',
    'writeTextFile']

    # From the python interpreter, the globals inside the project-k vm are visible too
    >>> dir(peforth.vm)
    ['EXIT', 'RET', 'Word', '__builtins__', '__cached__', '__doc__', '__file__',
    '__loader__', '__name__', '__package__', '__spec__', 'code', 'colonxt', 'comma',
    'compiling', 'context', 'context_word_list', 'current', 'current_word_list',
    'debug', 'dictate', 'dictionary', 'dis', 'docode', 'doendcode', 'endcode',
    'execute', 'forth', 'genxt', 'greeting', 'here', 'inner', 'inspect', 'ip',
    'isReDef', 'json', 'last', 'major_version', 'multiple', 'name', 'newhelp',
    'newname', 'newxt', 'nextstring', 'nexttoken', 'ntib', 'order', 'os', 'outer',
    'panic', 'pdb', 'phaseA', 'phaseB', 'pop', 'push', 're', 'readTextFile', 'reset',
    'rstack', 'rtos', 'stack', 'stop', 'tib', 'tick', 'tos', 'version', 'vm', 'vocs',
    'wordhash', 'words', 'writeTextFile']

    # peforth can be driven from the python interpreter as well
    >>> peforth.vm.dictate
    <function dictate at 0x000001D1368E2510>
    >>> peforth.vm.dictate('version')
    p e f o r t h    v1.01
    source code http://github.com/hcchengithub/peforth

    # things defined inside peforth can be fetched back in the python interpreter:
    >>> peforth.main()
    OK 123 constant x
    OK exit
    >>> peforth.vm.forth
    {'obj2dict': <function object2dict at 0x000001D136934510>, 'x': 123}
    >>> peforth.vm.forth['x'] --> 123

    # use obj2dict() to turn a Word into a dict -- the preparation for see
    >>> peforth.vm.forth['obj2dict'](peforth.vm.tick('+'))
    {'__class__': 'Word', '__module__': 'projectk', 'name': '+',
    'xt': <function xt at 0x000001D1368F28C8>, 'immediate': False,
    'help': '( a b -- a+b) Add two numbers or concatenate two strings.',
    'comment': '', 'vid': 'forth', 'wid': 51, 'type': 'code'}

[x] see code words
    # json must first be given obj2dict() before it can handle our objects
        OK py> json.dumps(tick('+'),indent=4) .
        Failed to run <Word '</pyV>'>: Object of type 'Word' is not JSON serializable
        Continue, Debug, or Abort? [C/d/a] a
    # define the conversion function from inside peforth
        <py>
        def object2dict(obj):
            # convert object to a dict
            d = {}
            d['__class__'] = obj.__class__.__name__
            d['__module__'] = obj.__module__
            d.update(obj.__dict__)
            return d
        push(object2dict)
        </py>
        ^D
        OK .s
        0: <function object2dict at 0x000001D136934510> (<class 'function'>)
        OK constant obj2dict
        OK exit
    # with the conversion function, json can do the job
        >>> import json
        >>> print(json.dumps(peforth.vm.tick('+'),default=peforth.vm.forth['obj2dict'],indent=4))
        {
            "__class__": "Word",
            "__module__": "projectk",
            "name": "+",
            "xt": {
                "__class__": "function",
                "__module__": "projectk",
                "source": "def xt(_me=None): ### + ###\n    push(pop(1)+pop()) \n",
                "name": "+"
            },
            "immediate": false,
            "help": "( a b -- a+b) Add two numbers or concatenate two strings.",
            "comment": "",
            "vid": "forth",
            "wid": 51,
            "type": "code"
        }
        >>>
[x] code objects should carry their source code for see
    OK 45 @ dir .
    ['__class__', '__delattr__', '__dir__', '__doc__', '__eq__', '__format__',
    '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__',
    '__init_subclass__', '__le__', '__lt__', '__ne__', '__new__', '__reduce__',
    '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__',
    '__subclasshook__', 'co_argcount', 'co_cellvars', 'co_code', 'co_consts',
    'co_filename', 'co_firstlineno', 'co_flags', 'co_freevars', 'co_kwonlyargcount',
    'co_lnotab', 'co_name', 'co_names', 'co_nlocals', 'co_stacksize', 'co_varnames'] OK
    --> No good: a code object can neither gain new attributes nor have its own
        modified. If that fails, imitate Word and wrap the code object in a class so
        it can carry the source code; or use a closure -- the genxt() approach that
        already works, not necessarily worse than compyle. Or use dis.dis(func),
        which is even more visual.
[x] Thought of a way to show source code for code objects: introduce class Comment,
    similar to class Word but doing nothing (realized by phaseA phaseB), only
    carrying the comment. comma(Comment('lalalal')) lies in the dictionary, waiting
    for the see command to make use of it.
        OK py: comma(Comment("lalala"))
        OK here
        OK . 637
        OK 636 @ . lalala
        OK 636 @ type . --> <class 'projectk.Comment'>
        OK 636 @ . lalala
        OK 636 @ execute --> Failed to run <Word 'execute'>: must be str, not Comment
        Continue, Debug, or Abort? [C/d/a] a
[x] modify phaseA phaseB to support the Comment class --> done!
[x] modify ::, :>, </py>, and </pyV> to add the comment
[x] literal is still shown as a generic function by dis.dis() --> show the literal instead
        OK 339 @ .  # address 339 is known to hold a literal function
        <function xt.<locals>.f.<locals>.literal at 0x000001ED9B6579D8>
        OK 339 @ :> __name__ .
        OK 339 @ :> str .  # the way to print it readably
        Literal: pop(). <class 'str'>
        OK
    --> toString can be modified now ==> see is finally complete!!!
[x] Actually the __doc__ attribute is exactly meant for documentation text . . .
    --> Wrong!
        Failed to run <Word '</py>'>: 'code' object attribute '__doc__' is read-only
        Continue, Debug, or Abort? [C/d/a]
    But I had tried it, as follows:
        00035: RET (<class 'NoneType'>)
        00036: Literal: \\n|\\r <class 'str'>
        00037: RET (<class 'NoneType'>)
        00038: lambda:push(eval(vm.greeting())) (<class 'projectk.Comment'>)
        00039: (<class 'function'>)
          7           0 LOAD_GLOBAL              0 (push)
                      2 LOAD_GLOBAL              1 (eval)
                      4 LOAD_DEREF               0 (eval_code)
                      6 CALL_FUNCTION            1
                      8 CALL_FUNCTION            1
                     10 RETURN_VALUE
        OK 39 @ .
        <function xt.<locals>.<lambda> at 0x0000017E8D269598>
        OK 39 @ dir .
        ['__annotations__', '__call__', '__class__', '__closure__', '__code__',
        '__defaults__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__',
        '__format__', '__ge__', '__get__', ...snip...] OK
        OK 39 @ :> __doc__ . None OK
        OK 39 @ :: __doc__="abc"
        OK 39 @ :> __doc__ . abc OK
    This is: : version py> vm.greeting() ; // ( -- revision ) print the greeting
    message and return the revision code
    The __doc__ of the eval_code / exec_code produced by compile() is read-only,
    but once wrapped in a lambda it becomes writable. <------ mystery solved!!
    --> Having </py> comma(exec_code) directly brings no benefit at all: it
        sacrifices __doc__ and needlessly complicates phaseB. --> [x] Change it!
[x] These lines are strange:
        "" value description ( private ) // ( -- "text" ) description of a selftest section
        [] value expected_rstack ( private ) // ( -- [..] ) an array to compare rstack in selftest
        [] value expected_stack ( private ) // ( -- [..] ) an array to compare data stack in selftest
        0 value test-result ( private ) // ( -- boolean ) selftest result from [d .. d]
        [] value [all-pass] ( private ) // ( -- ["words"] ) array of words for all-pass in selftest
    the "( private )" became a prefix of their word.help!
    --> does the value command grab a stack diagram? --> the ( command saw that last
        had no help yet and appended the following (...) comment! It should only do
        so in compiling state.
[x] *** debugging, OK now. RI: constant and value were in trouble due to that
    I changed the Comment word and the way to compile code objects.
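    (A minimal plain-CPython sketch of the __doc__ note above -- the names are
    invented, not from peforth:)
        co = compile("1+1", "", "eval")  # a raw code object
        try:
            co.__doc__ = "abc"           # 'code' object attribute '__doc__' is read-only
        except AttributeError as e:
            print(e)
        f = lambda: eval(co)             # wrap it in a function ...
        f.__doc__ = "abc"                # ... and now __doc__ is writable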
[x] python shell and eforth referencing each other's data
        >>> peforth.main()
        OK 0 constant shell  # a variable defined inside peforth
        OK exit
        # hand it the globals() from outside
        >>> getattr(peforth.vm,'forth')['shell']=globals()
        >>> peforth.vm.forth
        {'obj2dict': <function obj2dict at 0x000002C8D8F5B1E0>, 'description': '',
        'expected_rstack': [], 'expected_stack': [], 'test-result': 0, '[all-pass]': [],
        'shell': {'__name__': '__main__', '__doc__': None, '__package__': None,
        '__loader__': <class '_frozen_importlib.BuiltinImporter'>, '__spec__': None,
        '__annotations__': {}, '__builtins__': <module 'builtins' (built-in)>,
        'peforth': <module 'peforth' from 'c:\\Users\\hcche\\Documents\\GitHub\\peforth\\peforth.py'>}}
        >>> peforth.main()
        OK shell .
        {'__name__': '__main__', '__doc__': None, '__package__': None, '__loader__':
        <class '_frozen_importlib.BuiltinImporter'>, '__spec__': None,
        '__annotations__': {}, '__builtins__': <module 'builtins' (built-in)>,
        'peforth': <module 'peforth' from 'c:\\Users\\hcche\\Documents\\GitHub\\peforth\\peforth.py'>}
        OK

    # Copy-paste from a DOS box in one shot (no indent, use block mode)
    python
    import peforth
    peforth.vm.dictate('0 constant shell')
    peforth.vm.dictate('// ( -- dict ) the outermost python interpreter globals()')
    getattr(peforth.vm,'forth')['shell']=globals()
    peforth.main()  # switch from the python interpreter into peforth

    \ now in peforth interpret state
    <accept> \ collect multi-line input from the terminal
        <py> import sys push(sys)</py> constant sys // ( -- sys ) The sys module.
        Try: sys py: help(pop())
    </accept> \ ( -- string T|f ) the string copy-pasted in from the terminal
    [if] tib.insert
        help sys
    [then]

[x] examples tools utilities goodies
    \ list all the code words
    <py> [w.name for w in words['forth'][1:] if 'code' in w.type] </pyV>

    \ list all the selftest-passed words
    <py> [w.name for w in words['forth'][1:] if 'pass'==getattr(w,'selftest',False)] </pyV> . cr

    \ list all immediate words
    <py> [w.name for w in words['forth'] if getattr(w,'immediate',False) ] </pyV> . cr

    \ cut the bottom 2 TOS cells out into a standalone list (array)
    ( -2 ) >r py: t,vm.stack=stack[rtos(1):],stack[:rpop(1)];push(t) --> slice

    \ Execute DOS command
        OK <py> exec('import os',globals(),globals())</py>  # import the os module
        OK py: os.system('dir')
         Volume in drive C is Windows
         Volume Serial Number is 2EA4-3202
         Directory of c:\Users\hcche\Documents\GitHub\peforth
        2017-08-23  09:31    <DIR>          .
        2017-08-23  09:31    <DIR>          ..
        2017-07-31  20:35                65 .gitattributes
        2017-06-25  13:31            18,226 voc.f
        2017-08-25  13:03    <DIR>          __pycache__
                      10 File(s)        178,951 bytes
                       3 Dir(s)  264,579,960,832 bytes free
        OK
    # But after <py> os.system(r"cd c:\Users\hcche\Documents\GitHub\ML\WH300")</py>
    the peforth working directory is not changed. It changes only the temporary shell.

    \ copy the comment below (np++ column mode) and Ctrl-V it from a DOS box to run in one shot
    <comment>
    python
    import peforth
    peforth.vm.dictate('0 constant shell')
    peforth.vm.dictate('// ( -- dict ) the outermost python interpreter globals()')
    getattr(peforth.vm,'forth')['shell']=globals()
    peforth.main()  # switch from the python interpreter into peforth
    \ now in peforth interpret state
    <accept> \ collect multi-line input from the terminal
        <py> import sys push(sys)</py> constant sys // ( -- sys ) The sys module.
        Try: sys py: help(pop())
    </accept> \ ( -- string T|f ) the string copy-pasted in from the terminal
    [if] tib.insert
        help sys
    [then]
    </comment>

    \ DOS command line one-liner to print the path environment variable
    c:\Users\hcche\Desktop>python -m peforth s' push(os.get_exec_path())' compyle execute (see) bye

[x] <accept> <py> does not work unless <py> is put on the next line <---- problem
    --> the rest of the line after <accept> should be the first line of the multiple lines
[x] OK include c:\Users\hcche\Documents\GitHub\ML\WH300\wh300.f
        C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\sklearn\cross_validation.py:44:
        DeprecationWarning: This module was deprecated in version 0.18 in favor of the
        model_selection module into which all the refactored classes and functions are
        moved. Also note that the interface of the new CV iterators are different from
        that of this module. This module will be removed in 0.20.
        "This module will be removed in 0.20.", DeprecationWarning)
        Failed to run <Word 'sinclude'>: pop from empty list
        Continue, Debug, or Abort? [C/d/a] a
        OK
    --> possibly because the rstack is used for returning while reset() (the stop
        command) clears the rstack.
    --> the guess was right. The stop command may only break the outer loop; it must
        not clear the rstack!!
[x] Let <accept> <text> auto-indent. Use the spaces before </accept> </text> as the
    common strip.
    --> study: <text> </text> simply used the BL word and skipped all the spaces
        before </text>; this needs a change.
        --> code test push(nextstring('[^ ]')) end-code
            test 123 yields:
            0: {'str': '   ', 'flag': True} (<class 'dict'>)
            1: 123 7Bh (<class 'int'>)
        used to fetch the spaces before </text> --> one option, not great either.
    --> better: take the common factor of every line's leading spaces and strip it
        from all of them.
        1. split into an array of lines
            </text> :> splitlines() ( [lines] )
        2. count each line's leading spaces: len - lstrip
            OK s"   abc" py> len(pop()) tib. s"   abc" py> len(pop()) \ ==> 7 (<class 'int'>)
            OK s"   abc" :> lstrip() py> len(pop()) tib. s"   abc" :> lstrip() py> len(pop()) \ ==> 3 (<class 'int'>)
        3. take the minimum
            OK py> min([1,2,3]) tib. py> min([1,2,3]) \ ==> 1 (<class 'int'>)
        4. strip that many leading spaces from every line: [ e for e in m]

    cls dropall
    <accept>
    <text>
        line1
            line2
                line3
            line4
        line5
    </text> constant lines
    </accept> drop tib.insert
    lines :> splitlines() constant [lines]
    <py> map(lambda x:len(x)-len(x.lstrip()),vm.forth['[lines]'])</pyV> constant leading-spaces // ( -- map ) usable only once!
    \ two ways to check leading-spaces; the latter is prettier
    \ <py> [i for i in vm.forth['leading-spaces']]</pyV> tib. \ check leading-spaces
    \ leading-spaces py> list(pop()) .
    \ OK leading-spaces py> list(pop()) .  # fine when the map is small
    \ [12, 16, 16, 16, 16, 8] OK
    \ OK leading-spaces py> list(pop()) .  # map-like iterators cannot rewind/reset
    \ [] OK
    leading-spaces py> min(pop()) constant common-indent
    [lines] common-indent <py> [i[tos():] for i in pop(1)]</pyV> nip constant [result]
    result py> "\n".join(pop()) constant result // ( -- string ) the cooked multi-lines string

    : -indent ( multi-lines -- cooked ) // Remove common indent of the string
        :> splitlines() ( [lines] )
        <py> map(lambda x:len(x)-len(x.lstrip()),tos())</pyV> ( [lines] map[^spaces] )
        py> min(pop()) ( [lines] indent )
        <py> [i[tos():] for i in pop(1)]</pyV> nip ( [result] )
        py> "\n".join(pop()) ;

    code -indent
        lines = pop()
        array = lines.splitlines()  # [lines]
        spaces = map(lambda x:len(x)-len(x.lstrip()),array)  # [spaces]
        indent = min(spaces)  # number of common indent
        cut = [i[indent:] for i in array]  # [cuted lines]
        push("\n".join(cut))
        end-code // ( multi-lines -- cooked ) Remove common indent of the string
    bingo! it works!
[x] don't need to use map in -indent; [f(i) for i in lines.splitlines()] should be
    enough --> Yes!
    The following two lines are equivalent:
        spaces = map(lambda x:len(x)-len(x.lstrip()),array)  # iterator
        spaces = [len(x)-len(x.lstrip()) for x in array]     # list
[x] Start to use peforth for the wh300 project . . .
    Using peforth for wh300, the first good news is that importing a module as a
    forth word works!!
        <py>
        import numpy as np
        push(np)
        </py> constant np // ( -- numpy ) The numpy module
        OK np .
        <module 'numpy' from 'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\numpy\\__init__.py'>
        OK
    --> importing into globals() is the best way. The method above is interesting
        but not the best. --> Done! wh300.f works fine now.
[x] -indent got cleverly fooled: a " "*100 trick destroyed the clue before </text>!!!
    The current result is over-indenting.
    --> the over-indenting was fixed, then constant's runtime broke. Being a
        runtime issue, the root cause was very hard to find.
    Root cause: the lambda source below got indented when it should not be, so
    -indent was at fault.
        str__', '__subclasshook__'] OK
        OK py> dictionary[456].__doc__ . lambda:exec(
            source = '\tpush(getattr(vm,"{}")["{}"])'.format(current, last().name)
            last().xt = genxt('constant',source)
            if not getattr(vm,current,False): setattr(vm,current,{})
            exec('getattr(vm,"{}")["{}"]=pop()'.format(current, last().name))
        )
        OK 123 constant x
        Failed to run <function xt.<locals>.<lambda> at 0x000001C0C39B61E0>:
        unexpected indent (<string>, line 2)
        Continue, Debug, or Abort? [C/d/a] a
        OK
    --> compare with the good output of 'see constant': the problematic lambda
        source above carries extra indent
        ------------ Definition in dictionary ------------
        00456: lambda:exec(
        source = '\tpush(getattr(vm,"{}")["{}"])'.format(current, last().name)
        last().xt = genxt('constant',source)
        if not getattr(vm,current,False): setattr(vm,current,{})
        exec('getattr(vm,"{}")["{}"]=pop()'.format(current, last().name))
        )
    --> first pass with the ugly layout to regain full functionality, then deal
        with it.
    --> interpret state ok, try compile --> ok too --> so what's the problem.. it's
        clear: when <py> is followed by two spaces the experiment already reproduces
        the problem; the catch is that the lambda only runs at test's runtime, and
        only then triggers the unexpected indent ... no wonder it was so hard to catch!!
        : test <py>  a=1  b=2  c=3 </py> ;
    --> breakpoint inside -indent when last==constant
        code -indent
            if debug and last().name=='constant': pdb.set_trace() <--- caught it
            ...snip....
    --> switch constant to the problematic <py>..</py> version --> see what -indent receives
        |(Pdb) p lines
        |' \n        source = \'\\tpush(getattr(vm,"{}")["{}"])...snip...
          ^---- this very space was the whole problem !!!! Really hard to find.
    --> Root cause: in constant's source code, an extra space followed the <py>!
    --> See Ynote: "peforth -indent command indent 問題探討 -- 成功了!
        扫描_20170828180311 _peforth_"
[X] Could reset() be made stronger? Having to panic() several times is
    annoying.... maybe it's meaningful?
[x] Using lambda to produce functions in compyle is problematic!
    # this works
        >>> s = '''
        ... dd = {'a':123,'b':456}
        ... print(dd)
        ... v = [dd[i] for i in dd]  # get all the values of a dict
        ... print(v)
        ... '''
        >>> exec(s)  # <----------- running exec() directly is fine, no problem
        {'a': 123, 'b': 456}
        [123, 456]
    # after passing through lambda, the local name space behaves strangely
    # the following no longer works; this is what lambda produces.
    # the compyle command must not use lambda . . . .
        ... s = '''
        ... dd = {'a':123,'b':456}
        ... print(dd)
        ... v = [dd[i] for i in dd]  # get all the values of a dict
        ... print(v)
        ... '''
        >>> f = lambda:exec(s)
        >>> f()
        {'a': 123, 'b': 456}
        NameError: name 'dd' is not defined
        >>>
    --> compyle switched to genfunc(source) for producing functions

    ----- this snippet works fine ------------
    <py>
    # python does not support annoymous function. But it supports closure,
    # so we can recover it. genfunc("body","args") returns a function which
    # is composed by the given source code and arguments.
    def genfunc(body,args):
        local = {}
        source = "def func({}):".format(args)
        # args is something like "", or 'x, y=123,z=None'
        if body.strip()=="":
            source = source+"\n    pass\n";
        else:
            source = (source+'\n{}').format(body)
        try:
            exec(source,globals(),local)
        except Exception as err:
            panic("Failed in genfunc(body,{}): {}\nBody:\n{}".format(args,err,body))
        local['func'].__doc__ = source
        return local['func']
    push(genfunc)
    </py> constant genfunc // ( -- func ) function generater genfunc(body,args)

    genfunc <py> pop()('    print("hi")',"")</pyV> :: () \ ==> hi

    ( arguments ) s" x,y"
    ( body ) <text>
        result = x**2 + y**2
        print(result)
        </text> -indent
    genfunc :> (pop(),pop()) constant f // ( -- func ) f(3,4) prints 25 which is 3^2+4^2
    f :: (3,4) \ ==> 25
    ----- this snippet works fine ------------
    Result:
        ^D
        hi   <--- correct, correct
        25
        Multiple-line mode is on, Ctrl-D switches it off.
        OK

    --- genfunc() made it into the project-k kernel -----------
    ( name ) s" lalala"
    ( arguments ) s" x,y"
    ( body ) <text>
        result = x**3 + y**3
        print(result)
        </text> -indent
    py> genfunc(pop(),pop(),pop()) constant f
    f :: (3,4)  # it works fine !!

    --- problems only show at runtime, hence selftest matters -----------
    ( name ) s" lalala"
    ( arguments ) s" x,y"
    ( body ) <text>
        result = x*y
        print(resultttttttt)
        </text> -indent
    py> genfunc(pop(),pop(),pop()) constant f
    \ no problem up to here; the problem only shows upon the execution below, and
    \ the error-message clue points far away
    OK f :: (1,2)
    Failed in </py> command: name 'resultttttttt' is not defined
    Body:
    pop()(1,2)
    Continue, Debug, or Abort? [C/d/a]
    ----- it works fine --------------

[x] After replacing lambda with genfunc(), the indent conventions all changed again,
    because a function body must be indented -- the opposite of the old exec(body).
    <py> py> py: :: :> are all affected; :: :> remain --> all done.
[x] Now without lambda (genfunc instead) test the original problem:
        <py>
        dd = {'a':123,'b':456}
        print(dd)
        v = [dd[i] for i in dd]  # get all the values of a dict
        print(v)
        </py>
    results:
        {'a': 123, 'b': 456}
        [123, 456] <---------------- Pass!!
[x] code compyle uses execute('-indent');execute('indent'); using
    dictate('-indent indent') has no effect -- why? --> yet the experiments below
    were all ok!
    --> RI: because it was in compiling state at the time!! dictate() compiled the
        two words in: no effect, plus other problems.
    ==> dictate() brings more problems than it solves; do not use it carelessly.
    These two debug-trick lines are kept as a memento:
        if tos().find('vm.greeting')!=-1: pdb.set_trace()
        dictate('-indent indent')  # strange, dictate doesn't work???
[x] (forget) in trouble now
        OK (forget)
        Failed to run <function compyle_anonymous at 0x0000018230B22400>:
        'Word' object has no attribute 'cfa'
    --> this problem went away by itself
[x] improve the greeting when imported from python interpreter
        OK py> sys.argv . ['peforth.py'] <------- run from DOS box
        >>> import peforth
        OK py> sys.argv . [''] <----------------- run from python interpreter, need more help messages
[x] Sort out try - exception in peforth.f
    # produce a function with genfunc() from the python interpreter
        >>> f = peforth.vm.genfunc("    1/0",'','test2')
        >>> f
        <function test2 at 0x000001B42DB13E18>
    # test it -- it does indeed raise
        >>> f()
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
          File "<string>", line 2, in test2
        ZeroDivisionError: division by zero
        >>> f
        <function test2 at 0x000001B42DB13E18>
    # compile it straight into the peforth dictionary
        >>> peforth.vm.comma(f)
    # inside peforth, on execution the error message comes from </py> again!
        >>> peforth.main()
        OK here 1- @ :: ()
        Failed in </py> (compiling=False): division by zero
        Body:
        pop()()
        Continue, Debug, or Abort? [C/d/a] a
    # check: it is indeed test2
        OK here 1- @ :> __doc__ .
        def test2():
            1/0
        OK
    --> Investigating the cause: it seems "whoever executes it reports the error",
        which actually gives the richest information. Inside ::, the interpret
        state goes through </py>, the compiling state through compyle.
    --> try it:
        OK here 1- @ constant f  \ grab the test2 function
        OK : test f :: () ;  \ deliberately let ::'s compiling state perform
        OK test  \ on execution the reporter becomes phaseB()
        Callable in phaseB <function compyle_anonymous at 0x000001CC3771D1E0>: division by zero
        Body:
        def compyle_anonymous():
            pop()()
        Continue, Debug, or Abort? [C/d/a] a
    --> ^^^^^^^--- this Body information looks useless; wrong? Actually it is not.
    --> as below, this is what the f :: () phrasing yields; of course its Body
        cannot show f's source code
        ------------ Definition in dictionary ------------
        00711: f __str__ (<class 'projectk.Word'>)
        00712: def compyle_anonymous():
                   pop()() (<class 'function'>)
          2           0 LOAD_GLOBAL              0 (pop)
                      2 CALL_FUNCTION            0
                      4 CALL_FUNCTION            0
                      6 POP_TOP
                      8 LOAD_CONST               0 (None)
                     10 RETURN_VALUE
        00713: RET (<class 'NoneType'>)
        ------------ End of the difinition ---------------
    --> the correct phrasing is:
        OK : test2 [ f , ] ;
        OK test2
        Callable in phaseB <function test2 at 0x000001CC35113E18>: division by zero
        Body:
        def test2(): <------------------ indeed shows the divide-by-0 source code
            1/0
        Continue, Debug, or Abort? [C/d/a] a
        OK see test2
        { ... snip... "cfa": 715 }
        ------------ Definition in dictionary ------------
        00715: def test2():
                   1/0 (<class 'function'>)
          2           0 LOAD_CONST               1 (1)
                      2 LOAD_CONST               2 (0)
                      4 BINARY_TRUE_DIVIDE
                      6 POP_TOP
                      8 LOAD_CONST               0 (None)
                     10 RETURN_VALUE
        00716: RET (<class 'NoneType'>)
        ------------ End of the difinition ---------------
        OK
    --> even in interpret state it is not necessarily </py> that reports (the
        earlier description was imprecise), as below:
        OK f py: execute(pop())
        Callable in phaseB <function test2 at 0x000001CC35113E18>: division by zero
        Body:
        def test2(): <----------------- the real divide-by-0 source shows directly
            1/0
        Continue, Debug, or Abort? [C/d/a]
    --> try: exception: to be improved further......
[x] multiple lines of tib. are not showing correctly. <--- problem
    --> try test.f
        111 tib. 222 tib.
        333 tib.
    --> I've got it. From clipboard is ok, from accept2 is not.
        OK ^D
        111 tib. 222 tib.
        333 tib.
        ^D
        111 \ ==> 111 (<class 'int'>)
        111 \ ==> 222 (<class 'int'>)
        111 \ ==> 333 (<class 'int'>)
        OK
    --> fixed
[x] RET at end of dictionary is expected but missing <--- problem!!
    --> improve (dump) d dump --> ok now
[x] Oh, my God! peforth can be a debugger or endoscope of python:
        <py>
        any python code; peforth is available e.g. push()
        push(123);import peforth;peforth.main()  # enter peforth break point, wonderful !!
        </py>
    --> The way to enter the peforth interpreter is not very good, though it's clear.
    --> ok now, the breakpoint usage is: push(locals());ok('111>>')
    ==> python -i already returns to the python interpreter for static analysis of
        the run results anyway. Wrote endo.py (see my ynote) as an alternative to
        pdb, for examining the program state at a breakpoint.
[x] How to install peforth manually: see my ynote
    [x] in the peforth package, __init__.py is peforth.py which is also __main__.py
    [x] then the paths of peforth.f and quit.f must be resolved; __path__[0] does it.
    [x] import projectk.py as vm must become from . import projectk.py as vm to make
        the path explicit
    [x] vm = __import__(__name__) inside projectk.py does not work within a package;
        instead let __init__.py fill in vm.vm = vm. ==> it works!

    Manual installation
    ========
    1. Copy the four files of this project -- projectk.py quit.f peforth.f
       __main__.py -- into this newly created folder:
       c:\Users\yourname\AppData\Local\Programs\Python\Python36\Lib\site-packages\peforth
    2. Make an extra copy of __main__.py there, named __init__.py. Done.

    Four ways to run peforth
    =======================
    1. Under the project folder, run: python __main__.py
       at OK type : test .' hello world!' cr ; test to print hello world!
       Type bye to leave.
    2. From outside the project folder, run: python peforth -- then as above.
    3. With the peforth package installed, run python -m peforth from any folder --
       then as above.
    4.
       With the peforth package installed, run python from any folder, then
       import peforth, then follow the hint to call peforth.main() to enter
       peforth -- then as above.

[x] Why peforth? why endo.py? Wouldn't a single object holding the observed locals
    be enough? 1. free indenting 2. a ready-made tool: forth can memorize many
    commands, and complex commands can be composed on the fly
[x] Since peforth can be a python debugging / learning tool, using peforth as a
    breakpoint should be as simple as possible.
    --> The REPL, peforth.main(), renamed to peforth.ok()
        REPL, or Read-Eval-Print-Loop.
    --> peforth.ok(prompt='OK ',loc={}) for user to specify the prompt and giving
        the locals at the moment.
    --> at the point ok() started, TOS is the tuple with information from the
        caller. The data stack was supposed to be empty, here after it won't be.
    --> The TOS provides the prompt, the locals
[x] No more debug command: it would collide with py> debug which is vm.debug, no
    need to add that problem.
[X] I found a python problem!!
        False==0 is True, False<=0 is True, False<=0.1 is True
        False<0.0001 is True, False<-0.1 is False
    Hit this while using debug to filter which breakpoints are active: debug is
    initialized to False, yet debug<=33 turned out to hold! 2019/11/25 10:26:06
[x] ." a" prints an extra space <--- problem
    RI: the dot . command printed an extra space early on for debugging
    readability; not needed anymore.
[x] peforth.path to indicate the home directory where peforth.f is
[x] IDLE generates keyboardinterrupts; try-except can fix it
    http://effbot.org/zone/stupid-exceptions-keyboardinterrupt.htm
    --> rewrote accept with a try-except check to avoid the unexpected
        KeyboardInterrupt thrown when IDLE resizes its window.
    --> the resize-window KeyboardInterrupt is fixed, but Ctrl-D is unusable; for
        multi-line input use <accept> ... </accept> tib.insert instead.
[x] How should peforth's version be single-sourced for whl packaging?
    The article "Single sourcing the version" offers several options:
    https://packaging.python.org/guides/single-sourcing-package-version/#single-sourcing-the-version
    I chose the version.txt file approach, apparently similar to jeforth.3we.
    peforth/version.txt holds a single python statement for all parties to reference.
    [X] Hence projectk.major_version stays in projectk.py without direct use from
        now on.
        __version__ = "1.02"
    Experiments that suit setup.py:
        dropall cls
        <accept>
        <py>
        loc = {}  # locals
        with open(v('package-directory')+"peforth\\"+"version.txt") as fp:
            exec(fp.read(),{},loc )  # later on we use: loc['__version__']
        push(loc)
        print('loc[\'__version__\'] is ',loc['__version__'])
        </py>
        </accept> tib.insert
        .s
    The actual code in setup.py:
        loc = {}  # locals
        with open("peforth/version.txt") as fp:
            exec(fp.read(),{},loc )  # later on we use: loc['__version__']
        version=loc['__version__']  # Refered in setup(...) as an argument
    The code in peforth/__main__.py:
        # Get version code from peforth/version.txt for whl package
        # to see the single source of version code.
        exec(readTextFile(path + "version.txt"),{},locals())
        vm.version = __version__
[x] Improve (see) to see source code from project-k
        OK py> reset (see) <--- no good so far
        {
            "__class__": "function",
            "__module__": "peforth.projectk"
        }
        py> reset.__doc__ tib. \ ==> None (<class 'NoneType'>)
        py> reset.__code__ tib. \ ==> <code object reset at 0x000001CE712C2810,
        file "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth\projectk.py",
        line 42> (<class 'code'>)
    __code__ is the chance to improve.
[x] (see) only sees class and module; that could be improved to include some more,
    e.g. __code__ ==> circular reference detected, unsolvable; for now .members
    .source make do.
[x] Determined to write a pykb.f in 3hta, dedicated to serving as peforth's
    keyboard input, to support multiple lines.
    Find the process ID of peforth for sendkeys:
        s" where name like '%python%'" see-process
    --> done; after include pykb.f, use {F7} to send the inputbox content down to python
[x] On the T550, activate-shell has no effect; sendkeys is fine. Yet git.f works well.
    --> It seems a python started directly from __main__.py cannot be switched to;
        only one started via a DOS Box can.
> s" where name like '%python%'" list-them python.exe:8212 \ 查出 python (直接 double click __main__.py 起來的) > WshShell :: appActivate(8212) > launch-git-shell > shellId . ==> 1608 \ 查出 git shell > WshShell :: appActivate(1608) \ 這個可以切過去 > WshShell :: appActivate(8212) \ 這個就不行 --> 如果退出 python 則該 DOS Box 能 activate 嗎? > s" where name like '%cmd%'" list-them TOTALCMD64.EXE:20780 cmd.exe:22848 cmd.exe:9556 > WshShell :: appActivate(20780) 可以切到 total commander > WshShell :: appActivate(22848) 可以切到剛退出 peforth 的 DOS Box > WshShell :: appActivate(9556) 這個不知是啥,切不過去! 用 see-process 看進去,竟然可能是 Google Chrome 的東西 string Name; cmd.exe uint32 ProcessId; 9556 string Caption; cmd.exe string CommandLine; C:\WINDOWS\system32\cmd.exe /d /c "C:\Users\hcche\AppData\Local\youdao\Dict\Application\stable\text_extractor_host.exe" chrome-extension://aohddidmgooofkgohkbkaohadkolgejj/ --parent-window=0 < \\.\pipe\chrome.nativeMessaging.in.53dc641bdd08e0c9 > \\.\pipe\chrome.nativeMessaging.out.53dc641bdd08e0c9 string CreationClassName; Win32_Process --> 所以切不到某些 process 是有的,何解? 進一步研究發現,這個 python 是從 Anaconda3 run 起來的 > s" where name like '%python%'" list-them python.exe:20092 > WshShell :: appActivate(20092) string Name; python.exe uint32 ProcessId; 20092 string CommandLine; C:\ProgramData\Anaconda3\python.exe "C:\Users\hcche\Documents\GitHub\peforth\__main__.py" --> Not root cause. 即使 Anaconda 的 python 也能切過去,只要。。。 --> 把 Title 改成 peforth 吧!看看是否改得到所在的 cmd or powershell DOS command c:\> title titlename can change the doxbox title but it's not a process attribute so it doen't help. --> 所以答案是: 直接跑 __main__.py 或經過 dosbox 都可能行或不行, process ID 可以用 nnnn to processid 指定的,就算了吧! --> 多印些 info 讓 user 自己手動設 processid, Done! [x] improve .members --> __class__ attribute can easily be circularly deep and long m py> inspect.getmembers(pop()) py> str(pop()) tib. [x] try to str(obj) then json.loads(string) and then json.dumps --> str() generates non-json 不行! --> 暫時放棄了 [x] C:\Users\hcche\Documents\GitHub\Morvan\tutorials\tensorflowTUT\tensorflow6_session.f 如何一口氣把所有的 python section variables 都變成 forth values? l :> keys() tib. \ ==> dict_keys( ['result2', 'result', 'sess', 'product', 'matrix2', 'matrix1', 'tf'] ) (<class 'dict_keys'>) --> 要能 programmatically 產生 constant --> 改寫 constant 得 (constant) : (constant) ( n "name" -- ) // Create a constnat (create) <py> source = ' push(getattr(vm,"{}")["{}"])'.format(current, last().name) last().xt = genxt('constant',source) if not getattr(vm,current,False): setattr(vm,current,{}) exec('getattr(vm,"{}")["{}"]=pop()'.format(current, last().name)) </py> reveal ; OK 123 char x (constant) OK x . ==> 123 OK --> 一把就成功了! 能不能用在 colon definition 裡面? : test 234 char y (constant) ; test y . ==> 234 成功! --> 有了 (constant) 應該就可以自動產生所有的 locals() 了 ==> ok now! vm.outport(loc) defined in quit.f [x] Install peforth from source ---- 早期 (1.22 版以前) 不懂得用 python setup.py install 時的替代方法 ---- a command to update the peforth module @ c:\Users\...\Python36\Lib\site-packages\peforth\.. Get the path import os.path as ospath # py> pdb :> __file__ tib. \ ==> C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\pdb.py (<class 'str'>) # py> ospath.dirname(pdb.__file__) tib. \ ==> C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib (<class 'str'>) # py> ospath.split(pdb.__file__) tib. \ ==> ('C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib', 'pdb.py') (<class 'tuple'>) # py> ospath.splitdrive(pdb.__file__) tib. 
        \ ==> ('C:', '\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\pdb.py') (<class 'tuple'>)
        # py> ospath.splitext(pdb.__file__) tib. \ ==> ('C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\pdb', '.py') (<class 'tuple'>)
        # py> ospath.splitunc(pdb.__file__) tib. \ ==> ('', 'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\pdb.py') (<class 'tuple'>)
        py> ospath.dirname(pdb.__file__)+"\\site-packages\\peforth\\" ( targetPath )

        getenv(key, default=None)
            Get an environment variable, return None if it doesn't exist.
            The optional second argument can specify an alternate default.
            key, default and the result are str.
        getenv compare with py> ospath.dirname(pdb.__file__)
        if same then proceed the patch program to copy all files
        if not then warning and stop
    Never mind -- copying directly is enough:
        ------ update.bat ------
        set pythonlib=C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib
        copy -y version.txt %pythonlib%\site-packages\peforth
        copy -y projectk.py %pythonlib%\site-packages\peforth
        copy -y __main__.py %pythonlib%\site-packages\peforth
        copy -y __init__.py %pythonlib%\site-packages\peforth
        copy -y peforth.f %pythonlib%\site-packages\peforth
        copy -y quit.f %pythonlib%\site-packages\peforth
        ------ ------ ------ ------ ------
[x] Discovered that pip help install lists all the usages of pip install.
    Having update.bat brute-force the peforth package from the project directory
    straight into lib\site-packages\peforth is too violent.
    --> Try this example from pip help install:
        pip install [options] [-e] <local project path> ...
    [X] To be studied. 14:33 18/05/21 v1.16 trial result: failed:
        c:\Users\hcche\Documents\GitHub>pip install -e peforth
        Obtaining file:///C:/Users/hcche/Documents/GitHub/peforth
        Missing build time requirements in pyproject.toml for
        file:///C:/Users/hcche/Documents/GitHub/peforth: 'setuptools' and 'wheel'.
        This version of pip does not implement PEP 517 so it cannot build a wheel
        without 'setuptools' and 'wheel'.
        Installing build dependencies ... done
        Complete output from command python setup.py egg_info:
        Traceback (most recent call last):
          File "<string>", line 1, in <module>
          File "C:\Users\hcche\Documents\GitHub\peforth\setup.py", line 9, in <module>
            with open("peforth/version.txt") as fp:
        FileNotFoundError: [Errno 2] No such file or directory: 'peforth/version.txt'
        ----------------------------------------
        Command "python setup.py egg_info" failed with error code 1 in
        C:\Users\hcche\Documents\GitHub\peforth\
        c:\Users\hcche\Documents\GitHub>
    [x] Ynote: "研究 install peforth from source 的方法" has succeeded.
    [/] jump far below to "---- 2018.12.15 懂得用 python setup.py install 需要修改 ----"
[/] full-screen editor: os.get_terminal_size(...)
    Return the size of the terminal window as (columns, lines).
[x] (forget) has an error: 'Word' object has no attribute 'cfa'
    <-- just use getattr(obj,name,None).
[x] peforth 1.3 uploaded to pypi. Preparing to write a wiki introducing how to
    apply peforth to learning TensorFlow.
    --> Done
    https://github.com/hcchengithub/peforth/wiki/Example-4-Examine-a-Machine-Learning-exercise
[x] Continue finishing the selftest parts of peforth.f
    --> translating a string into an array [d ... d] [r ... r] needs:
        : test2 char 123,456 s" [{}]" :> format(pop()) py> eval(pop()) ;
    --> String.indexOf becomes String.find
    \ Selftest must redirect print() to capture and check the screen output.
    \ Below is a working example of redirecting print(),
    \ rewritten to write into a buffer.
    See http://www.cnblogs.com/turtle-fly/p/3280519.html
    <accept>
    py> [""] value screen-buffer // ( -- 'string' ) Selftest screen buffer
    <py>
        class Screenbuffer:
            def __init__(self,buf):
                self.stdoutwas=sys.stdout
                self.buffer=buf
            def write(self, output_stream):
                self.buffer[0] += output_stream
            def view(self):
                self.stdoutwas.write(self.buffer[0])
            def reset(self):
                sys.stdout=self.stdoutwas
        vm.Screenbuffer=Screenbuffer
        # redirection
        sys.stdout=Screenbuffer(vm.forth['screen-buffer'])
        # print to screen buffer
        sys.stdout.stdoutwas.write("-------1111-----\n")
        print( 'hello')
        print( 'world')
        sys.stdout.stdoutwas.write("-------2222-----\n")
        # view screen buffer
        sys.stdout.view()
        # reset
        sys.stdout.reset()
        outport(locals())
    </py>
    </accept> tib.insert
[x] Examine, organize and discuss the several ways to generate a function or run inline python code:
    1. projectk.py genxt()   has __doc__; dedicated to code-word xt's; hard-wired _me argument
    2. projectk.py genfunc() has __doc__; general purpose; takes name, args, body
    3. peforth.f   compyle   generates a general-purpose anonymous function, no args
    4. <py>...</py> both ends are immediate; normal use is fine. But if you compose the source
       code first and then let </py> or </pyV> execute it, things change. The words try and
       try2 below both make sense and both work:
       OK : try char 123 [compile] </pyV> ;
       OK try . 123OK
       OK : try2 [ char 123 ] </pyV> ;
       OK try2 . 123OK
       But this one is actually meaningless:
       OK : try3 char 123 </pyV> ;
       and its result is baffling accordingly: Error! try3 unknown. OK
    5. run a string composed on the fly with exec() or eval() directly, e.g. the definitions of [r [d [p.
    6. use compile() or genfunc() directly - probably never needed(?)
[x] A serious bug:
    OK : test <py> 123 </pyV> ;
    OK see test
    ...snip...
    ------------ Definition in dictionary ------------
    00784: def compyle_anonymous(): push(123 ) (<class 'function'>)
    2 0 LOAD_GLOBAL 0 (push)
      2 LOAD_CONST 1 (123)
      4 CALL_FUNCTION 1
      6 POP_TOP
      8 LOAD_CONST 0 (None)
      10 RETURN_VALUE
    00785: RET (<class 'NoneType'>)
    ------------ End of the difinition ---------------
    OK
    OK : test py> "abc" ; reDef test
    OK see test        <----------- no response!
    OK ' test (see)    <----------- no response!
    OK ' test .        <----------- no response!
    --> what if the order is reversed? First : test py> "abc" ; --> OK, all normal
    --> then an empty word : nothing ; --> normal too!
    --> so it just can't have inline python? : test2 py> 1234 ; --> OK, all normal
    --> starting all over: : test <py> 123 </pyV> ; : test2 py> "abc" ; --> both OK.
    Left unresolved; probably a silly side effect of writing the selftest.
[x] python and javascript cannot access by address, so how to access by reference instead of by
    value? (call by name / call by address / call by reference) Yesterday, to reach the
    screenbuffer for the selftest, it had to be defined as
        py> [""] value screen-buffer // ( -- ['string'] ) Selftest screen buffer
    rather than
        py> "" value screen-buffer // ( -- 'string' ) Selftest screen buffer
    or that particular string could not be reached.
[x] Following the MetaMoji discussion of 2017-9-17 15:15, work out how to dump all the
    <selftest> sections. --> One look into quit.f shows it should be a one-liner:
        py> tick('<selftest>').buffer char peforth-selftest.f writeTextFile stop
    --> Success! --> From here on it's just a matter of editing peforth-selftest.f.
[x] (constant) hit reDef writeTextFile and aborted unexpectedly --> must not warn with panic;
    print is enough.
[x] About to release peforth v1.4
    1. py:~ py>~ ::~ :>~ are so good to have.
    2. selftest not completed yet but nice to have some
    Release steps: see Ynote: "Pack peforth to peforth.whl" > packaging steps.
[x] v1.4 released; from now on, v1.5
[x] With argv in place there should be no greeting and no reDef warnings.
    --> So the command line must be fetched earlier; quit.f is too late. --> Done!
[x] PyPI README.rst is solvable now: see rst2html, or convert from markdown,
    https://stackoverflow.com/questions/26737222/pypi-description-markdown-doesnt-work
    --> first try converting with the pypandoc module:
    py:~ import pypandoc; push(pypandoc) constant pypandoc // ( -- module )
    pypandoc :: convert('README.md','rst')
    Failed in </py> (compiling=False): No pandoc was found: either install pandoc and add it
    to your PATH or call pypandoc.download_pandoc(...) or install pypandoc wheels with included pandoc.
    --> OK pypandoc :> download_pandoc py: help(pop())
        Help on function download_pandoc in module pypandoc.pandoc_download:
        download_pandoc(url=None, targetfolder=None, version='latest')
            Download and unpack pandoc
            Downloads prebuild binaries for pandoc from `url` and unpacks it into `targetfolder`.
            :param str url: URL for the to be downloaded pandoc binary distribution for
                the platform under which this python runs. If no `url` is give, uses
                the latest available release at the time pypandoc was released.
            :param str targetfolder: directory, where the binaries should be installed
                to. If no `targetfolder` is give, uses a platform specific user location:
                `~/bin` on Linux, `~/Applications/pandoc` on Mac OS X, and
                `~\AppData\Local\Pandoc` on Windows.
    OK pypandoc :: download_pandoc()
    * Downloading pandoc from https://github.com/jgm/pandoc/releases/download/1.19.2.1/pandoc-1.19.2.1-windows.msi ...
    --> the download crawls forever ... annoying
    --> http://pandoc.org/ has an online converter; convert README.md to README.rst manually,
        a short section at a time. pandoc.org specializes in document format conversion.
    --> Online reStructuredText editor http://rst.ninjs.org/ --> Yes!!
[x] Release v1.5
[x] Unify update.bat, setup.bat, setup.py and friends --> copied 3we's setup.bat --> done!
[x] The example in the comment of the "words" command needed improvement --> overhauled; it now
    accepts a pattern.
[x] Should alias inherit the original word's help and comment? Reviewed the whole thing ...
    --> Yes, but // now appends only when the existing help is "(...)"; otherwise it replaces.
[x] Bug: (see) unexpectedly leaves the given tos on the data stack if it's not a Word.
[x] Discovered that python can probably run WshShell too, so jeforth.3hta pykb.f may not be needed.
[x] Recorded an elearning introduction to peforth.
[ ] wiki introducing py: help(genxt), py> genxt .source, ' + . members and other goodies
    --> sigh, my mindset back then ... fine things, but promoting my own toys without looking
        after the user's needs.
[X] Make a web page, or at least 3hta, serve as peforth's input box, solving the multiple-line
    input problem. --> launch 3hta from peforth, include pykb.f
    --> Can python know who launched it? If so, the problem of where Wsh.sendkey() should send
        would be solved. --> ^D, ipython and jupyter notebook cover this now.
[x] peforth.f selftest almost done; still many words to go:
    <py> [w.name for w in words['forth'][1:] if 'pass'!=getattr(w,'selftest',False)] </pyV> cr . cr
[x] Recently found bugs especially deserve selftest cases --> after (see) of a non-Word the
    stack wasn't clean --> every word with two code paths is suspect! --> even, say, an extra
    print() at the end of words is exactly what the tests should catch.
[/] readTextFile and writeTextFile seemed broken -- haha, a bug: they had been clobbered by inport.
[x] After display-off, display-on never comes back. Happened suddenly, very strange, and with no
    display it's hard to debug. With ." hello world" inside display-off there is no problem; it
    is "words help" that gets stuck inside words <== because words errored out, display-on never
    ran. Appending an "e" after words fixes it, which means nexttoken() misbehaved again: when
    run from a file (rather than the console command line) it skips past the CRLF and grabs the
    next token, derailing the program; and the stolen token happened to be display-on, hence the
    blank screen and the painful debugging. Any such word, where nexttoken is optional, can fail
    the same way under selftest. They should all get flushed out.
[x] Reviewed every place that uses word and nexttoken() ...
[x] "python -i -m peforth version exit" errors out afterwards ---> gone! possibly the WshShell
    win32 package --> it's back!!!
    c:\Users\hcche\Documents\GitHub\peforth>python -i __main__.py
    p e f o r t h v1.07
    source code http://github.com/hcchengithub/peforth
    *** Start self-test
    *** End of peforth.f self-test ... pass
    OK bye
    Traceback (most recent call last):
        File "__main__.py", line 129, in <module>
        ok()
        .... snip ......
        File "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\_sitebuiltins.py", line 26, in __call__
        raise SystemExit(code)
    SystemExit: None
    >>>
    --> can repro? python -i __main__.py and bye ... Yes, repro'ed
    --> OK py: exit()  <--- can repro
        OK py: exit(0) <--- repro too!!!
    --> can it repro on a simplified ~.py instead of peforth? --> Yes!!
    It is as simple as a single statement in test.py:
    c:\Users\hcche\Downloads>type test.py
    exit()
    c:\Users\hcche\Downloads>python -i test.py
    Traceback (most recent call last):
        File "test.py", line 1, in <module>
        exit()
        File "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\_sitebuiltins.py", line 26, in __call__
        raise SystemExit(code)
    SystemExit: None
    >>> exit()
    c:\Users\hcche\Downloads>
    --> So it's not a problem: this is the normal behavior of the -i command line switch.
    --> Having bye use os._exit(0) instead of exit() fixes it.
[x] exit command sets vm.exit=True to stop the ok() loop in __main__.py
[/] add bp('prompt') in addition to ok() to avoid the unnecessary awkward breakpoint instruction
    --> Listen to users, don't assume. ok(prompt,loc,cmd) arguments are all very useful.
[x] how to get the vm's parent? so the greeting can differ between situations, i.e. ok() vs
    peforth.ok() entering the interpreter --> not sure the original goal is reachable, but
    having the parent's data is always good.
[x] Bug found:
    c:\Users\hcche\Documents\GitHub\peforth>python -i -m peforth
    exit
    OK OK    <=== the python interpreter prompt was expected
    --> because there are two vm.exit's!!!! The peforth module's __init__.py and __main__.py
    are not one and the same!!! The module's own __main__.py exists only for -m execution;
    rewrite it ... ==> simplified the whole execution story and gave up running from the
    project folder. ---> 2019-05-11 restudied with success; only "import peforth" and
    "python -m peforth" remain. --> Since commit c3d7677 on Oct 8, 2017
[x] setup.bat automation for the new file layout comes later --> Done
[x] Tests before a Release v1.07
    [x] All run variants with selftest enabled:
        [x] Run setup.bat to build the wheel with selftest on
        [x] pip uninstall peforth
        [x] pip install the freshly built wheel (not plain "pip install peforth" - that fetches from the net!)
        [x] 1. __main__.py                        [/] selftest [/] greeting [/] exit [/] bye
        [x] 2. python __main__.py version drop    [/] .s words [/] exit [/] bye
        [x] 3. python -i __main__.py              [/] selftest [/] greeting [/] exit [/] bye
        [x] 4. python -i __main__.py version drop [/] .s [/] exit [/] bye
        [x] 5. python -i -m peforth               [/] selftest .s words exit
        [x] 6. python -i -m peforth version drop
        [x] 7. python, import peforth             [/] selftest peforth.ok() .s words <--- no parent
                                                  [/] 1234 bye, check echo %errorlevel%
    [x] All run variants again without selftest:
        [x] Run setup.bat to build the wheel with selftest off
        [x] pip uninstall peforth
        [x] pip install the freshly built wheel (not plain "pip install peforth" - that fetches from the net!)
        [x] 1. __main__.py                        [/] selftest [/] greeting [/] exit [/] bye
        [x] 2. python -i -m peforth               [/] selftest .s words exit bye
        [x] 3. python -i -m peforth .' Hello World!!' cr bye
        [x] 4. python, import peforth
    [x] Consider improving README.rst --> get the GitHub version right first
    [x] hello world, Ynote: 草稿 peforth wiki article hello world _wiki_
    [x] README.md --> README.rst via http://rst.ninjs.org
[x] These words should be moved into the selftest section:
    'description', 'expected_rstack', 'expected_stack', 'test-result', '[all-pass]',
    '***', 'all-pass', '[r', 'r]', '[d', 'd]'
[x] while display-off and display-on should be moved out!
[x] a new word to include a python file directly -- pyclude
    supports commands after a #__peforth__ comment by simply removing all #__peforth__ marks;
    also comments out "from __future__ import print_function" lines.
    1. read the file
    2. replace all #__peforth__ with null
    3. comment out "from __future__ import print_function"
    4. -indent indent
    5. add <py> and </py>
    6. tib.insert the string
    : pyclude ( <pathname.py> -- ... ) // Run the .py file in a <PY>..</PY> space
        CR word readTextFile py> re.sub("#__peforth__","",pop())
        py> re.sub(r"(from\s+__future__\s+import\s+print_function)",r"#\1",pop())
        -indent indent <py> "  <p" + "y>\n" + pop() + "\n  </p" + "y>\n" </pyV> tib.insert ;
        /// Auto-remove all #__peforth__ marks so we can add debug
        /// statements that are only visible when debugging.
        /// Auto comment out "from __future__ import print_function"
        /// which is not allowed within a <PY>..</PY> space.
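    The string surgery pyclude performs is ordinary Python; a minimal sketch of the same
    transformation (the function name pyclude_source is mine, and the -indent indent
    re-indentation step is left out):

        import re

        def pyclude_source(pathname):
            with open(pathname, 'r', encoding='utf-8') as f:
                s = f.read()
            s = re.sub(r"#__peforth__", "", s)  # un-hide the debug-only statements
            s = re.sub(r"(from\s+__future__\s+import\s+print_function)", r"#\1", s)  # comment it out
            # wrap for the TIB; the tag is split so this file would survive pyclude itself
            return "<p" + "y>\n" + s + "\n</p" + "y>\n"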
[x] tib.insert is dictate now, an alias.
[x] Tests before a Release v1.08
    [x] All run variants with selftest enabled:
        [x] Run setup.bat to build the wheel with selftest on
        [x] pip uninstall peforth
        [x] pip install the freshly built wheel (not plain "pip install peforth" - that fetches from the net!)
        [x] 1. python -i -m peforth              [/] selftest .s words exit
        [x] 2. python -i -m peforth version drop
        [x] 3. python, import peforth
            [x] selftest peforth.ok() .s words <--- no parent
            [x] 1234 bye, check echo %errorlevel%
    [x] All run variants again without selftest:
        [x] Run setup.bat to build the wheel with selftest off <-- note: this edits site-packages\peforth
        [x] pip uninstall peforth
        [x] pip install the freshly built wheel
        [x] 1. python -i -m peforth              [/] selftest .s words exit bye
        [x] 2. python -i -m peforth .' Hello World!!' cr bye
        [x] 3. python, import peforth
    [x] Consider improving README.rst
[x] version.txt advanced to v1.09
[x] The way I get the path is not good: on ubuntu the data files land in a separate folder, and
    I had to copy them into lib/python3.5 manually.
    Copy from (non-.py files are in ~/.local/lib/site-packages/peforth):
        peforth.f peforth.selftest quit.f version.txt
    To (.py files are in ~/.local/lib/python3.5/site-packages/peforth):
        __init__.py __main__.py projectk.py
    The solutions I found on Stackoverflow are bad; doing it manually is fine.
    [x] A wiki page discusses this. done.
    [/] A chance to solve it properly - search my Ynote: "2018/01/17 16:39 插曲,意外發現查出
        python 的東西都放哪裡的方法了!peforth 在 Ubuntu 上跑可能有救了。_peforth_ _ubuntu_"
    [/] Study this:
        c:\Users\hcche\Documents\GitHub\DeepSpeech>py -m site
        sys.path = [
            'c:\\Users\\hcche\\Documents\\GitHub\\DeepSpeech',
            'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\python36.zip',
            'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\DLLs',
            'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib',
            'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36',
            'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages',
            'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\win32',
            'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\win32\\lib',
            'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\Pythonwin',
        ]
        USER_BASE: 'C:\\Users\\hcche\\AppData\\Roaming\\Python' (doesn't exist)
        USER_SITE: 'C:\\Users\\hcche\\AppData\\Roaming\\Python\\Python36\\site-packages' (doesn't exist)
        ENABLE_USER_SITE: True
        c:\Users\hcche\Documents\GitHub\DeepSpeech>
[/] The experience of developing harry_port() in
    c:\Users\hcche\Documents\GitHub\Morvan\tutorials\tensorflowTUT\tf17_dropout\full_code-2.py
    shows that once you have such a powerful tool, you don't want the words defined on the fly
    with it to be wiped when the breakpoint's closing --- marker --- runs. What to do? (A sketch
    of the round-trip follows below.)
    1. put the things to keep before the tutorial, or include a separate tool kit first --> this one is good!
    2. doing without marker (mine is too strong: it wipes across all vocabularies!) would take a
       forget that clears only the current vocabulary's words down to ───.
    3. and it takes vocabulary support: define the words to keep into root, and work in the
       tutorial vocabulary the rest of the time.
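    A sketch of that round-trip, assuming harry_port() returns a dict of the captured FORTH
    values and outport() is among them, as the quit.f examples later in this log show:

        code = '''
        batch_X, batch_Y = mnist.train.next_batch(100)   # work with the captured names directly
        outport(locals())                                # hand every local back to peforth as words
        '''
        exec(code, harry_port())  # harry_port() supplies the globals; locals defaults to it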
[x] What is wrong with this interpreter-mode for loop?
    OK 3 [for] t@ 100 + int digit [next]
    Failed in </py> (compiling=False): pop from empty list
    Body: push(pop().data[pop()])
    OK
    ==> the problem was probably digit using the <text>...</text> dictate macro form inside -
    confirmed: avoid that macro and the loop is fine.
[/] The harry_port() usage technique must go into its help! As in this example, a <py>...</py>
    block cannot be used, because it compiles first and this application must not be pre-compiled:
    OK <text> locals().update(harry_port()); batch_X, batch_Y = mnist.train.next_batch(100);
       outport(locals()) </text> py: exec(pop())
[x] exit is not forceful enough: execution charges on below it; a stop must follow. So:
    code stop reset() end-code // ( -- ) Stop the TIB loop
    code exit
        if compiling: comma(EXIT)
        else: vm.exit=True ; reset()  <---- the added reset() is the stop
    end-code immediate // ( -- ) Exit this colon word.
    Damn, stumbled on this bug by accident!! exit's over-running had actually been seen before
    without drawing attention.
[x] <accept> nop </accept> on one single line didn't work; improve it? ---> Done!
[x] Tests before a Release v1.09
    [x] All run variants with selftest enabled:
        [x] Run setup.bat to build the wheel with selftest on
        [x] pip uninstall peforth
        [x] pip install peforth-xxxx.whl <== note! the freshly built wheel, or it fetches from the net
        [x] 1. python -i -m peforth              [/] selftest .s words exit
        [x] 2. python -i -m peforth version drop
        [x] 3. python, import peforth
            [x] selftest peforth.ok() .s words <--- no parent
            [x] 1234 bye, check echo %errorlevel%
    [x] All run variants again without selftest:
        [x] Note! it is site-packages\peforth\quit.f that gets edited, so this step must be
            inserted *before* setup.bat builds the wheel!!! Run setup.bat to build the
            selftest-off wheel.
        [x] pip uninstall peforth
        [x] pip install peforth-xxxx.whl <== note! the freshly built wheel, or it fetches from the net
        [x] 1. python -i -m peforth              [x] selftest .s words exit bye
        [x] 2. python -i -m peforth .' Hello World!!' cr bye
        [x] 3. python, import peforth
    [x] Consider improving README.rst
    [x] version bumped to 1.11 (1.10 must be skipped: it would read as 1.1)
[/] -indent could be smarter, giving more freedom inside <text>...</text>: when the line holding
    </text> is blank, take its length as -indent's minimum; lines indented less than that get
    padded out to the minimum. That way content can follow <text> directly - more freedom.
[x] Besides exit and stop, a way to abort an include is needed - or else define precisely how
    stop and exit differ, or merge them. vm.exit is what ok() watches; it clearly returns to the
    python interpreter, which is already delicate because exit doubles as the inner-loop
    instruction equivalent to RET. If exit took on yet another meaning, even I would get
    confused. That leaves stop, which clearly breaks the outer loop. So a new word is needed:
    break-include. Since sinclude processes .f files through dictate, tweaking ntib should do
    it. Test by breaking inside xray.f and inspecting the half-consumed tib (does it still
    contain the tutorial?) ... ---> Bingo!!
    : break-include ( -- ) // Break including .f file
        py: vm.ntib=len(tib) ;
    stop is just reset(); exit, when compiling, is EXIT==RET, otherwise merely vm.exit=True to
    stop ok(). 2020/06/03 10:34:10 proeforth now has skip2 for this, more flexible.
[x] peforth can help a .py import modules:
    py> os.getcwd() constant working-directory // ( -- "path" ) Tutorial home directory saved copy
    \ my MNIST_data directory is there
    cd c:\Users\hcche\Downloads
    py:~ from tensorflow.examples.tutorials.mnist import input_data as mnist_data; push(mnist_data)
    parent :: ['mnist_data']=pop(1)
    \ pop(1) is brain-twisting; such in-lines only make sense translated back into python.
[x] Rewrite *debug*, dropping the dependency on pdb.set_trace() - use pdb without importing it:
    py: sys.modules['pdb'].set_trace()
    : *debug* ( <prompt> -- ... ) // FORTH breakpoint
        BL word ( prompt ) py: ok(pop(),cmd="cr") ;
    /// How to invoke pdb:
    /// py: sys.modules['pdb'].set_trace()
[x] "11 *debug* >> 22" - but the 22 got skipped! <----- problem --> fixed
[x] *debug* cannot be used in compiling mode (colon definitions) yet, because the prompt that
    follows needs to read the tib immediately.
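    The /// note's pdb trick relies on pdb already sitting in sys.modules; a defensive
    one-liner sketch of the same idea:

        import sys
        # use pdb without a top-level import: fine when an earlier import pulled it in,
        # otherwise fall back to importing it on the spot
        (sys.modules.get('pdb') or __import__('pdb')).set_trace()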
[x] Bug found: "OK help" on a Word in phaseB --> <Word 'help'>: 'int' object has no attribute
    'help' --> help improved.
[x] new word "import" works fine
[x] new word __main__ works fine:
    s" dos title " __main__ :> __file__ + CRLF + dictate drop
    Note! Without the CRLF, DOS grabs everything after dictate - even the drop - as part of the
    command line.
[x] release 1.11: new words import, __main__, break_include, plus improved *debug* and help
[X] ( ... ) comments can nest now - v1.23
[x] CRLF leaves '\r\n' on TOS
[x] Ignore the command line when running in jupyter notebook:
    (Pdb) vm.commandline
    '-f C:\\Users\\hcche\\AppData\\Roaming\\jupyter\\runtime\\kernel-be1c3297-f7a9-4cb2-a7aa-b06e29f158ea.json'
    (Pdb) sys.argv
    ['c:\\users\\hcche\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\ipykernel_launcher.py',
     '-f', 'C:\\Users\\hcche\\AppData\\Roaming\\jupyter\\runtime\\kernel-be1c3297-f7a9-4cb2-a7aa-b06e29f158ea.json']
    (Pdb) sys.argv[0]
    'c:\\users\\hcche\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\ipykernel_launcher.py'
    (Pdb) sys.argv[0].endswith('ipykernel_launcher.py') --> True, the key to recognizing the case
    Running jupyter notebook raised "Error! -f unknown." again. Previously this came from
    "import peforth" under jupyter notebook seeing the unexpected command line above. Wasn't
    this already fixed? --> excluding py> sys.argv[0].endswith('.py') is not enough;
    use py> sys.argv[0].endswith(('.py','.ipy','.ipynb'))
[x] While itchat runs, this problem keeps occurring:
    Traceback (most recent call last):
        File "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth\projectk.py", line 342, in outerExecute
        f = float(token) # triggers exception if token is malformed
    ValueError: could not convert string to float: '<mmreader><category'
    Why doesn't the try/except catch it? Reproducing steps (at home on my desktop):
    c:\Users\hcche\Documents\GitHub\ibrainfuck\bfinterpreter>python    (v1.12 at home)
    >>> import peforth
    >>> peforth.ok()
    OK sys . cr
    Traceback (most recent call last):
        File "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth\projectk.py", line 342, in outerExecute
        f = float(token) # triggers exception if token is malformed
    ValueError: could not convert string to float: 'sys'
    Finally a reliable repro ... --> fixed after rewriting projectk.py > outer().
[x] Studied how to run the brainfuck interpreter, c:\Users\hcche\Documents\GitHub\ibrainfuck
    --> see Ynote __brainfuck_
[x] A bug revealed an even better harry_port() usage (quit.f updated) - the three study treasures:
    \ 1. DOS Box title
    import peforth; peforth.ok(loc=locals(),cmd="include xray.f")
    \ 2. Breakpoint
    peforth.ok('11> ',cmd="parent inport")
    \ 3. Lab of copy-paste
    <accept> <text>
    # ---------------------------------------------------------------------------
    all locals() can use
    # ---------------------------------------------------------------------------
    </text> -indent py: exec(pop(),harry_port()) # If only globals is given, locals defaults to it.
    </accept> dictate
[x] msg is a forth value and also a peforth global - the confusion came from JavaScript's habit
    of not distinguishing object from dict. (::) and (:>) are neutral: obj :: method or
    obj :: ['property'] depends on one's own understanding of the object; the syntax itself is fine.
[x] Ipeforth kernel for Jupyter is ok now. Bring peforth to http://nbviewer.jupyter.org/
    How to install the Ipeforth kernel for jupyter notebook: copy kernel.json to
    %USERPROFILE%\AppData\Roaming\jupyter\kernels\peforth\kernel.json
    i.e. c:\Users\hcche\AppData\Roaming\jupyter\kernels\peforth\kernel.json
    Create the directory manually if %USERPROFILE%\AppData\Roaming\jupyter\kernels\ does not exist.
[x] Tests before a Release v1.13
    [x] All run variants without selftest, preparing the actual release:
        [x] edit %USERPROFILE%\Documents\GitHub\peforth\quit.f : ' <selftest> :: enabled=False
            Run setup.bat to build the selftest-off wheel
        [x] pip uninstall peforth
        [x] pip install peforth-xxxx.whl <== note! the freshly built wheel, or it fetches from the net
        [x] 1. python -i -m peforth              [/] no-selftest .s words exit
        [x] 2. python -i -m peforth version drop
        [x] 3.
            python, import peforth
            [x] selftest peforth.ok() .s words <--- no parent
            [x] 1234 bye, check echo %errorlevel%
        [x] 4. jupyter notebook --> .s cd help bye .s cd help exit
    [x] Consider improving README.rst
    [x] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> start over
    [x] All run variants with selftest:
        [x] edit %USERPROFILE%\Documents\GitHub\peforth\quit.f : ' <selftest> :: enabled=True
            Run setup.bat to refresh the local version for testing
        [x] 1. python -i -m peforth              [/] with-selftest .s words exit bye
        [x] 2. ipython -i -m peforth .' Hello World!!' cr bye
        [x] 3. ipython, import peforth .s words
            [x] selftest peforth.ok() .s words <--- w/parent
            [x] 1234 bye, check echo %errorlevel%
        [x] 4. jupyter notebook --> .s cd help bye .s cd help exit
    [x] Consider improving README.rst
    [x] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> start over
    [x] version bumped to 1.14 (1.10 must be skipped: it would read as 1.1)
    [x] Make a master release up on GitHub --> trivial with GitHub Windows.
[x] Get jupyter to feature peforth --> now added to jupyter's kernel list:
    https://github.com/jupyter/jupyter/wiki/Jupyter-kernels
[ ] Like harry_port, which brings all wanted variables into projectk - how to make it easier?
[ ] Study: when deep inside some module, how does peforth find and bring in specified variables?
    1. debug the toy keras exercise; breakpoint deep inside a keras module
    2. instead of the loc={**locals(),**{'foo':foo,'bar':bar}} trick, try to find foo and bar's actual parent
    3. accessing volatile variables outside their scope may not be a good idea, but being able
       to inspect them at a peforth breakpoint is necessary.
    tensor_shape is imported in
    C:\Users\hcche\AppData\Local\Programs\Python\Python36\Lib\site-packages\tensorflow\python\keras\_impl\keras\layers\wrappers.py
    char input_shape <text> \ local variable
        locals :> ['{0}'] constant {0}
        __main__ :: peforth.projectk.{0}=v('{0}') </text> :> format(pop()) dictate
    char tf <text> \ global variable
        __main__ :> {0} constant {0}
        __main__ :: peforth.projectk.{0}=v('{0}') </text> :> format(pop()) dictate
    * 1. char foobar module ( module )
      2. py: setattr(sys.modules['foobar'].projectk,'foobar',v('foobar')) \ add to peforth
    * 1. import numpy constant np // ( -- numpy ) module object, method #1
         py> sys.modules['numpy'] constant np // ( -- numpy ) method #2
         __main__ :> np constant np // ( -- numpy ) method #3
      2. np __main__ :: peforth.projectk.np=pop(1) \ peforth global np
         __main__ :: np=pop(1) \ __main__ global, see 'help __main__'
    * 3. py: setattr(sys.modules['peforth'].projectk,'np',v('np')) \ alt method
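    Method 3 above, spelled out as plain Python; a sketch (it assumes peforth has already been
    imported so sys.modules can see it):

        import sys
        import numpy as np

        # plant `np` as a global inside the peforth VM's namespace, so FORTH code
        # can say `py> np.zeros(3)` with no further plumbing
        setattr(sys.modules['peforth'].projectk, 'np', np)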
    char child_input_shape <text> \ local variable
        locals :> ['{0}'] constant {0}
        __main__ :: peforth.projectk.{0}=v('{0}') </text> :> format(pop()) dictate
    \ make librosa a global in peforth
    char librosa py> tick(tos()) execute py: globals()[pop()]=pop()
    \ an even simpler way
    import librosa constant librosa
    char librosa librosa py: globals()[pop()]=pop()
    char input_shape <text> \ local variable
        locals :> ['{0}'] constant {0}
        __main__ :: peforth.projectk.{0}=v('{0}') </text> :> format(pop()) dictate
    char tensor_shape <text> \ local variable
        locals :> ['{0}'] constant {0}
        __main__ :: peforth.projectk.{0}=v('{0}') </text> :> format(pop()) dictate
    char selfLayer <text> \ local variable
        locals :> ['{0}'] constant {0}
        __main__ :: peforth.projectk.{0}=v('{0}') </text> :> format(pop()) dictate
    import peforth # [ ] _debug_
    peforth.ok(cmd='''
        0 value Count
        none value child_output_shape
        exit
        ''')
    try:
        child_output_shape = child_output_shape.as_list()
    except Exception as err:
        peforth.ok('33> ',loc={**locals(),**{'tensor_shape':tensor_shape,'self.layer':self.layer,'err':err}})
    locals :: pop('peforth') locals inport
    tensor_shape :> TensorShape(v('input_shape')).as_list() constant input_shape2
    tensor_shape :> TensorShape([v('input_shape2')[0]]+v('input_shape2')[2:]) constant child_input_shape
    self.layer :> _compute_output_shape(v('child_input_shape')) tib. \ ==> (?, 2048) (<class 'tensorflow.python.framework.tensor_shape.TensorShape'>)
    self.layer :> _compute_output_shape(v('child_input_shape')) tib. \ ==> (?, 2048) (<class 'tensorflow.python.framework.tensor_shape.TensorShape'>)
    self.layer :> _compute_output_shape(v('child_input_shape')) tib. \ ==> None (<class 'NoneType'>)
    self.layer :> _compute_output_shape(v('child_input_shape')) tib. \ ==> None (<class 'NoneType'>)
[x] In jupyter notebook there was no way to exit: every exit left something on the stack, so we
    could never get out.
    load> exit
    load> .s
    0: <IPython.core.autocall.ZMQExitAutocall object at 0x0000020577BF5EF0> (<class 'IPython.core.autocall.ZMQExitAutocall'>)
    load>
    --> compare with a .py run --> no such problem. --> enter and leave immediately --> stuck at
    once. --> simplify the peforth cell and compare ... after locals inport there is an extra
    "exit"; it looks like the original exit, but being extra is wrong, and after a --- marker
    cleanup all is well - proof enough that it's the culprit. --> How did it happen? In the
    ipython case the locals() at that moment really do contain exit, quit and friends; this
    exit collided with ours, and
    locals :> ['exit'] . cr --> <IPython.core.autocall.ZMQExitAutocall object at 0x000001DBB24B5EF0>
    is exactly that strange thing. RI
[ ] It would be best if inport could pick and choose. The procedure (a Python rendering of the
    filtering step follows below):
    load2> locals keys . cr
    dict_keys(['__name__', '__doc__', '__package__', '__loader__', '__spec__', '__builtin__',
    '__builtins__', '_ih', '_oh', '_dh', 'In', 'Out', 'get_ipython', 'exit', 'quit', '_', '__',
    '___', '_i', '_ii', '_iii', '_i1', 'tf', '_i2', 'tflearn', '_i3', 'speech_data', '_i4',
    'time', 'peforth', 'epoch_count', 'learning_rate', 'training_iters', 'batch_size', 'width',
    'height', 'classes', '_i5', 'batch', 'word_batch', '_i6', 'net', 'model', 'x', '_i7'])
    \ pick what you need from the list above
    <py> ['get_ipython', 'tflearn', 'speech_data', 'time', 'epoch_count', 'learning_rate',
          'training_iters', 'batch_size', 'width', 'height', 'classes', 'batch', 'word_batch',
          'net', 'model', 'x'] </pyV> ( [picked keys] )
    \ pick those out of locals
    <py> dict([(k,v) for k,v in v('locals').items() if k in tos()]) </pyV> nip ( {picked locals} )
    \ now it is safe to inport them as peforth words
    inport
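    The filtering step of that procedure is one dict comprehension in Python; a sketch, where
    loc stands for the locals() dict that was handed to peforth:

        wanted = ['tflearn', 'speech_data', 'time', 'net', 'model', 'x']   # hand-picked keys
        picked = {k: v for k, v in loc.items() if k in wanted}
        # `picked` can now be inport'ed safely: IPython's exit/quit never sneak in as words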
[ ] python virtualenv, http://docs.python-guide.org/en/latest/dev/virtualenvs/
    The problems it solves are FORTH's problems too; study how they solved them, consider how to
    adopt it, and how to include only what is necessary.
[x] The Ubuntu problem seems solvable now:
    --> Under Ubuntu:
    OK site :> USER_BASE . cr        \ doesn't exist!
    /home/hcchen5600/.local
    OK site :> USER_SITE . cr        \ doesn't exist!
    /home/hcchen5600/.local/lib/python3.6/site-packages
    OK site :> PREFIXES . cr
    ['/usr', '/usr']
    The real things are at site.PREFIXES[0] + /local/lib/site-packages/peforth/
    --> Windows:
    OK site :> USER_BASE . cr        \ doesn't exist!
    C:\Users\hcche\AppData\Roaming\Python
    OK site :> USER_SITE . cr        \ doesn't exist!
    C:\Users\hcche\AppData\Roaming\Python\Python36\site-packages
    OK site :> PREFIXES . cr
    ['C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36', 'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36']
    The real things are at site.PREFIXES[0] + /lib/site-packages/peforth/
    --> Ubuntu virtualenv:
    >>> import site
    >>> site.PREFIXES
    ['/home/hcchen5600/GitHub/DeepSpeech', '/home/hcchen5600/GitHub/DeepSpeech']
    >>> site.USER_BASE
    '/home/hcchen5600/.local'
    >>> site.USER_SITE
    '/home/hcchen5600/.local/lib/python3.6/site-packages'
    The real things are at site.PREFIXES[0] + /lib/site-packages/peforth/
    i.e. \rootfs\home\hcchen5600\GitHub\DeepSpeech\lib\site-packages\peforth\..
    \ Under Windows the path can be normalized; with the above scheme Windows yields
    OK py> path . cr
    C:\Users\hcche\AppData\Local\Programs\Python\Python36/lib/site-packages/peforth/
    \ which ntpath.normpath() can fix:
    OK import ntpath OK constant ntpath
    OK ntpath dir . cr
    ['__all__', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__',
     '__package__', '__spec__', '_get_bothseps', '_getfinalpathname', '_getfullpathname',
     '_getvolumepathname', 'abspath', 'altsep', 'basename', 'commonpath', 'commonprefix',
     'curdir', 'defpath', 'devnull', 'dirname', 'exists', 'expanduser', 'expandvars', 'extsep',
     'genericpath', 'getatime', 'getctime', 'getmtime', 'getsize', 'isabs', 'isdir', 'isfile',
     'islink', 'ismount', 'join', 'lexists', 'normcase', 'normpath', 'os', 'pardir', 'pathsep',
     'realpath', 'relpath', 'samefile', 'sameopenfile', 'samestat', 'sep', 'split', 'splitdrive',
     'splitext', 'splitunc', 'stat', 'supports_unicode_filenames', 'sys']
    OK ntpath :> normpath . cr
    <function normpath at 0x000001C511337E18>
    OK ntpath :> normpath py: help(pop())
    Help on function normpath in module ntpath:
    normpath(path)
        Normalize path, eliminating double slashes, etc.
    OK py> path ntpath :> normpath(pop()) . cr
    C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth
    OK
    \ Or check whether it's Windows:
    In [8]: sys.modules.get('nt')     <--- None means it isn't there: not windows
    In [9]: sys.modules.get('sys')
    Out[9]: <module 'sys' (built-in)>
    \ An even better way - yeah, this is it:
    -- ubuntu --   In [12]: os.name   Out[12]: 'posix'
    -- windows --  OK os :> name . cr   nt
    [/] With this solution even the jupyter peforth kernel install can be automated.
[x] The Ubuntu problem should be solved; to promote peforth a release must go out fast.
    Tests before a Release v1.14
    [x] All run variants without selftest, preparing the actual release:
        [x] edit %USERPROFILE%\Documents\GitHub\peforth\quit.f : ' <selftest> :: enabled=False
            Run setup.bat to build the selftest-off wheel
        [x] pip uninstall peforth
        [x] pip install peforth-xxxx.whl <== note! the freshly built wheel, or it fetches from the net
        [x] 1. python -i -m peforth              [x] no-selftest .s words exit
        [x] 2. python -i -m peforth version drop
        [x] 3. python, import peforth
            [x] selftest peforth.ok() .s words <--- no parent
            [x] 1234 bye, check echo %errorlevel%
        [x] 4. jupyter notebook --> .s cd help bye .s cd help exit
        [x] 5. repeat the above on ubuntu --> copy the wheel into WSL ubuntu --> virtualenv is fine
    [/] Consider improving README.rst
    [/] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> start over
    [x] All run variants with selftest:
        [x] edit %USERPROFILE%\Documents\GitHub\peforth\quit.f : ' <selftest> :: enabled=True
            Run setup.bat to refresh the local version for testing
        [x] 1. python -i -m peforth              [x] with-selftest .s words exit bye
        [x] 2. ipython -i -m peforth .' Hello World!!' cr bye
        [x] 3.
            ipython, import peforth .s words
            [x] selftest peforth.ok() .s words <--- w/parent
            [x] 1234 bye, check echo %errorlevel%
        [x] 4. jupyter notebook --> .s cd help bye .s cd help exit
    [x] Consider improving README.rst
    [x] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> start over
    [x] version bumped to 1.15 (1.10 must be skipped: it would read as 1.1)
    [x] Upload the tested wheel straight to Pypi
    [x] Make a master release up on GitHub --> trivial with GitHub Windows.
[x] Weird WSL Ubuntu virtualenv experience with pip install peforth in a virtualenv
    --> permission denied --> so I used sudo, which succeeds but installs peforth globally
    instead of into the virtualenv! see
    https://stackoverflow.com/questions/14665330/pip-requirement-already-satisfied
    --> The real reason for the permission denial: peforth-1.14-py3-none-any.whl had been
    copied in by Windows and needed chmod 777. The correct session:
    (DeepSpeech) hcchen5600@31ENB667:~/GitHub/DeepSpeech$ chmod 777 peforth-1.14-py3-none-any.whl
    (DeepSpeech) hcchen5600@31ENB667:~/GitHub/DeepSpeech$ pip install peforth-1.14-py3-none-any.whl
    Processing ./peforth-1.14-py3-none-any.whl
    Installing collected packages: peforth
    Successfully installed peforth-1.14
    (DeepSpeech) hcchen5600@31ENB667:~/GitHub/DeepSpeech$
[x] Alias peforth.vm.things as peforth.things. 14:59 2018/03/11 made vm.execute(), vm.dictate()
    and peforth.ok() all return vm, to support function cascading. 19:22 2018/03/11 beyond
    those, stack, push, words ... etc. were added as well.
[x] The %f magic command has no auto-load yet; import peforth is required first --> solved,
    though the current behavior has its merits too. Discussion copied from
    "c:\Users\hcche\OneDrive\文件\Jupyter Notebooks\Creating an IPython extension with custom magic commands.ipynb":
    [x] Adding c.InteractiveShellApp.extensions = ["c:\\Users\\hcche\\Downloads\\csvmagic.py"]
        as above had no effect. Per stackoverflow,
        https://stackoverflow.com/questions/27483637/auto-reload-extension-not-reloading-on-change
        try '%load_ext c:\\Users\\hcche\\Downloads\\csvmagic.py' in jupyter notebook or
        ipython ... indeed a path-spelling issue. Following the example, with csvmagic.py in
        the current directory a plain '%load_ext csvmagic' works.
    [x] Or must it be a module reachable by -m? Right - the "importable" above means exactly
        that --> manually drop it into site-packages (renamed to __init__.py) and it becomes
        importable; tried --> it works! But %%csv only exists after running
        '%load_ext csvmagic'; it is not loaded automatically.
    [x] Moreover "import csvmagic" does nothing, while after the correct arrangement below
        "import peforth" works - why?
    [x] Auto-loading should mirror how peforth installs itself: csvmagic.py's job must be done
        by GitHub\peforth\peforthkernel.py (wrong! by peforth's __init__.py). peforth's %f
        plays both line-magic and cell-magic roles; how to write that? See
        http://ipython.readthedocs.io/en/stable/config/custommagics.html
        # from IPython.core.magic import (register_line_magic, register_cell_magic)
        # @register_line_magic
        # def f(line):
        #     peforth.vm.dictate(line)
        #
        # @register_cell_magic
        # def f(line, cell):
        #     peforth.vm.dictate(cell)
        from IPython.core.magic import register_line_cell_magic
        @register_line_cell_magic
        def f(line, cell=None):
            if cell is None:
                peforth.vm.dictate(line)
            else:
                peforth.vm.dictate(cell)
        def load_ipython_extension(ipython):
            ipython.register_magic_function(f, 'line_cell')
        # see http://ipython.readthedocs.io/en/stable/api/generated/IPython.core.interactiveshell.html?highlight=register_magic_function
    [x] (wrong!) put it into GitHub\peforth\peforthkernel.py
    [x] (wrong!) copy to where c:\Users\hcche\AppData\Roaming\jupyter\kernels\peforth\kernel.json
        points: "c:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\Lib\\site-packages\\peforth\\peforthkernel.py"
    [x] restart jupyter notebook --> no effect, meaning that code never ran. Maybe
        GitHub\peforth\peforthkernel.py is the wrong place (confirmed wrong), or some .json
        must point elsewhere. Read the documents!
    --> Known! c.InteractiveShellApp.extensions = ['peforth'] - this single line - so the code
        above belongs in peforth's __init__.py (correct!) --> try again ... still no effect;
        import peforth remains necessary. Satisfactory enough for now.
    [x] My guess: profile_default\ipython_config.py, home of
        c.InteractiveShellApp.extensions = ['csvmagic','peforth'], was ineffective as a whole.
        An earlier setting tried for "28 Jupyter Notebook tips, tricks and shortcuts" from the
        same file also did nothing, and the /test/ in its path hinted it was the wrong file.
        --> %f get_ipython :> ().ipython_dir . cr reveals the right location:
        C:\Users\hcche\.ipython, i.e. C:\Users\hcche\.ipython\profile_default\ipython_config.py
        --> try it ... auto load_ext works! Right after fixing profile_default\ipython_config.py
        it took effect for newly opened jupyter notebooks immediately.
    [x] The ipython magic initialization in __init__.py must be fool-proofed so that running
        from plain python (no ipython) won't break. Where to test for ipython matters; inside
        peforth's __init__.py seems too early - both of these are always false there, so
        auto load_ext failed:
            if 'get_ipython' in globals():
            if '__IPYTHON__' in dir(__builtins__):
        Forget it - requiring "import peforth" first has advantages anyway, e.g. the greeting
        shows at import time.
    [x] Debugging peforth's __init__.py from inside jupyter notebook is convenient! Set a
        pdb.set_trace() breakpoint before the ipython check and inspect the two expressions
        above --> both are false at that moment, but I found one that works:
            '__IPYTHON__' in __builtins__.keys()
        B i n g o ! ! It turns out __builtins__'s definition changes later, and __builtin__
        doesn't even exist yet at that point.
[x] Tests before a Release v1.15
    [x] All run variants without selftest, preparing the actual release:
        [x] edit %USERPROFILE%\Documents\GitHub\peforth\quit.f : ' <selftest> :: enabled=False
            Run setup.bat to build the selftest-off wheel
        [x] pip uninstall peforth
        [x] pip install peforth-xxxx.whl <== note! the freshly built wheel, or it fetches from the net
        [x] 1. python -i -m peforth              [x] no-selftest .s words exit
        [x] 2. python -i -m peforth version 12345 bye --> check errorlevel
        [x] 3. python, import peforth
            [x] selftest peforth.ok() .s words <--- no parent
            [x] 1234 bye, check echo %errorlevel%
        [x] 4. jupyter notebook --> .s cd help bye .s cd help exit, %f %%f magic command
        [x] 5. repeat the above on ubuntu --> pip3 install (/mnt/...the wheel) into WSL ubuntu
               --> virtualenv is fine
        [/] Consider improving README.rst
        [/] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> start over
    [x] All run variants with selftest:
        [x] edit %USERPROFILE%\Documents\GitHub\peforth\quit.f : ' <selftest> :: enabled=True
            Run setup.bat to refresh the local version for testing
        [x] 1. python -i -m peforth              [x] with-selftest .s words exit bye
        [x] 2. ipython -i -m peforth .' Hello World!!' cr bye
        [x] 3. ipython, import peforth .s words
            [x] selftest peforth.ok() .s words <--- w/parent
            [x] 1234 bye, check echo %errorlevel%
        [x] 4. jupyter notebook --> .s cd help bye .s cd help exit
        [/] Consider improving README.rst
        [/] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> start over
    [x] Upload the tested wheel straight to Pypi
    [x] version bumped to 1.16 (1.10 must be skipped: it would read as 1.1)
    [x] Make a master release up on GitHub --> trivial with GitHub Windows.
[x] README.md needs a better installation guide for jupyter notebook support.
    Install peforth kernel for Jupyter Notebook
    If you have ipython and jupyter installed, do the following steps to add peforth as a
    kernel of Jupyter Notebook:
    1. install peforth:  pip install peforth
    2. copy c:\Users\yourname\AppData\Local\Programs\Python\Python36\Lib\site-packages\peforth\kernel.json
       to   c:\Users\yourname\AppData\Roaming\jupyter\kernels\peforth\kernel.json
       creating the target directories kernels\ and peforth\ manually if they do not exist
    3. edit that file, c:\Users\yourname\AppData\Roaming\jupyter\kernels\peforth\kernel.json,
       and correct this path according to your own computer's situation:
       c:\Users\yourname\AppData\Local\Programs\Python\Python36\Lib\site-packages\peforth\peforthkernel.py
       (the above is the example from my computer)
    [/] Hopefully this installation can be automated - refer to Ynote: "怎麼加 javascript
        kernel 進 jupyter notebook" _ijavascript_
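    Step 2's manual copy could be scripted; a minimal sketch, assuming the default Windows
    paths from the README apply (step 3's path correction stays manual here):

        import os, shutil
        import peforth

        src = os.path.join(os.path.dirname(peforth.__file__), 'kernel.json')
        dst_dir = os.path.expandvars(r'%APPDATA%\jupyter\kernels\peforth')  # Windows location
        os.makedirs(dst_dir, exist_ok=True)   # create kernels\peforth\ when missing, per step 2
        shutil.copy(src, dst_dir)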
[x] After setup.bat successfully updated Pypi there was an error, "batch not found" or the like,
    found while uploading v1.15. Probably because the bye was commented out, so execution fell
    through into the batch stuff below it.
[/] v1.15: %f also showed the problem that a comment without trailing whitespace gets seen by
    the next line:
    %f __main__ :> census_train['age'].head(2) . cr \ strange - how does it know the dtype is int64?
    13:34 18/05/22 cannot reproduce; I have even forgotten how the above came about.
[x] Unrecognized words now automatically get looked up in __main__ <-- works! v1.16
    Unrecognized words looked up in locals; unrecognized words looked up in globals - it seems
    project-k could take a pluggable chain of methods for handling unknown words --> run a word
    named unknown ( 'token' -- thing Y|n ): return True meaning it was handled, False meaning it
    wasn't (the default), in which case the usual unknown message shows.
    --> do the __main__ case first, it's the simplest:
    : unknown py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
      py> str(tos())=='Ûnknôwn' if drop false else true then ;
      // ( token -- thing Y|N) Try to find the unknown in __main__
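    What the new unknown does, restated in plain Python; a sketch using the same sentinel trick
    as the FORTH code (the function name lookup_unknown is mine):

        import sys

        def lookup_unknown(token):
            # fall back to python's __main__ namespace for tokens FORTH doesn't know;
            # the odd sentinel string distinguishes "found None" from "not found at all"
            sentinel = "Ûnknôwn"
            thing = getattr(sys.modules['__main__'], token, sentinel)
            if str(thing) == sentinel:
                return None, False   # truly unknown: let the normal error message happen
            return thing, True       # found: push it and report handled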
[x] After starting to support jupyter magics, a problem popped up: running ipython -i -m peforth
    directly errors out as below, while entering ipython first and then import peforth is fine.
    c:\Users\hcche\Documents\GitHub>ipython -i -m peforth
    Python 3.6.0 (v3.6.0:41df79263a11, Dec 23 2016, 08:06:12) [MSC v.1900 64 bit (AMD64)]
    Type 'copyright', 'credits' or 'license' for more information
    IPython 6.2.1 -- An enhanced Interactive Python. Type '?' for help.
    p e f o r t h v1.16
    source code http://github.com/hcchengithub/peforth
    Type 'peforth.ok()' to enter forth interpreter, 'exit' to come back.
    ---------------------------------------------------------------------------
    NameError Traceback (most recent call last)
    c:\users\hcche\appdata\local\programs\python\python36\lib\runpy.py in run_module(mod_name, init_globals, run_name, alter_sys)
        199 Returns the resulting top level namespace dictionary
        200 """
    --> 201 mod_name, mod_spec, code = _get_module_details(mod_name)
        202 if run_name is None:
        203 run_name = mod_name
    c:\users\hcche\appdata\local\programs\python\python36\lib\runpy.py in _get_module_details(mod_name, error)
        140 try:
        141 pkg_main_name = mod_name + ".__main__"
    --> 142 return _get_module_details(pkg_main_name, error)
        143 except error as e:
        144 if mod_name not in sys.modules:
    c:\users\hcche\appdata\local\programs\python\python36\lib\runpy.py in _get_module_details(mod_name, error)
        107 # Try importing the parent to avoid catching initialization errors
        108 try:
    --> 109 __import__(pkg_name)
        110 except ImportError as e:
        111 # If the parent or higher ancestor package is missing, let the
    c:\Users\hcche\Documents\GitHub\peforth\__init__.py in <module>()
        166 # Define peforth magic command, %f.
        167 @register_line_cell_magic
    --> 168 def f(line, cell=None):
        169 if cell is None:
        170 vm.dictate(line)
    c:\users\hcche\appdata\local\programs\python\python36\lib\site-packages\IPython\core\magic.py in magic_deco(arg)
        227 break
        228 else:
    --> 229 raise NameError('Decorator can only run in context where '
        230 '`get_ipython` exists')
        231
    NameError: Decorator can only run in context where `get_ipython` exists
    c:\users\hcche\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py:2598:
    UserWarning: Unknown failure executing module: <peforth>
    warn('Unknown failure executing module: <%s>' % mod_name)
[x] ipython -m peforth fails probably because get_ipython is not ready at that moment <-- right
    NameError: Decorator can only run in context where `get_ipython` exists ... Once inside the
    ipython command prompt or jupyter notebook there is no problem:
    In [2]: 'get_ipython' in globals()
    Out[2]: True
    --> check for the ipython magic the right way; the error message above gave the hint.
    Probing whether a python token is defined requires try-except:
        try:
            flag = "InteractiveShell" in str(get_ipython)
        except:
            flag = False
        if flag:
            from IPython.core.magic import register_line_cell_magic
            ... snip ....
    Note: with the problem solved, the situation now is:
    1. jupyter notebook: no problem at all.
    2. started via ipython -i -m peforth: after exit to ipython, magic commands are unknown:
       In [1]: %f
       UsageError: Line magic function `%f` not found.
    3. entering ipython first and then import peforth: magics are recognized.
[x] Tests before releasing v1.16
    [x] All run variants with selftest:
        [x] edit %USERPROFILE%\Documents\GitHub\peforth\quit.f : ' <selftest> :: enabled=True
        [x] run python peforth directly from the GitHub folder --> .s cd help exit
        [x] Run setup.bat to refresh the local pip-installed version for testing
        [x] 1. python -i -m peforth              [x] with-selftest .s words exit bye
        [/] 2. ipython -i -m peforth .' Hello World!!' cr bye --> currently broken
        [x] 3. ipython, import peforth .s words
            [x] selftest peforth.ok() .s words <--- w/parent
            [x] 1234 bye, check echo %errorlevel% <-- works even directly from ipython
        [x] 4. jupyter notebook
            import peforth
            %f ." Hello FORTH!"
            %%f Now we redefine the 'unknown' command that was doing nothing
            : unknown ( token -- thing Y|N) // Try to find the unknown token in __main__
                py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
                py> str(tos())=="Ûnknôwn" if drop false else true then ;
            \ here after, when FORTH come accross an unknown token, instead of an error
            \ message, it try to find the token in python __main__ module name space.
            y = 'abc'
            %f y . cr
            %f yy . cr
    [x] Consider improving README.rst
    [x] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> start over
    [x] All run variants without selftest, preparing the actual release:
        [x] edit %USERPROFILE%\Documents\GitHub\peforth\quit.f : ' <selftest> :: enabled=False
        [x] Run setup.bat; run python peforth directly from the GitHub folder to be sure --> .s cd help exit
        [x] Run setup.bat to build the selftest-off wheel
        [x] pip uninstall peforth
        [x] pip install peforth-xxxx.whl <== note! the freshly built wheel, or it fetches from the net
        [x] 1. python -i -m peforth              [x] no-selftest .s words exit
        [x] 2. python -i -m peforth version 12345 bye --> echo %errorlevel%
        [x] 3. python, import peforth
            [x] selftest peforth.ok() .s words <--- no parent
            [x] 1234 bye, check echo %errorlevel%
        [x] 4. jupyter notebook --> .s cd help exit, %f %%f magic command
        [/] 5.
            repeat the above on ubuntu --> pip3 install (/mnt/...the wheel) into WSL ubuntu
            --> virtualenv is fine
    [/] Consider improving README.rst
    [/] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> start over
    [x] Upload the tested wheel straight to Pypi:
        twine upload dist/*
        ID, password: search my Ynote with pypi _account_
    [x] version bumped to 1.17 (1.10 must be skipped: it would read as 1.1)
    [x] test mybinder.org to view peforth > notebook > *.ipynb - no good; guessed it was
        _the_path_issue_ again <--- no, a setup.py issue, see below.
    [x] Make a master release up on GitHub --> trivial with GitHub Windows.
[x] v1.16 released
[x] (create) in peforth.f assumed that with a command line present there should be no reDef
    warning, keeping the screen clean, since reDef is the normal case. But under jupyter
    notebook there is always a command line:
    jupyter notebook> %f py> commandline.strip() tib. ==>
    -f C:\Users\hcche\AppData\Roaming\jupyter\runtime\kernel-17e1c697-6363-49d3-b3af-81708a468835.json (<class 'str'>)
    so the reDef warnings all vanished there too, which is also wrong - under jupyter notebook
    the command line is entirely useless. Conclusion: keep the original check but exclude the
    jupyter notebook case:
    --> ('jupyter' in str(sys.modules) or not commandline.strip())
[x] Tests before releasing v1.17
    [x] All run variants with selftest:
        [x] edit %USERPROFILE%\Documents\GitHub\peforth\quit.f : ' <selftest> :: enabled=True
        [x] run python peforth directly from the GitHub folder --> .s cd help exit
        [x] Run setup.bat to refresh the local pip-installed version for testing
        [x] 1. python -i -m peforth              [x] with-selftest .s words exit bye
        [x] 2. ipython -i -m peforth .' Hello World!!' cr bye
        [x] 3. ipython, import peforth .s words
            [x] selftest peforth.ok() .s words <--- w/parent
            [x] 1234 bye, check echo %errorlevel% <-- works even directly from ipython
        [x] 4. jupyter notebook
            kernel > restart and clear outputs
            import peforth
            %%f Extend and revise peforth's behavior to recognize jupyter notebook globals;
            Dot . is also rewritten to suit the jupyter notebook learning environment.
            : unknown ( token -- thing Y|N) // Try to find the unknown token in __main__
                py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
                py> str(tos())=="Ûnknôwn" if drop false else true then ;
            /// here after, when FORTH come accross an unknown token, instead of alerting
            /// it try to find the token in python __main__ module name space.
            : . tib. ; // ( tos -- ) A better dot that also prints the entire command line
            /// For experiments that need to show both question and result.
            /// "" . prints the command line only, w/o the TOS.
            : path-to-find-modules ( <path> -- ) // Add path to sys.path so "import module-name" can find the module
                CR word trim ( "path" ) py: sys.path.append(pop()) ;
            code \ print(nexttoken('\n')) end-code // Redefine \ command to print the comment line
            x = 123
            %f x . x . \ ==> 123 (<class 'int'>)
    [/] Consider improving README.rst
    [/] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> start over
    [x] All run variants without selftest, preparing the actual release:
        [x] edit %USERPROFILE%\Documents\GitHub\peforth\quit.f : ' <selftest> :: enabled=False
        [x] Run setup.bat; run python peforth directly from the GitHub folder to be sure --> .s cd help exit
        [x] Run setup.bat to build the selftest-off wheel
        [x] pip uninstall peforth
        [x] pip install peforth-xxxx.whl <== note! the freshly built wheel, or it fetches from the net
        [x] 1. python -i -m peforth              [x] no-selftest .s words exit
        [x] 2. python -i -m peforth version 12345 bye --> echo %errorlevel%
        [x] 3. python, import peforth
            [x] no selftest, peforth.ok() .s words <--- no parent
            [x] 1234 bye, check echo %errorlevel%
        [x] 4. jupyter notebook --> *debug* ok> .s cd help exit, %f %%f magic command
        [x] 5. repeat the above on ubuntu
            [x] pip uninstall peforth
            [x] pip install (/mnt/...the wheel) into WSL ubuntu
            [x] ipython -m peforth
            [x] ipython, import peforth, magic commands
    [x] Upload the tested wheel straight to Pypi; continue the setup.bat session, or when necessary:
        twine upload dist/*
        ID, password: search my Ynote with pypi _account_
        --> Error! The GFW?
        HTTPError: 403 Client Error: Invalid or non-existent authentication information.
        for url: https://upload.pypi.org/legacy/
        --> retry ... and this time it succeeded!
        c:\Users\hcche\Desktop\peforth-master>twine upload dist/*
        Uploading distributions to https://upload.pypi.org/legacy/
        Enter your username: hcchen5600
        Enter your password:
        Uploading peforth-1.17-py3-none-any.whl
        12%|...snip....
        c:\Users\hcche\Desktop\peforth-master>
        --> Strange: the pypi.org page already showed the upgrade to 1.17, and under WSL Ubuntu
        pip uninstall peforth then pip install peforth reached 1.17 too; only Windows DOS kept
        giving 1.16 no matter what. Left it alone --> a few minutes later it was truly fine!!
    [x] version bumped to 1.18 (1.10 must be skipped: it would read as 1.1)
    [x] test mybinder.org [http://github.com/hcchengithub/peforth][master][notebook]
        no good; looks like a setup.py problem --> see Ynote: "mybinder.org
        FileNotFoundErErrorno 2 No such file or directory" --> RI: not a bug; just rename
        setup.py so mybinder.org doesn't see it. 2018.12.15 this may be why it is named
        setup.py.whl; I am currently studying whether the command line "python setup.py install"
        is the proper way to install peforth from source.
    [x] Make a master release up on GitHub --> trivial with GitHub Windows.
[x] v1.17 released --> version.txt bumped to v1.18
[x] v1.14 v1.15 v1.16 on WSL Ubuntu, virtualenv, _the_path_issue_ - ipython still failed:
    ...snip...
    ~/tmp/deepspeech-venv/lib/python3.6/site-packages/peforth/__init__.py in readTextFile(pathname)
        33
        34 def readTextFile(pathname):
    ---> 35 f = open(pathname,'r',encoding='utf-8')
        36 # for line in f:
        37 s = f.read()
    FileNotFoundError: [Errno 2] No such file or directory: '/usr/local/lib/site-packages/peforth/version.txt'
    <--- because the .py files and the other files got installed into separate places
    ...snip...
    [x] https://stackoverflow.com/questions/122327/how-do-i-find-the-location-of-my-python-site-packages-directory
    [x] v1.17 still used site.getsitepackages() plus a little brute force:
        deli = '\\' if os.name == 'nt' else '/'
        path = "something wrong peforth path not found"
        for p in (pp for pp in site.getsitepackages() if pp.endswith("site-packages")):
            dirs = p.split(deli)
            if dirs[-2] != 'lib':  # expecting 'lib'
                dirs = dirs[:-2] + [dirs[-1]];  # if -2 is not 'lib' then remove it (pythonM.N or the likes)
            if 'lib' in dirs:  # extra check, may not be necessary
                path = deli.join(dirs) + deli + "peforth" + deli
    [x] test with WSL Ubuntu virtualenv --> failed
    [x] v1.17 failed for WSL Ubuntu both with and without virtualenv. <-- the v1.21 FP problem spots:
        When without virtualenv:
        hcchen5600@WKS-4AEN0404:~$ python -m peforth
        Traceback (most recent call last):
        ...snip...
            File "/home/hcchen5600/.local/lib/python3.6/site-packages/peforth/__init__.py", line 67, in <module>
            exec(readTextFile(path + "version.txt"),{},locals())
            File "/home/hcchen5600/.local/lib/python3.6/site-packages/peforth/__init__.py", line 35, in readTextFile
            f = open(pathname,'r',encoding='utf-8')
        FileNotFoundError: [Errno 2] No such file or directory: 'something wrong peforth path not foundversion.txt'
        When with virtualenv:
        (playground) hcchen5600@WKS-4AEN0404:~$ python -m peforth
        Traceback (most recent call last):
        ...snip...
File "/home/hcchen5600/playground/lib/python3.6/site-packages/peforth/__init__.py", line 57, in <module> for p in (pp for pp in site.getsitepackages() if pp.endswith("site-packages")): AttributeError: module 'site' has no attribute 'getsitepackages' 答案: 還是這篇文章:https://stackoverflow.com/questions/122327/how-do-i-find-the-location-of-my-python-site-packages-directory [x] 正確答案先直接列出來 w/o virtualenv /home/hcchen5600/.local/lib/site-packages/peforth/version.txt with virtualenv /home/hcchen5600/playground/lib/site-packages/peforth/version.txt w/o virtualenv C:\Users\hcche\AppData\Local\Programs\Python\Python36\Lib\site-packages\peforth\version.txt [x] 方法一、 sys.path 終極答案 Ubuntu with virtualenv 可用(要剃除"python3.6") >>> import sys >>> [f for f in sys.path if f.endswith('site-packages')] ['/home/hcchen5600/playground/lib/python3.6/site-packages'] Ubuntu w/o virtualenv 可用(要剃除"python3.6") >>> import sys >>> [f for f in sys.path if f.endswith('site-packages')] ['/home/hcchen5600/.local/lib/python3.6/site-packages'] Windows w/o virtualenv 正確 >>> [f for f in sys.path if f.endswith('site-packages')] ['C:\\Users\\hcche\\AppData\\Roaming\\Python\\Python36\\site-packages', 'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages'] --> 用這個方法只要把 v1.17 的 __init__.py 原來 "site.getsitepackages()" 改成 "sys.path" 即可,真是的! [x] 方法二、 site.getsitepackages() <--- v1.16 失敗,三個中最差的,我的媽! python -c "import site; print([f for f in site.getsitepackages() if f.endswith('site-packages')])" Windows w/o virtualenv 正確 python -c "import site; print([f for f in site.getsitepackages() if f.endswith('site-packages')])" ['C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages'] Ubuntu w/o virtualenv 錯誤! hcchen5600@WKS-4AEN0404:~$ python Python 3.6.5 (default, May 3 2018, 10:08:28) [GCC 5.4.0 20160609] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import site >>> site.getsitepackages() ['/usr/local/lib/python3.6/dist-packages', '/usr/lib/python3/dist-packages', '/usr/lib/python3.6/dist-packages'] Ubuntu with virtualenv 直接陣亡,根本不 support 這個命令! (playground) hcchen5600@WKS-4AEN0404:~/playground/bin$ python -c "import site; print([f for f in site.getsitepackages() if f.endswith('site-packages')])" Traceback (most recent call last): File "<string>", line 1, in <module> AttributeError: module 'site' has no attribute 'getsitepackages' [x] 方法三、 不行! <--- python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())" Windows w/o virtualenv 正確 c:\Users\hcche\Downloads>python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())" C:\Users\hcche\AppData\Local\Programs\Python\Python36\Lib\site-packages Ubuntu w/o virtualenv 錯誤! hcchen5600@WKS-4AEN0404:~$ python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())" /usr/lib/python3/dist-packages <--- 錯了,不能用。 Ubuntu with virtualenv 可用(要剃除"python3.6") (playground) hcchen5600@WKS-4AEN0404:~/playground/bin$ python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())" /home/hcchen5600/playground/lib/python3.6/site-packages [x] 意外發現 python -m peforth include 1.f 時, 1.f 裡面不認得 ok() vm.ok() RI: the recent __init__.py "run once" section that runs quit.f that runs command line arguments is *before* the definition of ok()! --> I move it down to the bottom then problem is gone. This solution will be released with v1.18. 
[x] mybinder.org wouldn't start: peforth/version.txt file not found <--- RI: renaming setup.py
    fixes it, expecting v1.18. See my Ynote: "mybinder.org FileNotFoundErErrorno 2 No such file
    or directory" --> My guess was setup.py's file layout: Desktop\peforth-master\ uses an extra
    peforth folder, so seen from where the project's own setup.py sits, version.txt is not at
    peforth/version.txt but directly at version.txt.
    v1.16: first modify Desktop\peforth-master\setup.py directly and build a wheel locally to
    see whether it works; if it does, it proves the student's extra peforth folder is
    unnecessary, and removing it opens a way out. --> It really worked: move everything out of
    peforth/, fix setup.py, drop peforth/, run from the working directory as below. Success!
    c:\Users\hcche\Desktop\peforth-master>pip wheel --wheel-dir=dist .
    Processing c:\users\hcche\desktop\peforth-master
    Building wheels for collected packages: peforth
    Running setup.py bdist_wheel for peforth ... done
    Stored in directory: c:\users\hcche\desktop\peforth-master\dist
    Successfully built peforth
    c:\Users\hcche\Desktop\peforth-master>
    --> which suggests Desktop\peforth-master isn't needed at all and the local GitHub repo
    would do --> wrong wrong wrong! the Desktop\peforth-master\peforth folder is required
    --> peforth/version.txt file not found is probably still _the_path_issue_
    [x] Built a 1.17 release to check whether mybinder.org got solved --> failed!! see also
        Ynote: "研究 peforth 的 path 到底正確該如何"
    [x] setup.py was made by the student to build peforth's whl. Now that mybinder.org also
        reads it, more care is needed - I think that's the cause ... --> maybe two setup.py's:
        one at the peforth folder, the other for building the .whl. When building the .whl, the
        setup.py is at the parent folder - but is that a must?
        --> anywhere>pip wheel --wheel-dir=dist c:\Users\hcche\Desktop\peforth-master
        --> c:\Users\hcche\Desktop\peforth-master>pip wheel --wheel-dir=dist peforth
        both of the above build a peforthxxxxx.whl
        --> the problem only appeared because mybinder.org saw peforth/setup.py; try renaming it
        to setup.py.disabled ... RI: renaming setup.py is the whole fix!!! Just keep
        mybinder.org from seeing a setup.py. --> As said, our setup.py exists to build the .whl
        for pip to read, not for mybinder.org.
    [x] Merge to master; no rush to release - it is purely a setup.py matter, unrelated to the
        program. To make mybinder.org run, renaming setup.py on github to setup.py.whl is
        enough. --> expecting v1.18
[x] Tests before a Release v1.18 <--- on pypi.org already
    [x] All run variants with selftest:
        [x] edit %USERPROFILE%\Documents\GitHub\peforth\quit.f : ' <selftest> :: enabled=True
        [cancelled] run python peforth directly from the GitHub folder --> same as -m peforth
        [x] Run setup.bat to refresh the local pip-installed version for testing
        [x] 1. python -i -m peforth              [x] with-selftest .s words exit bye
        [x] 2. ipython -i -m peforth .' Hello World!!' cr bye
        [x] 3. ipython, import peforth .s words
            [x] selftest peforth.ok() .s words <--- w/parent
            [x] 1234 bye, check echo %errorlevel% <-- works even directly from ipython
        [x] 4. jupyter notebook
            kernel > restart and clear outputs
            import peforth
            %%f Extend and revise peforth's behavior to recognize jupyter notebook globals;
            Dot . is also rewritten to suit the jupyter notebook learning environment.
            : unknown ( token -- thing Y|N) // Try to find the unknown token in __main__
                py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
                py> str(tos())=="Ûnknôwn" if drop false else true then ;
            /// here after, when FORTH come accross an unknown token, instead of alerting
            /// it try to find the token in python __main__ module name space.
            : . tib. ; // ( tos -- ) A better dot that also prints the entire command line
            /// For experiments that need to show both question and result.
            /// "" . prints the command line only, w/o the TOS.
            : path-to-find-modules ( <path> -- ) // Add path to sys.path so "import module-name" can find the module
                CR word trim ( "path" ) py: sys.path.append(pop()) ;
            code \ print(nexttoken('\n')) end-code // Redefine \ command to print the comment line
            x = 123
            %f x . x . \ ==> 123 (<class 'int'>)
        [x] 5.
            jupyter notebook --> peforth kernel
    [/] Consider improving README.rst
    [/] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> start over
    [x] All run variants without selftest, preparing the actual release:
        [x] edit %USERPROFILE%\Documents\GitHub\peforth\quit.f : ' <selftest> :: enabled=False
        [x] Run setup.bat to build the selftest-off wheel
        [x] pip uninstall peforth
        [x] pip install peforth-xxxx.whl <== note! the freshly built wheel, or it fetches from the net
        [x] 1. python -i -m peforth              [x] no-selftest .s words exit
        [/] 2. python -i -m peforth version 12345 bye --> echo %errorlevel%
        [/] 3. python, import peforth
            [/] no selftest, peforth.ok() .s words <--- no parent
            [/] 1234 bye, check echo %errorlevel%
        [x] 4. jupyter notebook --> *debug* ok> .s cd help exit, %f %%f magic command
        [x] 5. repeat the above on ubuntu
            [x] pip uninstall peforth
            [x] pip install (/mnt/...the wheel) into WSL ubuntu
            [x] ipython -m peforth
            [x] ipython, import peforth, magic commands
    [x] Upload the tested wheel straight to Pypi; continue the setup.bat session, or when necessary:
        twine upload dist/*
        ID, password: search my Ynote with pypi _account_
    [x] test mybinder.org @ [http://github.com/hcchengithub/peforth][develop][notebook]
        this has nothing to do with pypi.org; whatever is pushed to github takes effect immediately.
    [x] pypi.org already shows the upgrade to 1.18, and WSL Ubuntu's pip uninstall peforth ->
        pip install peforth gets it too, but Windows DOS kept giving 1.16 no matter what.
        Left alone --> truly fine after a few minutes!!
    [x] WSL Ubuntu w/o virtualenv --> python -m peforth ... ok
    [x] WSL Ubuntu with virtualenv --> python -m peforth ... ok
    [/] test colab --> v1.18 still failed, still the path problem :-(
    [x] version bumped to 1.19 (1.10 must be skipped: it would read as 1.1)
    [x] Make a master release up on GitHub --> trivial with GitHub Windows.
[x] test colab --> v1.18 still failed with the path problem :-(
    v1.18 is failed on colab, the chance is that v1.16 works fine on colab.
    [x] use v1.16 (pip install peforth==1.16 on colab) to check sys.path & site.getsitepackages()
        ---- from colab with peforth v1.16 ----
        import site
        site.getsitepackages()
        ['/usr/local/lib/python3.6/dist-packages', '/usr/lib/python3/dist-packages', '/usr/lib/python3.6/dist-packages']
        import sys
        sys.path
        ['', '/env/python', '/usr/lib/python36.zip', '/usr/lib/python3.6',
         '/usr/lib/python3.6/lib-dynload', '/usr/local/lib/python3.6/dist-packages',
         '/usr/lib/python3/dist-packages', '/usr/local/lib/python3.6/dist-packages/IPython/extensions',
         '/content/.ipython']
        -------- actual peforth path on Google colab ---------------
        !ls /usr/local/lib/python3.6/dist-packages/peforth
        __init__.py __main__.py peforthkernel.py projectk.py __pycache__ setup.py
        !ls /usr/local/lib/site-packages/peforth
        kernel.json peforthkernel.py __pycache__ version.txt peforth.f peforth.selftest quit.f
    [/] So the answer is clear here ... try all possible directories with some guessing to find
        /peforth/version.txt - that's doable.
    [x] can be setup.py's problem. I don't think all modules are facing the same annoying
        problem. --> try to simplify setup.py.whl --> RTFD:
        https://packaging.python.org/guides/distributing-packages-using-setuptools/?highlight=data_files#data-files
    [x] testing c:\Users\hcche\Desktop\peforth-master\setup.py.improved which uses
        package_data={...} instead of data_files=[...] in setup.py --> rebuilding the wheel with
        the modified setup.py is odd: it has to go through github\peforth\setup.bat or pip wheel
        doesn't build at all. In any case a workable way produced v1.19; see Ynote: "Pack
        peforth to peforth.whl" > the "2018/07/02 13:06" discussion.
        --> opening the ~.whl (a zip file) directly confirms the success!
    [x] v1.18's sys.path hack is wrong now --> removed.
    [x] Couldn't path="" do, erring only when setup.bat needs to read it? --> Really no: when
        version.txt is read, os.getcwd() is genuinely the working directory of that moment,
        which cannot work. --> So the sys.path method is still required
        --> and Windows was never wrong in the first place!
[x] The benefit of the reworked setup.py is that the data files now sit beside the .py
    files, but obtaining the path is still a problem -- Ubuntu and colab can't both be
    satisfied --> searching sys.path for peforth/version.txt remains the only way
    ... not hard:
        path = "something wrong peforth path not found"
        for p in (pp for pp in sys.path if pp.endswith("site-packages")):
            if os.path.isfile(p + deli + 'peforth' + deli + 'version.txt'):
                path = p + deli + 'peforth' + deli
                break
        vm.path = path
        pdb.set_trace() # *debug*
    [x] windows (non-anaconda virtualenv), WSL Ubuntu w/o virtualenv, with virtualenv
        --> all pass!
[x] Tests before a Release v1.19 --> v1.21 actually
    [x] All run modes with selftest:
        [x] Edit GitHub\peforth\quit.f ' <selftest> :: enabled=True
        [x] Run setup.bat to refresh the locally pip-installed copy for testing
        [x] 1. python -i -m peforth [x] with-selftest .s words exit bye
        [x] 2. ipython -i -m peforth .' Hello World!!' cr bye
        [x] 3. ipython import peforth .s words
            [x] selftest peforth.ok() .s words <--- w/parent
            [x] 1234 bye check echo %errorlevel% <-- exiting straight from ipython works too.
        [x] 4. jupyter notebook
            kernel > restart and clear outputs
            import peforth
            %%f (the same unknown / . / path-to-find-modules / \ snippet as in the
            v1.18 checklist above)
            x = 123
            %f x . x . \ ==> 123 (<class 'int'>)
        [x] 5. jupyter notebook --> peforth kernel --> .s words
        [/] Consider improving README.rst
        [/] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> redo
    [x] All run modes without selftest, preparing the version to release:
        [x] Edit GitHub\peforth\quit.f ' <selftest> :: enabled=False
        [x] Run setup.bat to build the selftest-disabled wheel
        [x] pip uninstall peforth
        [x] pip install peforth-xxxx.whl <== Note! Use the freshly built wheel, otherwise it fetches from the net.
        [x] 1. python -i -m peforth [x] no-selftest .s words exit
        [x] 2. python -i -m peforth version 12345 bye --> echo %errorlevel%
        [x] 3. python import peforth
            [x] no selftest, peforth.ok() .s words <--- no parent
            [x] 1234 bye check echo %errorlevel%
        [x] 4. jupyter notebook --> *debug* ok> .s cd help exit
            %f %%f magic command
        [x] 5. repeat the above in ubuntu
            [x] pip uninstall peforth
            [x] pip install (/mnt/...the wheel) to WSL ubuntu
            [/] ipython -m peforth
            [/] ipython , import peforth , magic commands
    [x] Update Pypi with the wheel just tested: continue the setup.bat run; when necessary:
        twine upload dist/*
        ID, password: search my Ynote with pypi _account_
    [x] The pypi.org page already shows the upgrade to 1.19; if not, look again in a
        few minutes.
    [x] Under WSL Ubuntu try pip uninstall peforth -> pip install peforth
    [x] WSL Ubuntu with and w/o virtualenv --> python -m peforth
    [x] Try under Windows DOS
    [x] test mybinder.org @ [http://github.com/hcchengithub/peforth][develop][notebook]
        This is unrelated to pypi.org; it takes effect the moment the push reaches github.
    [x] test colab --> v1.19 --> shit, wrong again! Must not restrict the search to
        site-packages; dist-packages has to be accepted too:
            deli = '\\' if os.name == 'nt' else '/'
            path = "wrong"
            for p in sys.path:
                if os.path.isfile(p + deli + 'peforth' + deli + 'version.txt'):
                    path = p + deli + 'peforth' + deli
                    break
        With this change it works -- release it as v1.21! Shit shit . . .
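        # A minimal, runnable sketch of the v1.21 version.txt locator described
        # above. It scans every sys.path entry (site-packages AND dist-packages
        # alike) instead of filtering on the directory name, which was the v1.19
        # mistake on Colab. The "wrong" sentinel mirrors the log's own snippet.
        import os, sys
        deli = '\\' if os.name == 'nt' else '/'
        path = "wrong"
        for p in sys.path:
            if os.path.isfile(p + deli + 'peforth' + deli + 'version.txt'):
                path = p + deli + 'peforth' + deli
                break
        # os.path.join(p, 'peforth', 'version.txt') would be the more idiomatic
        # spelling; the explicit delimiter is kept to match the log verbatim.
        print(path)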
    [x] __init__.py [x] rebuild setup.bat [x] release v1.21 to pypi.org
    [x] test colab ... !pip install peforth==1.21 needs a little wait . . .
        v1.21 works! (tears of joy)
    [x] Make a master release up to GitHub --> trivial with GitHub Windows.
    [x] Bump version to 1.22 (must skip 1.20, which would read as 1.2)
[x] 14:48 2018-12-09 python object (attributes -> values) and hash table or dictionary
    (keys --> values) are confusing me, especially when JavaScript sees both as the
    same thing. The python 'dir' function lists an object's attributes and JSON can
    stringify a hash table to a readable string. Let's make an experiment:

        \ o1 is a dict
        py> {'a':11,'b':22} constant o1
        OK o1 tib. --> {'a': 11, 'b': 22} \ it's a dict so it's shown as a dict
        OK o1 :> keys() . cr --> dict_keys(['a', 'b']) \ dict has keys
        OK o1 :> values() . cr --> dict_values([11, 22]) \ dict has values
        \ it's also an object
        OK o1 dir . cr \ so it has attributes
        --> ['clear', 'copy', 'fromkeys', 'get', 'items', 'keys', 'pop', 'popitem',
        'setdefault', 'update', 'values']
        OK o1 stringify . cr
        {
            "a": 11,
            "b": 22
        }
        OK

    Seen this way, the dict/object confusion is a JavaScript user's problem. Everything
    is an object, but only a dict has a hash table. Use (see) dir .members to inspect
    an object's attributes; use (see) keys values to inspect a dict; use stringify to
    view the dict'fied form of anything. --> Conclusion: explaining this clearly in
    help (see) is enough. obj>keys, inherited from jeforth, duplicates dir and keys,
    so it is rarely used now.
[x] Install peforth from source ---- 2018.12.15 learned to use python setup.py install;
    changes needed ----
    [x] Ynote: "研究 install peforth from source 的方法" -- succeeded.
    [x] Conclusion: the peforth/ directory layout has to follow the grad student's
        arrangement, changing the original (and actually rather unnatural) way of
        running: C:\Users\hcche\Documents\GitHub\>python peforth
        becomes running from inside the peforth directory -- which is great!
    [X] v1.22 and 1.23 __main__.py still just did import peforth -- pointless --> now
        a test.py replaces __main__.py for developing and debugging
    [x] The pywinio repo likewise nests another pywinio/ folder; peforth will follow
        the same layout from now on.
    [x] Reorganize GitHub/peforth following the grad student's layout:
        c:\Users\hcche\Documents\GitHub\peforth\..
        Directories              Files
        --------------------     ---------------------------
        .git\                    .gitattributes
        .ipynb_checkpoints\      LICENCE
        __pycache__\             admin.bat
        notebook\                requirements.txt
        peforth\                 LICENSE
        peforth.egg-info\        README.md
        playground\              README.rst
                                 setup.bat
                                 setup.py
                                 setup.py.whl
                                 log.txt
                                 .gitignore
        c:\Users\hcche\Documents\GitHub\peforth\peforth\..
        Directories              Files
        --------------------     ---------------------------
                                 __main__.py
                                 kernel.json
                                 peforthkernel.py
                                 projectk.py
                                 peforth.selftest
                                 version.txt
                                 __init__.py
                                 quit.f
                                 peforth.f
    [x] remove existing peforth so as to try setup.py install
        Python on my computer at home is anaconda, so I thought I had to remove it by
        the "conda uninstall" command. That was wrong. Doing it by pip as usual works
        fine. See Ynote: "研究 install peforth from source 的方法" for the log.
    [x] now try "python setup.py install" -- it works !!!!
        How to view setup.py's help:
        c:\Users\hcche\Documents\GitHub\peforth>python setup.py --help
    [x] setup.bat can be greatly simplified.
        [New, post-v1.22] Packaging steps, 2018/12/16 11:02
        See my Ynote: "Pack peforth to peforth.whl"
        1. Check ~\GitHub\peforth\setup.py for missed new files and files to drop.
        2. Run c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
           to get peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
        3. Run c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
           Account and password: see Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
        4. pip uninstall peforth and then pip install peforth to verify.
        5. Done!
[x] 13:27 2019-03-06 code ... 
end-code definitions can take the 2nd line of xt.__doc__ as the help text

        code txt2json # ( txt -- dict ) Convert given string to dictionary
            push(json.loads("".join([ c if c != "'" else '"' for c in pop()]))) end-code
        ' txt2json :> xt :> __doc__ -->
            def xt(_me=None): ### txt2json ###
            # ( txt -- dict ) Convert given string to dictionary
            push(json.loads("".join([ c if c != "'" else '"' for c in pop()]))) (<class 'str'>)

    18:04 2019-05-09 done:

        # In projectk.py:
        # The basic FORTH word 'end-code's run time.
        def doendcode(_me=None):
            global compiling
            if compiling!="code":
                panic("Error! 'end-code' a none code word.")
            current_word_list().append(Word(newname,newxt))
            last().vid = current;
            last().wid = len(current_word_list())-1;
            last().type = 'code';
            # ---------
            mm = re.match(r"^.*?#\s*(.*)$", last().xt.__doc__.split('\n')[1])
            last().help = mm.groups()[0] if mm and mm.groups()[0] else ""
            # ---------
            wordhash[last().name] = last();
            compiling = False;

    --> py> doendcode .source <---- now shows the correct source code
    [x] Tried defining a code word and checking its help -- the first "# foo bar" line
        is indeed captured as the help.
[X] Copy over the notes explaining unknown, debug, and locals()
    older unsync'ed notes on my LRV2

    v1.22 Since peforth is mainly used with jupyter notebook to trace code, set
    breakpoints, ... etc., unknown and ... and # should be added into the built-in
    words, plus the ability to view local variables.
    [x] I remember that I have done making 'unknown' predefined . . . no.
        16:51 2019-01-12 I am now working on making 'unknown' try locals. __main__ is
        an object so global variables are accessed by getattr(); however locals and
        globals are dictionaries that should be accessed by dict.get(key,default)
        instead. See
        https://stackoverflow.com/questions/3089186/python-getattr-equivalent-for-dictionaries
        [x] done an example @ http://localhost:8888/notebooks/OneDrive/%E6%96%87%E4%BB%B6/Jupyter%20Notebooks/Siraj%20make_a_neural_net_live_demo.ipynb

        Source Code
        ===========
        none value _locals_ // ( -- dict ) locals passed down from ok()
        false value debug // ( -- flag ) enable/disable the ok() breakpoint
        : unknown ( token -- thing Y|N) // Try to find the unknown token in __main__ or _locals_
            _locals_ if \ in a function
                ( token ) _locals_ :> get(tos(),"Ûnknôwn") ( token, local )
                py> str(tos())!="Ûnknôwn" ( token, local, unknown? )
                if ( token, local ) nip true exit ( return local Y ) else drop ( token ) then
            then
            py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn") ( thing )
            py> str(tos())=="Ûnknôwn" if ( thing ) drop false else true then ;
            /// Example: Set a breakpoint in python code like this:
            /// if peforth.execute('debug').pop() : peforth.push(locals()).ok("bp>",cmd='to _locals_')
            /// Example: Save locals for investigations:
            /// if peforth.execute('debug').pop() : peforth.push(locals()).dictate('to _locals_')
            /// That enters peforth that knows variables in __main__ and locals at the breakpoint.
            /// 'exit' to leave the breakpoint and forget locals.
        : exit ( -- ) // ( -- ) Exit the breakpoint forget locals and continue the process
            none to _locals_ py: vm.exit=True ;
        code # print(nexttoken('\n')+'\n') end-code // print the comment line after #
        : --> ( result -- ) // Print the result with the command line.
            py> tib[:ntib].rfind("\n") py> tib[max(pop(),0):ntib].strip() ( result cmd-line )
            s" {} {} ({})" :> format(pop(),tos(),type(pop())) . cr ;
            /// Good for experiments that need to show command line and the result.
[X] 10:48 2019-05-11 older note
    During development I don't want to touch the pip'ed peforth -- breaking it is a
    real hassle -- so I want to run from the working folder instead of pip-installing
    every change into site-packages.
    [x] __main__.py -- why on earth did it merely import peforth back then? The whole
        point is to run the local, modified version!
        --> 15:48 2019-05-11 Probably because the path couldn't be sorted out, so the
            problem was simplified (since commit c3d7677 on Oct 8, 2017). __main__.py
            is the entry point when running python -m peforth and must be kept working.
            11:26 2019-05-11 while __init__.py is the 'import peforth' entry point.
        --> 11:24 2019-05-11 __main__.py is the entry executed by
            c:\Users\hcche\Documents\GitHub\peforth>python peforth and
            c:\Users\hcche\Documents>python -m peforth
            see https://www.tuicool.com/articles/iYRfe2
            https://stackoverflow.com/questions/44977227/how-to-configure-main-py-init-py-and-setup-py-for-a-basic-package
        --> 11:51 2019-05-11 how about a test.py that does what __main__.py is supposed
            to do when running ~GitHub\peforth>python peforth?
            --> A good idea, but the path in __init__.py would then be wrong -- deal
            with it!! --> In __init__.py, add the other places where version.txt might
            be found to the search. Done.
        --> It works. Being able to run directly is enough; no need to insist on
            executing the peforth directory as in the early days. How to run:
            c:\Users\hcche\Documents\GitHub\peforth\peforth>python test.py
            __run__.py --> finally named test.py, the most natural choice
            # Tried every approach; in the end exec(open().read()) is the most
            # include-like:
            # from . import __init__
            # from __init__ import ok
            # import subprocess; subprocess.call("__init__.py", shell=True)
            exec(open("__init__.py").read()) # this is like: include __init__.py
            ok('\n')
    [X] __main__.py still has to import peforth; otherwise open("__init__.py") fails
        with file-not-found right away. And test.py naturally only runs from the right
        directory -- hence the name test.py ;-D
    [x] To feed in a command line like "python test.py foo bar", test.py must tell
        whether we were launched "from ipython or jupyter notebook" (see quit.f), so
        the file name test.py is now fixed; renaming it means changing quit.f too --
        or improving how quit.f detects ipython.
        \ ~~~~~~ quit.f ~~~~~~
        \ When in ipython or jupyter notebook the command line is used by
        \ ipython already. In jupyter notebook, it looks like:
        \
        \ vm.commandline ----------------------------------------------------------------------------------.
        \ sys.argv[0] --------.                                                                             |
        \                     |                                                                             |
        \                     V                                                                             V
        \ c:\users\hcche\appdata\local\programs\python\python36\lib\site-packages\ipykernel_launcher.py -f C:\Users\hcche\AppData\Roaming\jupyter\runtime\kernel-4be53345-1ddd-47c2-bef2-5e9801688f3f.json
        \
        \ So peforth can't support command line statements for ipython and jupyter notebook.
        \ For none ipython cases, I have no better idea than to check sys.argv[0] for '.py'
        \ and the likes so far 2019-05-15. See the following code, the filename 'test.py' is
        \ fixed-coded here therefore.
    [X] The command line is also how to run .f files that live outside site-packages, e.g.:
        c:\Users\hcche\Documents\GitHub\peforth\peforth>python test.py include ..\playground\misc.f
        c:\Users\hcche\Documents\>python -m peforth include GitHub\peforth\playground\misc.f
        Both lines work.
    [x] 18:35 2019-05-09 I forgot how peforth is maintained!!!! Where do the changes
        above go? --> Edit directly in the github working directory --> but what runs
        is then still the copy installed into site-packages, because __main__.py in
        fact does import peforth
        c:\Users\hcche\Documents\GitHub\peforth>python peforth
        16:48 2019-05-11 this early way of running has now become
        c:\Users\hcche\Documents\GitHub\peforth\peforth> python test.py
        --> 16:48 2019-05-10 strange: on the LRV2 OA machine, pip list shows peforth 1.21!!
            Yet python -m peforth runs 1.22, and right after pip uninstall peforth,
            pip list shows peforth 1.23 (now correct).
            [x] 16:38 2019-05-22 saw something similar again on T550 while releasing
                v1.23: pip uninstall peforth did remove the v1.23 installed by
                python setup.py install, but checking inside site-packages, a v1.22
                egg still existed --> simply ran pip uninstall peforth once more and
                that removed it too.
                --> A pip-installed peforth has no "egg" in its site-packages path,
                which is how to tell them apart. It also confirms that pip uninstall
                doesn't remove egg installs (those made by python setup.py install);
                keep running pip uninstall peforth until the older copies surface.
            --> My guess: the 1.23 freshly installed into site-packages by
                ~\GitHub\peforth>python setup.py install didn't overwrite the original,
                because it installs an egg whose path differs from a whl install!
    [X] After c:\Users\hcche\Documents\GitHub\peforth>python setup.py install the
        effect is indeed like a pip install --> python -m peforth runs, but the paths
        differ:
        pip install:
        c:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth\version.txt
        setup.py:
        c:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth-1.23-py3.6.egg\peforth\version.txt
    [X] Installing into site-packages via ~\GitHub\peforth>python setup.py install:
        despite the different path, jupyter notebook is perfectly happy -- at most a
        Kernel > Restart and it takes effect immediately. Exactly what I hoped for by
        "installing straight from source", sparing pip install and the old brute-force
        setup.bat.
        Conclusion:
        1. Edit the GitHub source code directly (use GitHub to keep every version safe)
        2. pip uninstall peforth to clean out the old copies
        3. c:\Users\hcche\Documents\GitHub\peforth>python setup.py install to install
           from source
        4. Two ways to run and test:
           a. In Jupyter Notebook: just Kernel > Restart and the new version is live.
           b. Run c:\Users\hcche\Documents\GitHub\peforth\peforth>python test.py
        5. repeat
[X] 19:07 2019-05-13 this piece of code in peforth.f was supposed to handle the alias'
    new // help, but it misbehaves again:

        \ Redefine // to "replace" alias' help message instead of "append".
        \ Append if last().help has stack diagram but no help message, otherwise replace.
        \ Stack diagram might be unexpectedly given again. That can be resolved by putting
        \ complete help message to the original word or use the trick of // dummy and then
        \ // again or simply don't give it again in the alias' help message.
        <py>
        '''
        m = re.match("(?P<before>.*?)(?P<stackdiagram>\(.*\))(?P<after>.*)", last().help)
        if m and (m.groupdict()['before'] + m.groupdict()['after']).strip()=="":
            last().help += nexttoken('\\n|\\r');
        else:
            last().help = nexttoken('\\n|\\r');
        '''
        </pyV> -indent ' // py: pop().xt=genxt("//",pop(1))

    The problem: some words lose the stack diagram in their help!!
        [r Prepare an array of data to compare with rstack in selftest.
        Example: [r 1,2,3 r] [d True d] [p 'word1','word2' p]
        [r...r] section is optional, [d...d] section is the judge.
    --> Commenting it out doesn't help! --> 13:34 2019-05-15 caused by the new
        ( comment ) in misc.f. --> 19:15 2019-05-15 simply gave up feeding (comment)
        into help automatically; for help just use //. And (comment) is now nested --
        even better. v1.23
    [X] 14:06 2019-05-15 I now think the original (comment) is worse than the nested
        (comment) in my gist words4jupyter.py. Why keep such an obscure (comment) just
        so the stack diagram lands in last.help -- // alone is enough!
    [X] 16:39 2019-05-16 the original // kept assuming a preceding (comment) had
        already gone into help, hence the +=; no wonder it misbehaved. Dropped that;
        plain last().help = nexttoken('\n|\r'); is all it takes. v1.23

        \ to be
        code ( # ( <str> -- ) // Comment down to ')' which can be nested if balanced
            nextstring('\(|\)')['str'] # skip TIB to the next delimiter
            cc = tib[ntib] # cc must be delimiter '(', ')', or '\n'
            vm.ntib+=1 # skip any of them
            if cc=='(':
                execute(_me) # recursion of (
                execute(_me) # recursion of )
            end-code immediate

        \ was
        code ( # ( <stack diagram> -- ) Get stack diagram to the last's help.
            a = nexttoken('\\)')
            b = nexttoken() # the ')'
            if compiling and last().help=="": # skip if help already exists
                last().help = '( ' + a + b + ' '
            end-code immediate
            /// Nested not allowed yet.
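        # A minimal sketch in plain Python of the balanced-nesting idea behind the
        # "to be" ( word above; it is structured a little differently from the
        # forth recursion, and the function name is illustrative only.
        def skip_comment(s, i):
            """Return the index just past the ')' matching the '(' at s[i]."""
            assert s[i] == '('
            i += 1
            while i < len(s):
                if s[i] == '(':
                    i = skip_comment(s, i)   # recurse into the inner '('
                elif s[i] == ')':
                    return i + 1             # this level is balanced
                else:
                    i += 1
            return i  # unbalanced comment: ran off the end of the input
        print(skip_comment("( outer ( inner ) still outer ) rest", 0))  # -> 31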
[X] Values and constants deleted via marker leave garbage behind in vm[context] --
    never reclaimed! marker needs strengthening; watch out for forget too.
    --> 123 value x char abc value ss vm.forth dict>keys -->
    dict_keys(['CRLF', 'obj2dict', '_locals_', 'debug', 'screen-buffer', 'description',
    'expected_rstack', 'expected_stack', 'test-result', '[all-pass]', 'xxx', 'x', 'y', 'ss'])
                                                                            ^^^ ^^^^
    so they do live in vm.forth
    --> run marker --> words no longer lists x, ss -- of course
    --> but they still exist in vm.forth: accumulating garbage!!
    v1.23 still has this problem and I don't know what to do about it.... FP, see
    below at 2020/07/27 08:38:15 -- value, constant, and to must be redefined . . . . .
[X] Rewrote all the code words, replacing their awkward help with the new # feature --
    much more natural. done! v1.23
[X] Dropped the oddities in quit.f --> inport, outport, harry_port v1.23
[X] Pulled in the gist material, most importantly the (comment) that supports
    nesting. v1.23
[X] Dropped the special treatment of the first ( ... ) in a colon definition; just use
    // to leave help --> well, try it and see: ugly! v1.23
    Actually carried out with a Notepad++ ^h regular-expression replace:
    Find what: "(\(\s+.*\))\s+(//)"
    Replace with: "// \1"
[x] dos and cd are too important -- moved from misc.f into peforth.f
[X] 17:53 2019-05-11 next, consider releasing v1.23.
    [X] complete self-tests for new words , many are commented out.
    [X] Decide how to distribute the contents of misc.f unknown.f quit.f
        --> Put everything into misc.f with a marker, loaded fully automatically.
        --> Anyone who doesn't want it just runs the marker and it's all wiped.
        --> Their self-tests must therefore live with them, not in peforth.selftest.
    [X] The peforth.f source code still contains a lot of Chinese
    [X] *debug* seems unable to come back.... --> ah, it's the exit meant for
        breakpoints. --> Take the chance, while moving it into misc.f, to rename it: quit
    [X] Test jupyter notebook
        [x] established the method to include misc.f from within quit.f
    [X] Test ipython (DOS box)
        [X] After entering ipython, import peforth self-tests all ok, but from then on
            ipython cannot print anything. Running ipython -m peforth behaves the same.
            --> ipython's own display got switched off as well; running
            peforth.dictate('display-on') restores it. --> Caused by the selftest's
            display-off? --> Comment out the self-test and try... it really is fixed!
            Even ipython -m peforth is fine. --> Running
            c:\Users\hcche\Documents\GitHub\peforth\peforth>ipython test.py: the
            self-test and everything after it are fine.
        [X] self-test on > reproduce the problem > then issue display-on -- cured!
            That seemed to confirm the root cause is display-off (which turned out to
            be wrong in the end), but where? --> Commenting misc.f out of quit.f also
            cures it, so the problem is in there. --> Found it: just stop before
            pyclude's self-test --> Check how display-off is implemented -->
            display-on merely resets sys.stdout, beyond reproach. Never mind, there's
            a workaround.
        [x] Under WSL Ubuntu, the *debug* breakpoint after display-off is flaky too.
            I wanted to display-on inside it and come back to continue, but a single
            exit dropped straight back to the Shell. Tried a time delay as below --
            no effect.
            <py>
                # stall for time
                factorial = 1
                for i in range(2,10000):
                    factorial = factorial * i
            </py>
        [X] 15:24 2019-05-22 damn! The same problem now shows up under Windows DOS too.
            SRP: the working directory is the difference
            broken: c:\Users\hcche\Documents\GitHub\peforth>python -m peforth
            fine:   c:\Users\hcche\Documents\GitHub\peforth\peforth>python -m peforth
            It happens before *** (pyclude) --> deliberately run display-off then on
            first and watch.... RI: Bingo! Shit! In the selftest, pyclude hello.py
            must run with hello.py's own location as the working directory
            [x] 15:55 2019-05-22 this root cause was found by patiently walking the
                v1.23 release check-list -- so that check-list deserves to be done
                properly.
    [/] Test ipython on ubuntu ---> gave up; the error message:
        hcchen@WKS-4AEN0404:/mnt/c/Users/hcche/Documents/GitHub/peforth$ ipython
        Command 'ipython' not found, but can be installed with:
        sudo apt install ipython
        hcchen@WKS-4AEN0404:/mnt/c/Users/hcche/Documents/GitHub/peforth$ sudo apt install ipython
        [sudo] password for hcchen:
        Reading package lists... Done
        Building dependency tree
        Reading state information... Done
        Package ipython is not available, but is referred to by another package.
        This may mean that the package is missing, has been obsoleted, or is
        only available from another source
        E: Package 'ipython' has no installation candidate
    [X] Error! tib. unknown!
        --> renamed to "-->" 1.23
    [X] Rewrote the README.rst on pypi.org; the original example is no longer
        convincing -- pdb is actually quite strong. Better to use an Azure notebook
        introducing ipython's magic commands instead.
    [X] 17:33 2019-05-19 updated README.md on GitHub.com and the local .rst
    [X] In jupyter notebook a plain import peforth is already very usable; removed the
        peforth kernel (absent from README.md anyway) and moved it into the Wiki.
        --> 17:34 2019-05-19 done
    [X] Added misc.f, hello.py and friends into the package
    [X] Testing on ubuntu shows cd does belong in peforth.f while dos should stay in
        misc.f, with a check for which os we are on.
        --> py> os.name . cr ( posix or nt )
    [X] v1.23 testing on ubuntu --> damn! I forgot how to test -- can it bypass pip?
        09:35 2019-05-22
        --> The T550 ubuntu 16.04 doesn't even have pip, the python version is
        unclear, never mind virtualenv. --> Using Linux feels like a panic; simply
        removed the Ubuntu 16.04 on T550 and installed a newer one, hoping to dodge
        the python version problem. (See Ynote: "[筆記] Install Mozilla DeepSpeech
        Project" > "wsl ubuntu install python3.6.txt")
        --> 09:39 2019-05-22 T550 Ubuntu removed
        --> The most recent on Microsoft Store is still 18.04, so be it
        --> 10:43 2019-05-22 WSL installed --> how's the built-in python?
        --> See Ynote "好久沒玩 WSL Ubuntu, 為了 release peforth v1.23 測試整個再玩一次"
        [X] python 3.6.5 is built in, no pip <--- ignore that for now; passing
            python test.py comes first. --> After some struggle, it passed!
        [/] Without pip, can python -m peforth work? Tried python setup.py install --
            failed:
            hcchen@WKS-4AEN0404:/mnt/c/Users/hcche/Documents/GitHub/peforth$ python setup.py install
            Traceback (most recent call last):
            File "setup.py", line 4, in <module>
                from setuptools import setup
            ModuleNotFoundError: No module named 'setuptools' <------------------
            hcchen@WKS-4AEN0404:/mnt/c/Users/hcche/Documents/GitHub/peforth$
        [X] Read
            https://askubuntu.com/questions/861265/python-3-importerror-no-module-named-setuptools-ubuntu-14-04-lts
            and decided to give up; having tested test.py is enough.
        [/] Even from pypi.org it still needs pip (absent on a default 18.04); never
            mind, test.py passed.
        [/] Once it is on pypi.org, test again with Azure Notebooks.
[X] Tests before a Release v1.23
    [X] The copyright year in setup.py must change to 2019
        *** How to package and upload to pypi.org -- setup.bat can be greatly
        simplified. [New, post-v1.22] Packaging steps 2018/12/16 11:02
        See my Ynote: "Pack peforth to peforth.whl"
        1. Check ~\GitHub\peforth\setup.py for missed new files and files to drop.
        2. Run c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
           to get peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
        3. Run c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
           Account and password: see Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
        4. pip uninstall peforth and then pip install peforth to verify.
        5. Done!
    [X] See (15:55 2019-05-22): walk this check-list patiently and completely!
    [X] All run modes with selftest:
        [X] Edit GitHub\peforth\quit.f ' <selftest> :: enabled=True
        [X] Run python setup.py install to refresh the local site-package copy for testing
        [X] 1. python -i -m peforth [/] with-selftest .s words exit bye
        [X] 2. ipython -i -m peforth .' Hello World!!' cr bye
        [X] 3. ipython import peforth .s words
            [x] selftest peforth.ok() .s words <--- w/parent
            [x] 1234 bye check echo %errorlevel% <-- exiting straight from ipython works too.
        [X] 4. jupyter notebook
            kernel > restart and clear outputs
            x = 123
            %f x . x . \ ==> 123 (<class 'int'>)
        [/] 5. jupyter notebook --> peforth kernel --> .s words
        [/] Consider improving README.rst
        [X] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> redo
    [X] All run modes without selftest, preparing the version to release:
        [X] Edit GitHub\peforth\quit.f ' <selftest> :: enabled=False
        [X] Build the selftest-disabled wheel. See my Ynote: "Pack peforth to peforth.whl"
            [x] 1. Check ~\GitHub\peforth\setup.py for missed new files and files to
                   drop: test.py hello.py misc.f
            [X] 2. Run c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
                   to get peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
        [X] pip uninstall peforth
        [X] cd to c:\Users\hcche\Documents\GitHub\peforth\dist>
            pip install peforth-1.23-py3-none-any.whl <== Note! Use the freshly built
            wheel, otherwise it fetches from the net.
        [X] 1. 
(i)python -i -m peforth [/] no-selftest .s words exit
        [X] 2. (i)python -i -m peforth version 12345 bye --> echo %errorlevel%
        [X] 3. (i)python import peforth
            [X] no selftest, peforth.ok() .s words <--- no parent
            [X] 1234 bye check echo %errorlevel%
        [X] 4. jupyter notebook --> *debug* ok> .s cd help exit
            %f %%f magic command
        [/] 5. repeat the above in ubuntu <------- Ubuntu 18.04 has no built-in pip; not bothering
            [/] pip uninstall peforth
            [/] pip install (use /mnt/...the wheel) to WSL ubuntu
            [/] ipython -m peforth
            [/] ipython , import peforth , magic commands
    [X] Update Pypi directly with the tested wheel:
        run c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
        Account and password: see Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
        ID, password: search my Ynote with pypi _account_
    [X] Check the pypi.org page; if it's not there yet, look again in a few minutes.
    [/] Under WSL Ubuntu try pip uninstall peforth -> pip install peforth
    [/] WSL Ubuntu with and w/o virtualenv --> python -m peforth
    [X] Try under Windows DOS
    [X] Test Azure Online Jupyter Notebooks
        https://peforthplayground-hcchen1471.notebooks.azure.com/j/notebooks/peforth-playground.ipynb
        !pip install peforth
        import peforth
        %f version drop
        x = 12345
        %f x --> \ check the effect of unknown
    [X] Make a master release up to GitHub --> trivial with GitHub Windows.
        1. Commit to the develop branch first and push it up to Github.
        2. Switch to master.
        3. In GitHub for Windows desktop use Branch > Merge into current branch, pick
           develop, and merge it in. Resolve conflicts to complete the merge.
        4. Repeat 2-3 but switch to develop and merge master over; what's on master
           should just be README.md edits.
[X] Bump version to 1.24 (must skip 1.20 straight to 1.21, else it reads as 1.2)
[X] 11:28 2019-05-26 make a master merge for the article on Fibonacci and Decorator
    [X] rename the article to 'peforth helps to understand python Decorator'
    [/] 11:35 2019-05-26 write an article to introduce 'unknown' --> forget this,
        covered already.
    [/] 11:35 2019-05-26 find the video where I introduce 'unknown' and the other
        thing --> forget this, covered already.
[X] 09:11 2019-11-21 The old routine was to run GitHub\peforth\setup.bat to activate a
    modified version -- does that still work under anaconda?
    1. Open anaconda's prompt, make sure python is runnable
    2. peforth runnable too, check the path
    3. cd to GitHub\peforth, run setup
    4. check peforth
    OneNote note: "Develop peforth in an Anaconda virtual environment"
    https://onedrive.live.com/view.aspx?resid=A796EA18AC8C1DA9%2112289&id=documents&wd=target%28Anaconda.one%7CB4E0DFAB-84F7-43D2-A5AB-515B43314252%2FDevelop%20peforth%20in%20an%20Anaconda%20virtual%20environment%7C99DE5C5F-B36D-4949-9471-BC7A857E3C2B%2F%29
[X] 16:54 2019-07-22 From
    https://jupyter-contrib-nbextensions.readthedocs.io/en/latest/install.html
    I read there is a way to pip install directly from a github repo, e.g.:
    pip install https://github.com/ipython-contrib/jupyter_contrib_nbextensions/tarball/master
    Try whether peforth can be installed that way -- then pypi wouldn't be needed:
        pip install https://github.com/hcchengithub/peforth/master
        or
        pip install https://github.com/hcchengithub/peforth
    ==> Both failed.
[ ] GitHub has started doing package hosting:
    https://help.github.com/en/github/managing-packages-with-github-packages/about-github-packages#supported-clients-and-formats
[X] 2019/11/24 06:10:22 projectk.py imports a lot of modules it doesn't use itself (it
    only uses re, regular expressions). My comment there says:
        import re # import whatever we want, don't rely on parent module e.g. peforth __init__.py
    which has a point, since the projectk.py kernel has its own space. Still, modules
    ought to be global, no? Can't they be imported from forth code? --> Try and see:
    comment out the excess imports in projectk.py --> problems only show up very late;
    as long as they are native modules there is a chance to solve it..... hints from
    help import:
        \ import os __main__ :: peforth.projectk.os=pop(1) \ peforth global , does not work when run by 'python test.py'
        import os py> vm :: os=pop(1) \ this works! 
when run by 'python test.py'
        import inspect py> vm :: inspect=pop(1)
        import dis py> vm :: dis=pop(1)
        import json py> vm :: json=pop(1)
    But sys is too fundamental and must stay imported inside projectk.py.
[X] The copyright year in setup.py must change to 2019
[/] 2019/11/24 05:20 With Anaconda, kernel.json seems to have a problem too? The
    peforthkernel.py path written inside it is hard-coded, which is wrong on my OA
    machine's Anaconda. Apparently there is no problem as long as you don't go adding
    peforth as a JupyterNotebook kernel.
[X] 05:29 2019-11-21 the local, Comment, and debug global tokens in projectk.py look
    redundant; review them when there is time.
    [X] local might be for ok(prompt='OK ', loc={}, glo={}, cmd="") or the redefined
        unknown <--- it is not. 13:47 2019/11/25 deleted all the suspects from
        projectk.py --> dos ok, jupyternotebook ok.
    * Note! setup.bat does not refresh the peforth\ folder in site-packages; you must
      manually copy peforth\ out of peforth-1.24-py3.7.egg over the peforth\ folder in
      site-packages. <== 2020.7.28 solved! see OneNote2020 > "Develop peforth in an
      Anaconda virtual environment"
      refer to https://onedrive.live.com/view.aspx?resid=A796EA18AC8C1DA9%2112289&id=documents&wd=target%28Anaconda.one%7CB4E0DFAB-84F7-43D2-A5AB-515B43314252%2FDevelop%20peforth%20in%20an%20Anaconda%20virtual%20environment%7C99DE5C5F-B36D-4949-9471-BC7A857E3C2B%2F%29
[X] 14:41 2019/11/25 this sort of thing in quit.f should be improved -- too clumsy:
        import os py> vm :: os=pop(1) \ too clumsy
        import os                     \ should become this
[X] 15:21 2019/11/25 sorted out the relationship between peforth.f, quit.f and
    peforth.selftest -- more systematic now. __init__.py only loads the basic
    peforth.f and quit.f; everything else is quit.f's responsibility, making quit.f
    the main program and coordinator of the eforth system.
[X] 16:07 2019/11/25 nailed down pop(1) once and for all:
        code test # ( a b c -- ) print given things
            print(pop(), pop(), pop()) end-code
        1 2 3 test
        3 2 1 <-- the result, showing the three pop() calls grab the TOS from left to right.
[X] Tests before a Release v1.24
    [X] The copyright year in setup.py must change to 2019
        *** How to package and upload to pypi.org -- setup.bat can be greatly
        simplified. [New, post-v1.22] Packaging steps 2018/12/16 11:02
        See my Ynote: "Pack peforth to peforth.whl"
        1. Check ~\GitHub\peforth\setup.py for missed new files and files to drop.
        2. (Remember to delete the dist, build, and peforth.egg-info folders first.)
           Run c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
           to get peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
        3. Run c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
           Account and password: see Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
        4. pip uninstall peforth and then pip install peforth to verify.
        5. Done!
    [X] See (15:55 2019-05-22): walk this check-list patiently and completely!
    [X] All run modes with selftest:
        [X] Edit GitHub\peforth\quit.f ' <selftest> :: enabled=True
        [X] First pass the most basic selftest: GitHub\peforth\peforth>python test.py
        [X] Run python setup.py install to refresh the local site-package copy for testing
        [X] May have to (under an Anaconda virtualenv) copy peforth\ out of
            site-packages\peforth-1.24-py3.7.egg over site-packages\peforth\ for the
            upgrade to take effect.
        [X] 1. python -i -m peforth [X] with-selftest .s words exit bye
        [X] 2. ipython -i -m peforth .' Hello World!!' cr bye
        [X] 3. ipython import peforth .s words
            [X] selftest peforth.ok() .s words <--- w/parent
            [X] 1234 bye check echo %errorlevel% <-- exiting straight from ipython works too.
        [X] 4. jupyter notebook
            kernel > restart and clear outputs
            x = 123
            %f x . x . \ ==> 123 (<class 'int'>)
        [X] 5. jupyter notebook --> peforth kernel --> .s words
        [/] Consider improving README.rst
        [/] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> redo
    [X] All run modes without selftest, preparing the version to release:
        [X] Edit GitHub\peforth\quit.f ' <selftest> :: enabled=False
        [X] As above, try python test.py first
        [X] Run python setup.py install to refresh the local site-package copy for testing
        [X] May have to (under an Anaconda virtualenv) copy peforth\ out of
            site-packages\peforth-1.24-py3.7.egg over site-packages\peforth\ for the
            upgrade to take effect.
        [X] As above, repeat 1) python -m peforth 2) ipython -m peforth
        [X] Build the selftest-disabled wheel. See my Ynote: "Pack peforth to peforth.whl"
            [X] 1. 
Check ~\GitHub\peforth\setup.py for missed new files and files to drop:
                   test.py hello.py misc.f
            [X] 2. Run c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
                   to get peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
        [X] pip uninstall peforth -- both peforth folders under site-packages are now
            gone. Delete the EGG file peforth-1.24-py3.7.egg created by setup.bat too,
            otherwise pip install gets skipped.
        [X] cd to c:\Users\hcche\Documents\GitHub\peforth\dist>
            pip install peforth-1.23-py3-none-any.whl <== Note! Use the freshly built
            wheel, otherwise it fetches from the net.
        [X] 1. (i)python -i -m peforth [X] no-selftest .s words exit
        [X] 2. (i)python -i -m peforth version 12345 bye --> echo %errorlevel%
        [X] 3. (i)python import peforth
            [X] no selftest, peforth.ok() .s words <--- no parent
            [X] 1234 bye check echo %errorlevel%
        [X] 4. jupyter notebook --> *debug* ok> .s cd help exit
            %f %%f magic command
        [/] 5. repeat the above in ubuntu <------- Ubuntu 18.04 has no built-in pip; not bothering
            [/] pip uninstall peforth -- Colab & Azure are both Ubuntu anyway, so no
                need to test that again myself
            [/] pip install (use /mnt/...the wheel) to WSL ubuntu
            [/] ipython -m peforth
            [/] ipython , import peforth , magic commands
    [X] Update Pypi directly with the tested wheel:
        run c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
        Account and password: see Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
        ID, password: search my Ynote with pypi _account_
        Note: the Anaconda base has no twine; in Anaconda Navigator find twine, tick
        it, and Apply.
    [X] Check the pypi.org page; if it's not there yet, look again in a few minutes.
    [X] Test Online Jupyter Notebooks: Google Colab, Microsoft Azure, and Notebooks.ai
        !pip install peforth
        import peforth
        %f version drop
        x = 12345
        %f x --> \ check the effect of unknown
        \ Colab & Azure run Ubuntu, Notebooks.ai runs Debian; this line shows the
        \ version on all of them:
        !cat /etc/os-release
        %f py> path --> \ inspect path -- it turns out Azure uses Anaconda, which is
        how it supports Ubuntu!
        %pwd \ check the working directory
        [x] Colab https://colab.research.google.com/drive/1nZpybQryEiwYzpMvG1tHg4qbNnd_rMey#scrollTo=yAuF9DZcrFaT
        [X] Azure https://peforthplayground-hcchen1471.notebooks.azure.com/j/notebooks/peforth-playground.ipynb
        [X] Also test notebooks.ai
    [X] Make a master release up to GitHub --> trivial with GitHub Windows.
        1. Commit to the develop branch first and push it up to Github.
        2. Switch to master.
        3. In GitHub for Windows desktop use Branch > Merge into current branch, pick
           develop, and merge it in. Resolve conflicts to complete the merge.
        4. Repeat 2-3 but switch to develop and merge master over; what's on master
           should just be README.md edits.
[X] Bump version to 1.25 (must skip 1.20 straight to 1.21, else it reads as 1.2)
[X] Should projectk.py be synced back to project-k? projectk.py changed a little (long
    ago) and I forgot how it used to be synced back. 05:30 2019-11-21 the projectk.py
    in the peforth source was not pulled from github directly -- it was dropped in by
    hand, so it does not stay in sync with the project-k github automatically <---
    think about what to do.
[ ] 15:56 2019/11/25 (see)-ing things frequently hits this problem:
        Callable in phaseB <function compyle_anonymous at 0x00000232B1164A68>:
        Circular reference detected
    Ask around whether there is a workaround?
[ ] 13:42 2019/11/27 the next release should list the tested systems explicitly in
    README.rst and README.md:
    1. Windows Anaconda DOSBox python 3.7, DOSBox ipython, JupyterNotebook, JupyterLab
    2. Colab (Ubuntu, Anaconda), Azure notebooks (Ubuntu), Notebooks.ai (Debian)
[X] 2020/07/27 08:33 [obj>keys] 'keys' can be defined as dir | dict>keys so it no
    longer overlaps with dir, and stays compatible with jeforth.
[X] 2020/07/27 08:38:15 value, constant and to must be redefined: stop storing them in
    vm.forth and let each word keep its own variable. See OneNote2020 > "Jeforth
    variable 變革" --> done, it works.
[ ] Consider putting projectk.py itself on pypi -- pip install projectk would be even
    more meaningful!
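    # A small plain-Python sketch of the dict-versus-object point above: a dict
    # is still an object, so dir() lists its *attributes* while .keys()/.values()
    # and json.dumps() see its *hash-table* contents -- which is why composing
    # the forth word 'keys' as "dir | dict>keys" is unambiguous.
    import json
    o1 = {'a': 11, 'b': 22}
    print(sorted(k for k in dir(o1) if not k.startswith('_')))
    # -> ['clear', 'copy', 'fromkeys', 'get', 'items', 'keys', 'pop',
    #     'popitem', 'setdefault', 'update', 'values']
    print(list(o1.keys()), list(o1.values()))   # -> ['a', 'b'] [11, 22]
    print(json.dumps(o1, indent=2))             # the 'stringify' view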
[X] 07:49 2020/10/04 Studying KsanaVM showed me I had misunderstood when the prompt
    should appear; fixed.
[ ] 15:49 2020/10/24 once v1.25 is done, projectk.py should be synced back to projectk
[X] 15:30 2020/10/24 prepare the v1.25 release to pypi so as to allow gom to have it easily
    [X] 15:54 2020/10/24 try gom first, ok? --> Pass, even the selftest passes.
    [X] The copyright year in setup.py must change to 2019
        *** How to package and upload to pypi.org -- setup.bat can be greatly
        simplified. [New, post-v1.22] Packaging steps 2018/12/16 11:02
        See my Ynote: "Pack peforth to peforth.whl"
        1. Check ~\GitHub\peforth\setup.py for missed new files and files to drop.
        2. (Remember to delete the dist, build, and peforth.egg-info folders first.)
           Run c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
           to get peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
        3. If twine is missing, pip install twine -- quick and painless.
           Run c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
           Account and password: see Ynote or Evernote: "python pypi 研究 -- upload to PyPI ok now.note"
        4. pip uninstall peforth and then pip install peforth to verify.
        5. Done!
    [X] See (15:55 2019-05-22): walk this check-list patiently and completely!
    [ ] All run modes with selftest:
        [X] Edit GitHub\peforth\quit.f ' <selftest> :: enabled=True
        [X] First pass the most basic selftest: GitHub\peforth\peforth>python test.py
        [X] Run python setup.py install to refresh the local site-package copy for testing
        [X] Copy peforth\ (under an Anaconda virtualenv) out of
            site-packages\peforth-1.24-py3.7.egg over site-packages\peforth\ so the
            upgrade takes effect.
            16:27 2020/10/24 Not necessary after all: python setup.py install puts
            peforth v1.25 at
            c:\Users\8304018\AppData\Local\Continuum\anaconda3\lib\site-packages\peforth-1.25-py3.7.egg
            while pip install peforth puts another peforth v1.25 at
            c:\Users\8304018\AppData\Local\Continuum\anaconda3\lib\site-packages\peforth\
            The two coexist! And the latter takes priority. Just rename the latter
            directory to peforth.disabled to activate the former; the former is handy
            when testing a local install.
        [X] 1. python -i -m peforth [X] with-selftest .s words exit bye
        [X] 2. ipython -i -m peforth .' Hello World!!' cr bye
        [/] 3. ipython import peforth .s words
            [/] selftest peforth.ok() .s words <--- w/parent
            [/] 1234 bye check echo %errorlevel% <-- exiting straight from ipython works too.
        [X] 4. jupyter notebook
            kernel > restart and clear outputs
            x = 123
            %f x . x . \ ==> 123 (<class 'int'>)
        [X] 5. jupyter notebook --> peforth kernel --> .s words
        [X] 6. Gom
            Manually remove the existing peforth directories from:
            c:\Users\8304018\AppData\Roaming\gom\2020\python\..
            then install peforth from SCRIPTING > Script Choice > pip install peforth >
            Tools > Install Python Package -- quick and painless.
            import peforth, peforth_gom_port
            running peforth.ok() works fine.
            Added peforth_gom_port.py into the playground directory of the peforth repo.
        [/] Consider improving README.rst
        [/] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> redo
    [ ] All run modes without selftest, preparing the version to release:
        [ ] Edit GitHub\peforth\quit.f ' <selftest> :: enabled=False
        [ ] As above, try python test.py first
        [ ] Run python setup.py install to refresh the local site-package copy for testing
        [X] May have to (under an Anaconda virtualenv) copy peforth\ out of
            site-packages\peforth-1.24-py3.7.egg over site-packages\peforth\ for the
            upgrade to take effect.
        [ ] As above, repeat 1) python -m peforth 2) ipython -m peforth
        [ ] Build the selftest-disabled wheel. See my Ynote: "Pack peforth to peforth.whl"
            [ ] 1. Check ~\GitHub\peforth\setup.py for missed new files and files to
                   drop: test.py hello.py misc.f
            [ ] 2. Run c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
                   to get peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
        [ ] pip uninstall peforth -- both peforth folders under site-packages deleted.
            Delete the EGG file peforth-1.24-py3.7.egg created by setup.bat too,
            otherwise pip install gets skipped.
        [ ] cd to c:\Users\hcche\Documents\GitHub\peforth\dist>
            pip install peforth-1.23-py3-none-any.whl <== Note! Use the freshly built
            wheel, otherwise it fetches from the net.
        [ ] 1. (i)python -i -m peforth [ ] no-selftest .s words exit
        [ ] 2. 
(i)python -i -m peforth version 12345 bye --> echo %errorlevel%
        [ ] 3. (i)python import peforth
            [ ] no selftest, peforth.ok() .s words <--- no parent
            [ ] 1234 bye check echo %errorlevel%
        [ ] 4. jupyter notebook --> *debug* ok> .s cd help exit
            %f %%f magic command
        [ ] 5. repeat the above in ubuntu <------- Ubuntu 18.04 has no built-in pip; not bothering
            [ ] pip uninstall peforth -- Colab & Azure are both Ubuntu anyway, so no
                need to test that again myself
            [ ] pip install (use /mnt/...the wheel) to WSL ubuntu
            [ ] ipython -m peforth
            [ ] ipython , import peforth , magic commands
    [ ] Update Pypi directly with the tested wheel:
        run c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
        Account and password: see Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
        ID, password: search my Ynote with pypi _account_
        Note: the Anaconda base has no twine; in Anaconda Navigator find twine, tick
        it, and Apply.
    [ ] Check the pypi.org page; if it's not there yet, look again in a few minutes.
    [ ] Try under Windows DOS
    [ ] Under WSL Ubuntu try pip uninstall peforth -> pip install peforth
    [ ] WSL Ubuntu with and w/o virtualenv --> python -m peforth
    [ ] Test Online Jupyter Notebooks: Google Colab, Microsoft Azure, and Notebooks.ai
        !pip install peforth
        import peforth
        %f version drop
        x = 12345
        %f x --> \ check the effect of unknown
        \ Colab & Azure run Ubuntu, Notebooks.ai runs Debian; this line shows the
        \ version on all of them:
        !cat /etc/os-release
        %f py> path --> \ inspect path -- it turns out Azure uses Anaconda, which is
        how it supports Ubuntu!
        %pwd \ check the working directory
        [ ] Colab https://colab.research.google.com/drive/1nZpybQryEiwYzpMvG1tHg4qbNnd_rMey#scrollTo=yAuF9DZcrFaT
        [ ] Azure https://peforthplayground-hcchen1471.notebooks.azure.com/j/notebooks/peforth-playground.ipynb
        [ ] Also test notebooks.ai
    [ ] Make a master release up to GitHub --> trivial with GitHub Windows.
        1. Commit to the develop branch first and push it up to Github.
        2. Switch to master.
        3. In GitHub for Windows desktop use Branch > Merge into current branch, pick
           develop, and merge it in. Resolve conflicts to complete the merge.
        4. Repeat 2-3 but switch to develop and merge master over; what's on master
           should just be README.md edits.
[X] Bump version to 1.25 (must skip 1.20 straight to 1.21, else it reads as 1.2)
[ ] Should projectk.py be synced back to project-k? projectk.py changed a little (long
    ago) and I forgot how it used to be synced back. 05:30 2019-11-21 the projectk.py
    in the peforth source was not pulled from github directly -- it was dropped in by
    hand, so it does not stay in sync with the project-k github automatically <---
    think about what to do.
[X] 17:01 2020/10/24 v1.25 is already on pypi and tested fine with Gom; do the tests
    above at leisure -- push to github first.
[X] 14:22 2020/10/29 vm.prompt exists so the gom port dialog can know the current
    prompt; otherwise it is only visible inside ok().
[X] 13:52 2020/11/23 swap pypi's v1.25 for the local v1.26 directly --> just copy
    __init__.py and version.txt over
    c:\Users\8304018\AppData\Roaming\gom\2020\python\peforth
    --> 11> <py> ok() </py> --> the prompt becomes ok , exit --> the prompt returns
    to 11> -- success! So this really is v1.26.
[X] 10:26 2020/11/26 Improving the breakpoint needs no change to peforth; it can be
    bolted on from the application side.

    Usage of breakpoint:
        peforth.bp(22,locals())  # drop breakpoint 22 with locals()
        for i in [11,22,33]: peforth.bps[i]=0  # disable breakpoints 11,22,33
        for i in [11,22,33]: peforth.bps[i]=i  # enable breakpoints 11,22,33
        peforth.bps=[i for i in range(1000)]  # reload and enable all breakpoints
    'exit' or ESC leaves the breakpoint and continues running. 'bye' totally stops
    the script session.
    # breakpoint
    # peforth.bp() # drop a breakpoint using default prompt bp>
    # peforth.bp(11) # drop a breakpoint using prompt bp11> w/o passing locals()
    # peforth.bp(22,locals()) # drop a breakpoint using prompt bp22> with locals()
    # peforth.bps=[] # disable all breakpoints
    # peforth.dictate("peforth :: bps=[]") # disable all breakpoints
    # peforth.dictate("peforth :: bps=[123,345,567]") # enable only listed breakpoints
    # peforth.dictate("peforth :: bps[123]=0") # disable the breakpoint 123
    # peforth.dictate("peforth :: pop(111)") # disable the breakpoint 111
    # for i in [11,22,33]: peforth.bps[i]=0 # disable breakpoints 11,22,33
    # peforth.bps=[i for i in range(1000)] # reload and enable all breakpoints
    def bp(id=None,locals=None):
        if id==None:
            id = 0
            prompt='bp> '
        else:
            prompt="bp{}>".format(id)
        if id in peforth.bps: peforth.push(locals).ok(prompt, cmd="to _locals_")
    peforth.bp = bp
    peforth.bps = [i for i in range(1000)]

[X] 17:33 2020/12/07 added the bl be bd be* bd* commands to go with peforth.bp(22,locals())
[ ] 17:34 2020/12/07 release v1.26 to pypi
    [X] 17:37 2020/12/07 try gom first, ok? --> Pass, even the selftest passes.
    [X] The copyright year in setup.py must change to 2019
        *** How to package and upload to pypi.org -- setup.bat can be greatly
        simplified. [New, post-v1.22] Packaging steps 2018/12/16 11:02
        See my Ynote: "Pack peforth to peforth.whl"
        1. Check ~\GitHub\peforth\setup.py for missed new files and files to drop.
        2. (Remember to delete the dist, build, and peforth.egg-info folders first.)
           Run c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
           to get peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
        3. If twine is missing, pip install twine -- quick and painless.
           Run c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
           Account and password: see Ynote or Evernote: "python pypi 研究 -- upload to PyPI ok now.note"
        4. pip uninstall peforth and then pip install peforth to verify.
        5. Done!
    [ ] See (15:55 2019-05-22): walk this check-list patiently and completely!
    [ ] All run modes with selftest:
        [ ] Edit GitHub\peforth\quit.f ' <selftest> :: enabled=True
        [ ] First pass the most basic selftest: GitHub\peforth\peforth>python test.py
        [ ] Run python setup.py install to refresh the local site-package copy for testing
        [ ] Copy peforth\ (under an Anaconda virtualenv) out of
            site-packages\peforth-1.24-py3.7.egg over site-packages\peforth\ so the
            upgrade takes effect.
            16:27 2020/10/24 Not necessary after all: python setup.py install puts
            peforth v1.25 at
            c:\Users\8304018\AppData\Local\Continuum\anaconda3\lib\site-packages\peforth-1.25-py3.7.egg
            while pip install peforth puts another peforth v1.25 at
            c:\Users\8304018\AppData\Local\Continuum\anaconda3\lib\site-packages\peforth\
            The two coexist! And the latter takes priority. Just rename the latter
            directory to peforth.disabled to activate the former; the former is handy
            when testing a local install.
        [ ] 1. python -i -m peforth [X] with-selftest .s words exit bye
        [ ] 2. ipython -i -m peforth .' Hello World!!' cr bye
        [ ] 3. ipython import peforth .s words
            [/] selftest peforth.ok() .s words <--- w/parent
            [/] 1234 bye check echo %errorlevel% <-- exiting straight from ipython works too.
        [ ] 4. jupyter notebook
            kernel > restart and clear outputs
            x = 123
            %f x . x . \ ==> 123 (<class 'int'>)
        [ ] 5. jupyter notebook --> peforth kernel --> .s words
        [ ] 6. Gom
            Manually remove the existing peforth directories from:
            c:\Users\8304018\AppData\Roaming\gom\2020\python\..
            then install peforth from SCRIPTING > Script Choice > pip install peforth >
            Tools > Install Python Package -- quick and painless.
            import peforth, peforth_gom_port
            running peforth.ok() works fine.
            Added peforth_gom_port.py into the playground directory of the peforth repo.
        [ ] Consider improving README.rst
        [ ] If README.rst changed, the wheel must be rebuilt --> quit.f selftest=False --> redo
    [ ] All run modes without selftest, preparing the version to release:
        [ ] Edit GitHub\peforth\quit.f ' <selftest> :: enabled=False
        [ ] As above, try python test.py first
        [ ] Run python setup.py install to refresh the local site-package copy for testing
        [X] May have to (under an Anaconda virtualenv) copy peforth\ out of
            site-packages\peforth-1.24-py3.7.egg over site-packages\peforth\ for the
            upgrade to take effect.
        [ ] As above, repeat 1) python -m peforth 2) ipython -m peforth
        [ ] Build the selftest-disabled wheel. See my Ynote: "Pack peforth to peforth.whl"
            [ ] 1. Check ~\GitHub\peforth\setup.py for missed new files and files to
                   drop: test.py hello.py misc.f
            [ ] 2. Run c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
                   to get peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
        [ ] pip uninstall peforth -- both peforth folders under site-packages deleted.
            Delete the EGG file peforth-1.24-py3.7.egg created by setup.bat too,
            otherwise pip install gets skipped.
        [ ] cd to c:\Users\hcche\Documents\GitHub\peforth\dist>
            pip install peforth-1.23-py3-none-any.whl <== Note! Use the freshly built
            wheel, otherwise it fetches from the net.
        [ ] 1. (i)python -i -m peforth [ ] no-selftest .s words exit
        [ ] 2. (i)python -i -m peforth version 12345 bye --> echo %errorlevel%
        [ ] 3. (i)python import peforth
            [ ] no selftest, peforth.ok() .s words <--- no parent
            [ ] 1234 bye check echo %errorlevel%
        [ ] 4. jupyter notebook --> *debug* ok> .s cd help exit
            %f %%f magic command
        [ ] 5. repeat the above in ubuntu <------- Ubuntu 18.04 has no built-in pip; not bothering
            [ ] pip uninstall peforth -- Colab & Azure are both Ubuntu anyway, so no
                need to test that again myself
            [ ] pip install (use /mnt/...the wheel) to WSL ubuntu
            [ ] ipython -m peforth
            [ ] ipython , import peforth , magic commands
    [ ] Update Pypi directly with the tested wheel:
        run c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
        Account and password: see Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
        ID, password: search my Ynote with pypi _account_
        Note: the Anaconda base has no twine; in Anaconda Navigator find twine, tick
        it, and Apply.
    [ ] Check the pypi.org page; if it's not there yet, look again in a few minutes.
    [ ] Try under Windows DOS
    [ ] Under WSL Ubuntu try pip uninstall peforth -> pip install peforth
    [ ] WSL Ubuntu with and w/o virtualenv --> python -m peforth
    [ ] Test Online Jupyter Notebooks: Google Colab, Microsoft Azure, and Notebooks.ai
        !pip install peforth
        import peforth
        %f version drop
        x = 12345
        %f x --> \ check the effect of unknown
        \ Colab & Azure run Ubuntu, Notebooks.ai runs Debian; this line shows the
        \ version on all of them:
        !cat /etc/os-release
        %f py> path --> \ inspect path -- it turns out Azure uses Anaconda, which is
        how it supports Ubuntu!
        %pwd \ check the working directory
        [ ] Colab https://colab.research.google.com/drive/1nZpybQryEiwYzpMvG1tHg4qbNnd_rMey#scrollTo=yAuF9DZcrFaT
        [ ] Azure https://peforthplayground-hcchen1471.notebooks.azure.com/j/notebooks/peforth-playground.ipynb
        [ ] Also test notebooks.ai
    [ ] Make a master release up to GitHub --> trivial with GitHub Windows.
        1. Commit to the develop branch first and push it up to Github.
        2. Switch to master.
        3. In GitHub for Windows desktop use Branch > Merge into current branch, pick
           develop, and merge it in. Resolve conflicts to complete the merge.
        4. Repeat 2-3 but switch to develop and merge master over; what's on master
           should just be README.md edits.
    [ ] Should projectk.py be synced back to project-k? projectk.py changed a little
        (long ago) and I forgot how it used to be synced back. 05:30 2019-11-21 the
        projectk.py in the peforth source was not pulled from github directly -- it
        was dropped in by hand, so it does not stay in sync with the project-k github
        automatically <--- think about what to do.
[X] Bump version to 1.27 (must skip 1.20 straight to 1.21, else it reads as 1.2)
```
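A minimal usage sketch for the `bp()` breakpoint helper defined near the end of the log above; it assumes `peforth` is installed and that `bp`/`bps` are attached to the module exactly as the log shows. The `train` function is hypothetical, purely for illustration:

```python
import peforth

# The helper as the log defines it:
def bp(id=None, locals=None):
    if id is None:
        id = 0
        prompt = 'bp> '
    else:
        prompt = "bp{}>".format(id)
    if id in peforth.bps:
        peforth.push(locals).ok(prompt, cmd="to _locals_")
peforth.bp = bp
peforth.bps = [i for i in range(1000)]  # enable breakpoints 0..999

# Hypothetical call site: drops into an interactive bp22> prompt with the
# function's locals visible; typing 'exit' (or ESC) there resumes execution.
def train(step):
    loss = 1.0 / (step + 1)
    peforth.bp(22, locals())

peforth.bps[22] = 0  # later: disable just breakpoint 22
```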
```python
from builtins import object
import numpy as np

from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.layer_utils import *


class ThreeLayerConvNet(object):
    """
    A three-layer convolutional network with the following architecture:

    conv - relu - 2x2 max pool - affine - relu - affine - softmax

    The network operates on minibatches of data that have shape (N, C, H, W)
    consisting of N images, each with height H and width W and with C input
    channels.
    """

    def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,
                 hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,
                 dtype=np.float32):
        """
        Initialize a new network.

        Inputs:
        - input_dim: Tuple (C, H, W) giving size of input data
        - num_filters: Number of filters to use in the convolutional layer
        - filter_size: Size of filters to use in the convolutional layer
        - hidden_dim: Number of units to use in the fully-connected hidden layer
        - num_classes: Number of scores to produce from the final affine layer.
        - weight_scale: Scalar giving standard deviation for random initialization
          of weights.
        - reg: Scalar giving L2 regularization strength
        - dtype: numpy datatype to use for computation.
        """
        self.params = {}
        self.reg = reg
        self.dtype = dtype

        ############################################################################
        # TODO: Initialize weights and biases for the three-layer convolutional    #
        # network. Weights should be initialized from a Gaussian with standard     #
        # deviation equal to weight_scale; biases should be initialized to zero.   #
        # All weights and biases should be stored in the dictionary self.params.   #
        # Store weights and biases for the convolutional layer using the keys 'W1' #
        # and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the       #
        # hidden affine layer, and keys 'W3' and 'b3' for the weights and biases   #
        # of the output affine layer.                                              #
        ############################################################################
        C, H, W = input_dim
        self.params['W1'] = np.random.randn(num_filters, C, filter_size, filter_size) * weight_scale
        self.params['b1'] = np.zeros(num_filters)
        # The 2x2 max pool halves each spatial dimension; integer division (//)
        # keeps the size an int (plain H/2 yields a float under true division,
        # which np.random.randn rejects).
        self.params['W2'] = np.random.randn((H // 2) * (W // 2) * num_filters, hidden_dim) * weight_scale
        self.params['b2'] = np.zeros(hidden_dim)
        self.params['W3'] = np.random.randn(hidden_dim, num_classes) * weight_scale
        self.params['b3'] = np.zeros(num_classes)
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################

        for k, v in self.params.items():
            self.params[k] = v.astype(dtype)

    def loss(self, X, y=None):
        """
        Evaluate loss and gradient for the three-layer convolutional network.

        Input / output: Same API as TwoLayerNet in fc_net.py.
        """
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        W3, b3 = self.params['W3'], self.params['b3']

        # pass conv_param to the forward pass for the convolutional layer
        filter_size = W1.shape[2]
        conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}

        # pass pool_param to the forward pass for the max-pooling layer
        pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}

        scores = None
        ############################################################################
        # TODO: Implement the forward pass for the three-layer convolutional net,  #
        # computing the class scores for X and storing them in the scores          #
        # variable.                                                                #
        ############################################################################
        conv_relu_pool_out, cache_conv = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)
        hidden_out, cache_hidden = affine_relu_forward(conv_relu_pool_out, W2, b2)
        scores, cache_scores = affine_forward(hidden_out, W3, b3)
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################

        if y is None:
            return scores

        loss, grads = 0, {}
        ############################################################################
        # TODO: Implement the backward pass for the three-layer convolutional net, #
        # storing the loss and gradients in the loss and grads variables. Compute  #
        # data loss using softmax, and make sure that grads[k] holds the gradients #
        # for self.params[k]. Don't forget to add L2 regularization!               #
        ############################################################################
        # conv - relu - 2x2 max pool - affine - relu - affine - softmax
        loss, dscores = softmax_loss(scores, y)
        loss += 0.5 * self.reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3))

        dhidden_out, dW3, db3 = affine_backward(dscores, cache_scores)
        dconv_relu_pool_out, dW2, db2 = affine_relu_backward(dhidden_out, cache_hidden)
        dX, dW1, db1 = conv_relu_pool_backward(dconv_relu_pool_out, cache_conv)

        dW1 += self.reg * W1
        dW2 += self.reg * W2
        dW3 += self.reg * W3

        grads['W1'], grads['b1'] = dW1, db1
        grads['W2'], grads['b2'] = dW2, db2
        grads['W3'], grads['b3'] = dW3, db3
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################

        return loss, grads
```
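A minimal smoke-test sketch for the class above -- hedged, since it assumes the cs231n assignment's `layers`/`fast_layers`/`layer_utils` modules are importable on the path; the shapes are the assignment defaults:

```python
import numpy as np

model = ThreeLayerConvNet(input_dim=(3, 32, 32), num_filters=32,
                          filter_size=7, hidden_dim=100, num_classes=10,
                          weight_scale=1e-3, reg=0.0)
X = np.random.randn(5, 3, 32, 32).astype(np.float32)  # a tiny minibatch
y = np.random.randint(10, size=5)

scores = model.loss(X)           # no labels -> raw class scores, shape (5, 10)
loss, grads = model.loss(X, y)   # with labels -> softmax loss and gradients
print(scores.shape, loss, sorted(grads.keys()))
```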
Repeat the following code:
```python
#!/usr/bin/env python
"""
Extract grammatical error edits from CoNLL dataset SGML files into kilogram format.
Not using XML or SGML parse since the file is not XML/SGML-compliant.
wget http://www.comp.nus.edu.sg/~nlp/conll13st/release2.3.1.tar.gz
"""
import re
import argparse
import unicodecsv as csv

SPACE = re.compile(r'\s+')

parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-o', '--output', dest='out_file', action='store', required=True,
                    help='output TSV file')
parser.add_argument('files', nargs='+', help='input SGML files')
args = parser.parse_args()


def extract_grammar_edits(in_files, out_file):
    """
    Extracts grammar edits from CoNLL SGML files
    :type out_file: str
    """
    with open(out_file, 'w') as out:
        csvwriter = csv.writer(out, delimiter='\t', encoding='utf-8')
        for in_file in in_files:
            with open(in_file, 'r') as input_f:
                values = {}
                paragraphs = []  # guard against text lines appearing before <TEXT>
                corrections = []
                for line in input_f:
                    # need to stack the corrections to replace from the end
                    if not line.startswith("<"):  # paragraph
                        paragraphs[-1]['orig'] += line.strip()
                        paragraphs[-1]['new'] += line.strip()
                    elif line.startswith('<TEXT>'):
                        paragraphs = []
                        corrections = []
                    elif line.startswith('<P>') or line.startswith('<TITLE>'):
                        paragraphs.append({'orig': '', 'new': ''})
                    elif line.startswith('</DOC>'):
                        nonoverlap_corrs = []
                        for values in corrections:
                            if values['type'] != 'Prep':
                                continue
                            if len(nonoverlap_corrs) > 0 and int(values['start_off']) >= nonoverlap_corrs[-1]['start_off'] and \
                                            int(values['end_off']) <= nonoverlap_corrs[-1]['end_off']:
                                continue
                            if len(nonoverlap_corrs) > 0 and int(values['start_off']) == nonoverlap_corrs[-1]['start_off'] and \
                                            int(values['end_off']) >= nonoverlap_corrs[-1]['end_off']:
                                # change offsets if overlapping replacement
                                nonoverlap_corrs[-1]['end_off'] = nonoverlap_corrs[-1]['start_off']
                            values['start_off'] = int(values['start_off'])
                            values['end_off'] = int(values['end_off'])
                            values['start_par'] = int(values['start_par'])
                            nonoverlap_corrs.append(values)
                        # make corrections
                        for values in reversed(nonoverlap_corrs):
                            new_par = paragraphs[values['start_par']]['new']
                            paragraphs[int(values['start_par'])]['new'] = ' '.join((new_par[:values['start_off']],
                                                                                    values['correction'],
                                                                                    new_par[values['end_off']:]))
                        # write paragraphs to output
                        for p in paragraphs:
                            # strip multiple spaces
                            p['new'] = SPACE.sub(' ', p['new'])
                            csvwriter.writerow([p['orig'], p['new']])
                    elif line.startswith('<MISTAKE'):  # update paragraph
                        values = dict([x.split('=') for x in line.strip()[9:-1].replace('"', '').split()])
                        if values['start_par'] != values['end_par']:
                            continue
                        corrections.append(values)
                    elif line.startswith('<CORRECTION>'):
                        values['correction'] = line.strip()[12:-13]
                    elif line.startswith("<TYPE>"):
                        values['type'] = line.strip()[6:-7]

extract_grammar_edits(args.files, args.out_file)
```
```python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.  You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.  Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>

"""
    tests.test_utils
    ================

    Tests utilities.

    .. moduleauthor:: mulhern <amulhern@redhat.com>
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import pyblk

from ._constants import GRAPH


class TestGraphUtils(object):
    """
    Test utilities that work over networkx graphs.
    """
    # pylint: disable=too-few-public-methods

    def test_roots(self):
        """
        Verify that roots are really roots.
        """
        roots = pyblk.GraphUtils.get_roots(GRAPH)
        in_degrees = GRAPH.in_degree(roots)
        assert all(in_degrees[r] == 0 for r in roots)
```
[ { "content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\nimport pytest\n\nfrom desmod.timescale import parse_time, scale_time\n\n\n@pytest.mark.parametrize('test_input, expected', [\n ('12 s', (12, 's')),\n ('12s', (12, 's')),\n ('+12s', (12, 's')),\n ('-12...
[ { "content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\nimport pytest\n\nfrom desmod.timescale import parse_time, scale_time\n\n\n@pytest.mark.parametrize('test_input, expected', [\n ('12 s', (12, 's')),\n ('12s', (12, 's')),\n ('+12s', (12, '...
```python
import pytest

from desmod.timescale import parse_time, scale_time


@pytest.mark.parametrize('test_input, expected', [
    ('12 s', (12, 's')),
    ('12s', (12, 's')),
    ('+12s', (12, 's')),
    ('-12s', (-12, 's')),
    ('12.0 s', (12.0, 's')),
    ('12. s', (12.0, 's')),
    ('+12.0 s', (12.0, 's')),
    ('-12.0 s', (-12.0, 's')),
    ('12.000 s', (12.0, 's')),
    ('1.2e1 s', (12.0, 's')),
    ('1.2e+1 s', (12.0, 's')),
    ('1.2e-1 s', (0.12, 's')),
    ('-1.2e-1 s', (-0.12, 's')),
    ('12.s', (12.0, 's')),
    ('12.0s', (12.0, 's')),
    ('12.000s', (12.0, 's')),
    ('1.2e1s', (12.0, 's')),
    ('.12e+2s', (12.0, 's')),
    ('.12s', (0.12, 's')),
    ('12 fs', (12, 'fs')),
    ('12 ps', (12, 'ps')),
    ('12 ns', (12, 'ns')),
    ('12 us', (12, 'us')),
    ('12 ms', (12, 'ms')),
    ('12.0ms', (12.0, 'ms')),
    ('s', (1, 's')),
    ('fs', (1, 'fs')),
])
def test_parse_time(test_input, expected):
    m, u = parse_time(test_input)
    assert (m, u) == expected
    assert isinstance(m, type(expected[0]))


@pytest.mark.parametrize('test_input', [
    '',
    '123 s',
    '123',
    '123.0',
    '123 S',
    '123 Ms',
    '123e1.3 s',
    '+-123 s',
    '123 ks',
    '. s',
    '1-.1 s',
    '1e1.2 s',
])
def test_parse_time_except(test_input):
    with pytest.raises(ValueError) as exc_info:
        parse_time(test_input)
    assert 'float' not in str(exc_info.value)


def test_parse_time_default():
    assert parse_time('123', default_unit='ms') == (123, 'ms')


@pytest.mark.parametrize('input_t, input_tscale, expected', [
    ((1, 'us'), (1, 'us'), 1),
    ((1, 'us'), (10, 'us'), 0.1),
    ((1000, 'us'), (1, 'ms'), 1),
    ((1, 'us'), (100, 'ms'), 1e-5),
    ((50, 'ms'), (1, 'ns'), 50000000),
    ((5.2, 'ms'), (1, 'us'), 5200),
])
def test_scale_time(input_t, input_tscale, expected):
    scaled = scale_time(input_t, input_tscale)
    assert expected == scaled
    assert isinstance(scaled, type(expected))
```
[ { "content": "Produce an exact reconstruction of the code:\n```python\nimport os\nfrom pathlib import Path\nimport unittest\n\nimport ray\nimport ray.rllib.agents.marwil as marwil\nfrom ray.rllib.utils.framework import try_import_tf\nfrom ray.rllib.utils.test_utils import check_compute_single_action, \\\n fr...
[ { "content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\nimport os\nfrom pathlib import Path\nimport unittest\n\nimport ray\nimport ray.rllib.agents.marwil as marwil\nfrom ray.rllib.utils.framework import try_import_tf\nfrom ray.rllib.utils.test_utils import check_compute_single_ac...
```python
import os
from pathlib import Path
import unittest

import ray
import ray.rllib.agents.marwil as marwil
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.test_utils import check_compute_single_action, \
    framework_iterator

tf1, tf, tfv = try_import_tf()


class TestMARWIL(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        ray.init()

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def test_marwil_compilation_and_learning_from_offline_file(self):
        """Test whether a MARWILTrainer can be built with all frameworks.

        And learns from a historic-data file.
        """
        rllib_dir = Path(__file__).parent.parent.parent.parent
        print("rllib dir={}".format(rllib_dir))
        data_file = os.path.join(rllib_dir, "tests/data/cartpole/large.json")
        print("data_file={} exists={}".format(data_file,
                                              os.path.isfile(data_file)))

        config = marwil.DEFAULT_CONFIG.copy()
        config["num_workers"] = 0  # Run locally.
        config["evaluation_num_workers"] = 1
        config["evaluation_interval"] = 1
        # Evaluate on actual environment.
        config["evaluation_config"] = {"input": "sampler"}
        # Learn from offline data.
        config["input"] = [data_file]
        num_iterations = 300

        # Test for all frameworks.
        for _ in framework_iterator(config):
            trainer = marwil.MARWILTrainer(config=config, env="CartPole-v0")
            for i in range(num_iterations):
                eval_results = trainer.train()["evaluation"]
                print("iter={} R={}".format(
                    i, eval_results["episode_reward_mean"]))
                # Learn until some reward is reached on an actual live env.
                if eval_results["episode_reward_mean"] > 60.0:
                    print("learnt!")
                    break

            check_compute_single_action(
                trainer, include_prev_action_reward=True)

            trainer.stop()


if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
```
[ { "content": "Produce an exact reconstruction of the code:\n```python\n#\n# Copyright (c) rPath, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, incl...
[ { "content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\n#\n# Copyright (c) rPath, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without r...
```python
#
# Copyright (c) rPath, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Michael DeHaan

import os
import optparse
import sys
import exceptions


class SubCommand(object):
    '''
    base class to a subcommand.  You will be extending this.
    '''

    def __init__(self, base_instance):
        ''' constructor, shouldn't be any need to override this '''
        self.base = base_instance

    def options(self):
        ''' what options does this command take?  Default to no options. '''
        return []

    def run(self, options, args):
        ''' implement this to traverse the options and decide how to run the command '''
        raise exceptions.NotImplementedError

    def name(self):
        ''' what is the name of the subcommand as triggered on the commmand line? '''
        return 'generic_subcommand_you_should_override_this'

    def description(self):
        ''' what description string to show when listing the subcommands '''
        return 'generic description, you should override this'

    def dispatch(self, cargs):
        ''' core function around kicking off the subcommand.  Don't override this. '''
        usage = "%s %s %s [options]" % (os.path.basename(cargs[0]), self.base.name(), self.name())
        parser = optparse.OptionParser(usage=usage)
        for option in self.options():
            (short, long, kw) = option
            parser.add_option(short, long, **kw)
        (options, args) = parser.parse_args(cargs)
        return self.run(options, args)


class BaseModule(object):
    ''' base class for a command category that contains subcommands '''

    def __init__(self):
        ''' constructor, no need to override this. '''
        pass

    def name(self):
        ''' what is the name of the category? '''
        raise exceptions.NotImplementedError

    def description(self):
        ''' explain what this command does in the help '''
        raise exceptions.NotImplementedError

    def sub_commands(self):
        ''' return a dictionary of valid subcommands by name '''
        raise exceptions.NotImplementedError

    def run(self, args):
        ''' defer to subcommands.  If you don't want subcommands, override this method! '''
        subs = self.sub_commands()
        if len(args) == 2 or args[2] in ['-h', '--help']:
            self.list_subcommands(args)
            return 1
        matched = [x for x in subs if x.name() == args[2]]
        if len(matched) == 1:
            print ""
            rc = matched[0].dispatch(args)
            print ""
            return rc
        elif len(matched) > 1:
            sys.stderr.write("error: multiple commands respond to (%s)\n\n" % (args[2]))
        else:
            sys.stderr.write("error: subcommand (%s) not found\n\n" % (args[2]))
            sys.stderr.write(
                "error: multiple subcommand modules found with this name")
        return 1

    def list_subcommands(self, args):
        ''' prints out the subcommands attached to this module.  Don't override this. '''
        print ""
        print "usage: %s %s <subcommand> [--options]" % (args[0], self.name())
        print ""
        print "  choose a subcommand:"
        print ""
        subs = self.sub_commands()
        for mod in subs:
            print "%20s - %s" % (mod.name(), mod.description())
        print ""


def register():
    ''' each module plugin must define a register function at top level that returns a module instance '''
    return BaseModule()
```
[ { "content": "```python\nfrom django.db import models\nfrom django.conf import settings\nimport bills.models as bill_models\n\nclass GovInfoScraper(models.Model):\n bill_name = models.CharField(max_length=100)\n bill_code = models.CharField(max_length=10)\n comment_startdate = models.DateField()\n c...
[ { "content": "<|memory_start|>```python\nfrom django.db import models\nfrom django.conf import settings\nimport bills.models as bill_models\n\nclass GovInfoScraper(models.Model):\n bill_name = models.CharField(max_length=100)\n bill_code = models.CharField(max_length=10)\n comment_startdate = models.Da...
```python
from django.db import models
from django.conf import settings
import bills.models as bill_models


class GovInfoScraper(models.Model):
    bill_name = models.CharField(max_length=100)
    bill_code = models.CharField(max_length=10)
    comment_startdate = models.DateField()
    comment_enddate = models.DateField()
    scrape_date = models.DateTimeField(auto_now_add=True)
    url = models.URLField(null=True, blank=True)
    reviewed = models.BooleanField(default=False)

    def convert_to_bill(self):
        if self.reviewed:
            raise bill_models.BillException("Cannot re-convert once already converted")
        bill = bill_models.Bill.objects.create(
            name=self.bill_name,
            code=self.bill_code,
        )
        bill_models.PreparliamentaryStage.objects.create(
            bill=bill,
            comments_start=self.comment_startdate,
            comments_end=self.comment_enddate,
            document_url=self.url
        )
        self.reviewed = True
        self.save()
        return bill

    def __unicode__(self):
        return "[%s] %s" % (self.bill_code, self.bill_name)


class BillsBeforeParliamentScraper(models.Model):
    bill_name = models.CharField(max_length=100)
    bill_code = models.CharField(max_length=10)
    introduced_by = models.CharField(max_length=100)
    date_introduced = models.DateField()
    bill_stage = models.CharField(max_length=3, choices=[
        ("1", "National Assembly"),
        ("2", "NCOP"),
        ("3", "Sent to President"),
        ("4", "Finalised in an Act"),
        ("5", "Withdrawn"),
    ])
    document_number = models.CharField(max_length=10)
    url = models.URLField(null=True, blank=True)
    committee = models.CharField(max_length=100, null=True, blank=True)
    reviewed = models.BooleanField(default=False)

    # TODO - add NCOP and Presidential stages

    def convert_to_bill(self):
        if self.reviewed:
            raise bill_models.BillException("Cannot re-convert once already converted")
        try:
            bill = bill_models.Bill.objects.get(code=self.bill_code)
        except bill_models.Bill.DoesNotExist:
            bill = bill_models.Bill.objects.create(
                name=self.bill_name,
                code=self.bill_code,
            )
        bill_models.ParliamentIntroduction.objects.create(
            bill=bill,
            introduced_by=self.introduced_by,
            date_introduced=self.date_introduced,
            document_number=self.document_number,
            url=self.url
        )
        if self.committee:
            bill_models.ParliamentPortfolioCommittee.objects.create(
                bill=bill,
                committee=self.committee
            )
        self.reviewed = True
        self.save()
        return bill

    def __unicode__(self):
        return "[%s] %s" % (self.bill_code, self.bill_name)

    class Meta:
        verbose_name_plural = "Bills before parliament"
        verbose_name = "Bills before parliament"


class ParliamentMinutesScraper(models.Model):
    filename = models.FileField(upload_to=settings.DIR_PARLIAMENT_MINUTES)
    house = models.CharField(max_length=20)
    language = models.CharField(max_length=20)
    date = models.DateField()
    scrape_date = models.DateTimeField(auto_now_add=True)
    url = models.URLField()

    def __unicode__(self):
        return "%s - %s" % (self.scrape_date, self.house)
```
[ { "content": "Here is the script:\n```python\n\"\"\"Testing facility for conkit.io.FastaIO\"\"\"\n\n__author__ = \"Felix Simkovic\"\n__date__ = \"09 Sep 2016\"\n\nimport os\nimport unittest\n\nfrom conkit.io.clustal import ClustalParser\nfrom conkit.io._iotools import create_tmp_f\n\n\nclass TestClustalParser(u...
[ { "content": "Here is the script:\n<|memory_start|>```python\n\"\"\"Testing facility for conkit.io.FastaIO\"\"\"\n\n__author__ = \"Felix Simkovic\"\n__date__ = \"09 Sep 2016\"\n\nimport os\nimport unittest\n\nfrom conkit.io.clustal import ClustalParser\nfrom conkit.io._iotools import create_tmp_f\n\n\nclass Tes...
```python """Testing facility for conkit.io.FastaIO""" __author__ = "Felix Simkovic" __date__ = "09 Sep 2016" import os import unittest from conkit.io.clustal import ClustalParser from conkit.io._iotools import create_tmp_f class TestClustalParser(unittest.TestCase): def test_read_1(self): seq = """CLUSTAL W seq_0 MLDLEVVPE-RSLGNEQW-------E-F-TLG-MPLAQAV-AILQKHC-- seq_0 -RIIKNVQV """ f_name = create_tmp_f(content=seq) parser = ClustalParser() with open(f_name, 'r') as f_in: sequence_file = parser.read(f_in) sequence_entry = sequence_file.top_sequence ref_id = "seq_0" self.assertEqual(ref_id, sequence_entry.id) ref_seq = "MLDLEVVPE-RSLGNEQW-------E-F-TLG-MPLAQAV-AILQKHC---RIIKNVQV" self.assertEqual(ref_seq, sequence_entry.seq) os.unlink(f_name) def test_read_2(self): msa = """CLUSTAL W seq_0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA seq_1 BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB seq_2 CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC ************************************************** seq_0 AAAAAAAAA seq_1 BBBBBBBBB seq_2 CCCCCCCCC ********* """ f_name = create_tmp_f(content=msa) parser = ClustalParser() with open(f_name, 'r') as f_in: sequence_file = parser.read(f_in) for i, sequence_entry in enumerate(sequence_file): if i == 0: self.assertEqual('seq_0', sequence_entry.id) self.assertEqual('AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', sequence_entry.seq) elif i == 1: self.assertEqual('seq_1', sequence_entry.id) self.assertEqual('BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB', sequence_entry.seq) elif i == 2: self.assertEqual('seq_2', sequence_entry.id) self.assertEqual('CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC', sequence_entry.seq) os.unlink(f_name) def test_read_3(self): msa = """CLUSTAL FORMAT for seq_0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA seq_1 BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB seq_2 CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC seq_0 AAAAAAAAA seq_1 BBBBBBBBB seq_2 CCCCCCCCC """ f_name = create_tmp_f(content=msa) parser = ClustalParser() with open(f_name, 'r') as f_in: sequence_file = parser.read(f_in) for i, sequence_entry in enumerate(sequence_file): if i == 0: self.assertEqual('seq_0', sequence_entry.id) self.assertEqual('AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', sequence_entry.seq) elif i == 1: self.assertEqual('seq_1', sequence_entry.id) self.assertEqual('BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB', sequence_entry.seq) elif i == 2: self.assertEqual('seq_2', sequence_entry.id) self.assertEqual('CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC', sequence_entry.seq) os.unlink(f_name) def test_write_1(self): seq = [ "CLUSTAL FORMAT written with ConKit", "", "seq_0\tAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "seq_1\tBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB", "" "seq_0\tAAAAAAAAAAAAAAAAAAAAAA", "seq_1\tBBBBBBBBBBBBBBBBBBBBBB", ] joinedseq = '\n'.join(seq) f_name_in = create_tmp_f(content=joinedseq) f_name_out = create_tmp_f() parser = ClustalParser() with open(f_name_in, 'r') as f_in, open(f_name_out, 'w') as f_out: sequence_file = parser.read(f_in) parser.write(f_out, sequence_file) with open(f_name_out, 'r') as f_in: output = f_in.read().splitlines() self.assertEqual(seq, output) map(os.unlink, [f_name_in, f_name_out]) if __name__ == "__main__": unittest.main(verbosity=2) ```
[ { "content": "Produce an exact reconstruction of the code:\n```python\n'''\nCopyright (C) 2011 Mihnea Dobrescu-Balaur\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nas published by the Free Software Foundation; either version 2\nof...
[ { "content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\n'''\nCopyright (C) 2011 Mihnea Dobrescu-Balaur\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nas published by the Free Software Foundation; eith...
```python
'''
Copyright (C) 2011 Mihnea Dobrescu-Balaur

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
'''
from subprocess import Popen, PIPE


def gather_from_cmdline():
    ''' returns a list of strings which represent the kernel boot arguments '''
    output = Popen(('cat', '/proc/cmdline'), stdout=PIPE).communicate()[0]
    # process the output
    output = output.split()
    output = output[3:]
    return output


if __name__ == '__main__':
    test = gather_from_cmdline()
    print test
```
[ { "content": "Recreate the entire code block with identical formatting:\n```python\n# Copyright 2020 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the...
[ { "content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n# Copyright 2020 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obta...
```python
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A script to check that travis.yml file & protractor.conf.js have the
same e2e test suites.
"""

from __future__ import absolute_import  # pylint: disable=import-only-modules
from __future__ import unicode_literals  # pylint: disable=import-only-modules

import os
import re

import python_utils
import utils

# These 4 test suites are not present in travis ci.
# One is extra (ie. (full: [*.js])) and three other test suites are
# are being run by CircleCI.
TEST_SUITES_NOT_RUN_ON_TRAVIS = [
    'full', 'accessibility', 'adminPage', 'classroomPage',
    'classroomPageFileUploadFeatures', 'collections', 'contributorDashboard',
    'fileUploadExtensions', 'fileUploadFeatures', 'library', 'navigation',
    'playVoiceovers', 'preferences', 'profileFeatures', 'profileMenu',
    'publication', 'subscriptions', 'topicsAndSkillsDashboard',
    'topicAndStoryEditor', 'topicAndStoryEditorFileUploadFeatures', 'users']

TRAVIS_CI_FILE_PATH = os.path.join(os.getcwd(), '.travis.yml')
PROTRACTOR_CONF_FILE_PATH = os.path.join(
    os.getcwd(), 'core', 'tests', 'protractor.conf.js')

SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST = 'coreEditorAndPlayerFeatures'


def get_e2e_suite_names_from_jobs_travis_yml_file():
    """Extracts the test suites from env/jobs section from
    the .travis.yml file.

    Returns:
        list(str). An alphabetically-sorted list of names of test suites
        from the jobs section in the .travis.yml file.
    """
    travis_file_content = read_and_parse_travis_yml_file()
    jobs_str = python_utils.convert_to_bytes(travis_file_content['env']['jobs'])
    suites_from_jobs = []
    # The following line extracts the test suite name from the jobs section
    # that is in the form RUN_E2E_TESTS_ACCESSIBILITY=true.
    test_regex = re.compile(r'RUN_E2E_TESTS_([A-Z_]*)=')
    jobs = test_regex.findall(jobs_str)
    for job in jobs:
        suites_from_jobs.append(
            utils.snake_case_to_camel_case(job.lower()))

    return sorted(suites_from_jobs)


def get_e2e_suite_names_from_script_travis_yml_file():
    """Extracts the script section from the .travis.yml file.

    Returns:
        list(str). An alphabetically-sorted list of names of test suites
        from the script section in the .travis.yml file.
    """
    travis_file_content = read_and_parse_travis_yml_file()
    script_str = python_utils.convert_to_bytes(travis_file_content['script'])
    # The following line extracts the test suites from patterns like
    # python -m scripts.run_e2e_tests --suite="accessibility".
    e2e_test_suite_regex = re.compile(r'--suite="([a-zA-Z_-]*)"')
    suites_list = e2e_test_suite_regex.findall(script_str)

    return sorted(suites_list)


def get_e2e_suite_names_from_protractor_file():
    """Extracts the test suites section from the protractor.conf.js file.

    Returns:
        list(str). An alphabetically-sorted list of names of test suites
        from the protractor.conf.js file.
    """
    protractor_config_file_content = read_protractor_conf_file()
    # The following line extracts suite object from protractor.conf.js.
    suite_object_string = re.compile(
        r'suites = {([^}]+)}').findall(protractor_config_file_content)[0]
    # The following line extracts the keys/test suites from the "key: value"
    # pair from the suites object.
    key_regex = re.compile(r'\b([a-zA-Z_-]*):')
    protractor_suites = key_regex.findall(suite_object_string)

    return sorted(protractor_suites)


def read_protractor_conf_file():
    """Returns the contents of core/tests/protractor.conf.js file.

    Returns:
        str. The contents of protractor.conf.js, as a string.
    """
    protractor_config_file_content = python_utils.open_file(
        PROTRACTOR_CONF_FILE_PATH, 'r').read()
    return protractor_config_file_content


def read_and_parse_travis_yml_file():
    """Returns the contents of .travis.yml, as a dict.

    Returns:
        dict. Contents of the .travis.yml file parsed as a dict.
    """
    travis_ci_file_content = python_utils.open_file(
        TRAVIS_CI_FILE_PATH, 'r').read()
    travis_ci_dict = utils.dict_from_yaml(travis_ci_file_content)
    return travis_ci_dict


def get_e2e_test_filenames_from_protractor_dir():
    """Extracts the names of the all test files in core/tests/protractor
    and core/tests/protractor_desktop directory.

    Returns:
        list(str). An alphabetically-sorted list of of the all test files
        in core/tests/protractor and core/tests/protractor_desktop directory.
    """
    protractor_test_suite_files = []
    protractor_files = os.path.join(
        os.getcwd(), 'core', 'tests', 'protractor')
    protractor_desktop_files = os.path.join(
        os.getcwd(), 'core', 'tests', 'protractor_desktop')
    for file_name in os.listdir(protractor_files):
        protractor_test_suite_files.append(file_name)
    for file_name in os.listdir(protractor_desktop_files):
        protractor_test_suite_files.append(file_name)

    return sorted(protractor_test_suite_files)


def get_e2e_test_filenames_from_protractor_conf_file():
    """Extracts the filenames from the suites object of
    protractor.conf.js file.

    Returns:
        list(str). An alphabetically-sorted list of filenames extracted
        from the protractor.conf.js file.
    """
    protractor_config_file_content = read_protractor_conf_file()
    # The following line extracts suite object from protractor.conf.js.
    suite_object_string = re.compile(
        r'suites = {([^}]+)}').findall(protractor_config_file_content)[0]
    test_files_regex = re.compile(r'/([a-zA-Z]*.js)')
    e2e_test_files = test_files_regex.findall(suite_object_string)

    return sorted(e2e_test_files)


def main():
    """Test the travis ci file and protractor.conf.js to have same
    e2e test suites.
    """
    python_utils.PRINT(
        'Checking all e2e test files are captured '
        'in protractor.conf.js...')
    protractor_test_suite_files = get_e2e_test_filenames_from_protractor_dir()
    protractor_conf_test_suites = (
        get_e2e_test_filenames_from_protractor_conf_file())
    if not protractor_test_suite_files == protractor_conf_test_suites:
        raise Exception(
            'One or more test file from protractor or protractor_desktop '
            'directory is missing from protractor.conf.js')
    python_utils.PRINT('Done!')

    python_utils.PRINT('Checking e2e tests are captured in .travis.yml...')
    protractor_test_suites = get_e2e_suite_names_from_protractor_file()
    travis_e2e_suites = get_e2e_suite_names_from_jobs_travis_yml_file()
    travis_e2e_scripts = get_e2e_suite_names_from_script_travis_yml_file()
    for excluded_test in TEST_SUITES_NOT_RUN_ON_TRAVIS:
        protractor_test_suites.remove(excluded_test)

    if not travis_e2e_suites:
        raise Exception(
            'The e2e test suites that have been extracted from '
            'jobs section from travis.ci are empty.')

    if not travis_e2e_scripts:
        raise Exception(
            'The e2e test suites that have been extracted from '
            'script section from travis.ci are empty.')

    if not protractor_test_suites:
        raise Exception(
            'The e2e test suites that have been extracted from '
            'protractor.conf.js are empty.')

    if SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST not in travis_e2e_scripts:
        raise Exception(
            '{} is expected to be in the e2e test suites '
            'extracted from the script section of .travis.yml '
            'file, but it is missing.'
            .format(SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST))

    if SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST not in protractor_test_suites:
        raise Exception(
            '{} is expected to be in the e2e test suites '
            'extracted from the protractor.conf.js file, '
            'but it is missing.'
            .format(SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST))

    if protractor_test_suites != travis_e2e_scripts:
        raise Exception(
            'Protractor test suites and Travis Ci test suites are not in sync.')
    python_utils.PRINT('Done!')


# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when check_e2e_tests_are_captured_in_ci.py
# is used as a script.
if __name__ == '__main__':  # pragma: no cover
    main()
```
[ { "content": "```python\n#!/usr/bin/env python3\n# grouping algorithms for images and videos\n# PixPack Photo Organiser\n\nimport re\nimport os\n\ndef group_by_dates(date_meta, destination, pattern='ym'):\n # generate folder name by using basic date informations\n # available patterns: yr=2017, ym=2017-03...
[ { "content": "<|memory_start|>```python\n#!/usr/bin/env python3\n# grouping algorithms for images and videos\n# PixPack Photo Organiser\n\nimport re\nimport os\n\ndef group_by_dates(date_meta, destination, pattern='ym'):\n # generate folder name by using basic date informations\n # available patterns: yr=...
```python
#!/usr/bin/env python3
# grouping algorithms for images and videos
# PixPack Photo Organiser

import re
import os


def group_by_dates(date_meta, destination, pattern='ym'):
    # generate folder name by using basic date informations
    # available patterns: yr=2017, ym=2017-03, ss=summer
    # exif date format -> 2006:03:25 21:34:24
    # return dest_dir
    if date_meta == "NOT_FOUND":
        return os.path.join(destination, "NOT_FOUND")

    ymd_format = re.match(r"(\d{4}):(\d{2}):(\d{2}) (\d{2}):(\d{2}):(\d{2})", date_meta)
    year = ymd_format.group(1)
    month = ymd_format.group(2)
    day = ymd_format.group(3)
    hour = ymd_format.group(4)
    minute = ymd_format.group(5)
    second = ymd_format.group(6)

    # group by year
    if pattern.lower() == 'yr':
        dest_folder_name = year
    elif pattern.lower() == 'ym':
        dest_folder_name = "{year}-{month}".format(year=year, month=month)
    elif pattern.lower() == 'ss':
        if int(month) in (12, 1, 2):
            dest_folder_name = "Winter"
        elif int(month) in (3, 4, 5):
            dest_folder_name = "Spring"
        elif int(month) in (6, 7, 8):
            dest_folder_name = "Summer"
        elif int(month) in (9, 10, 11):
            dest_folder_name = "Fall"

    return os.path.join(destination, dest_folder_name)
```
[ { "content": "Here is the source code:\n```python\nimport pymongo\n\nfrom barin import schema as S\nfrom barin import event\n\n\n# TODO: test backref logic\n\n\nclass InstanceManager(object):\n\n def __init__(self, manager, instance):\n self._manager = manager\n self.instance = instance\n\n ...
[ { "content": "Here is the source code:\n<|memory_start|>```python\nimport pymongo\n\nfrom barin import schema as S\nfrom barin import event\n\n\n# TODO: test backref logic\n\n\nclass InstanceManager(object):\n\n def __init__(self, manager, instance):\n self._manager = manager\n self.instance = ...
```python
import pymongo

from barin import schema as S
from barin import event


# TODO: test backref logic


class InstanceManager(object):

    def __init__(self, manager, instance):
        self._manager = manager
        self.instance = instance

    def __getattr__(self, name):
        return getattr(self._manager, name)

    def __dir__(self):
        return dir(self._manager) + list(self.__dict__.keys())

    def synchronize(self, isdel=False):
        '''Sync all backrefs'''
        _id = self.instance['_id']
        for fname, f in self.fields.items():
            if f.backref:
                v = f.__get__(self.instance)
                other_cls = self.metadata[f.backref.cname]
                other_fld = other_cls.m.fields[f.backref.fname]
                if isinstance(f._schema, S.Array):
                    if isinstance(other_fld._schema, S.Array):
                        self._sync_m2m(_id, f, v, other_cls, other_fld, isdel)
                    else:
                        self._sync_o2m(_id, f, v, other_cls, other_fld, isdel)
                else:
                    if isinstance(other_fld._schema, S.Array):
                        self._sync_m2o(_id, f, v, other_cls, other_fld, isdel)
                    else:
                        self._sync_o2o(_id, f, v, other_cls, other_fld, isdel)

    @event.with_hooks('insert')
    def insert(self):
        return self._manager.insert_one(self.instance)

    @event.with_hooks('delete')
    def delete(self):
        return self._manager.delete_one(
            {'_id': self.instance._id})

    @event.with_hooks('replace')
    def replace(self, **kwargs):
        return self._manager.replace_one(
            {'_id': self.instance._id},
            self.instance,
            **kwargs)

    @event.with_hooks('update')
    def update(self, update_spec, **kwargs):
        refresh = kwargs.pop('refresh', False)
        if refresh:
            obj = self._manager.find_one_and_update(
                {'_id': self.instance._id},
                update_spec,
                return_document=pymongo.ReturnDocument.AFTER,
                **kwargs)
            if obj:
                self.instance.clear()
                self.instance.update(obj)
            else:
                # Object has been deleted
                return None
        else:
            return self._manager.update_one(
                {'_id': self.instance._id},
                update_spec,
                **kwargs)

    def _sync_m2m(self, this_id, this_fld, this_val, other_cls, other_fld, isdel):
        "this is an array, other is an array"
        q = other_cls.m.query.match(other_fld == this_id)
        if not isdel:
            q = q.match(other_cls._id.nin(this_val))
        q.update_many(other_fld.pull(this_id))
        if isdel:
            return
        q = (other_cls.m.query
             .match(other_cls._id.in_(this_val)))
        q.update_many(other_fld.add_to_set(this_id))

    def _sync_o2m(self, this_id, this_fld, this_val, other_cls, other_fld, isdel):
        "this is an array, other is a scalar"
        q = other_cls.m.query.match(other_fld == this_id)
        if not isdel:
            q = q.match(other_cls._id.nin(this_val))
        q.update_many(other_fld.set(None))
        if isdel:
            return
        q = (other_cls.m.query
             .match(other_cls._id.in_(this_val)))
        q.update_many(other_fld.set(this_id))

    def _sync_m2o(self, this_id, this_fld, this_val, other_cls, other_fld, isdel):
        "this is a scalar, other is an array"
        q = other_cls.m.query.match(other_fld == this_id)
        if not isdel:
            q = q.match(other_cls._id != this_val)
        q.update_many(other_fld.pull(this_id))
        if isdel:
            return
        q = other_cls.m.query.match(other_cls._id == this_val)
        q.update_one(other_fld.add_to_set(this_id))

    def _sync_o2o(self, this_id, this_fld, this_val, other_cls, other_fld, isdel):
        "this is a scalar, other is a scalar"
        q = other_cls.m.query.match(other_fld == this_id)
        if not isdel:
            q = q.match(other_cls._id != this_val)
        q.update_many(other_fld.set(None))
        if isdel:
            return
        q = other_cls.m.query.match(other_cls._id == this_val)
        q.update_one(other_fld.set(this_id))
```
[ { "content": "```python\n# -*- coding: utf-8 -*-\nimport leancloud\nfrom leancloud import Object\nfrom leancloud import LeanCloudError\nfrom leancloud import Query\nfrom leancloud import User\n\n\nfrom wsgi import signer\n\n\n\nnot_binary_label_dict = {'field':['field__manufacture', 'field__financial', 'field__...
[ { "content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\nimport leancloud\nfrom leancloud import Object\nfrom leancloud import LeanCloudError\nfrom leancloud import Query\nfrom leancloud import User\n\n\nfrom wsgi import signer\n\n\n\nnot_binary_label_dict = {'field':['field__manufacture', 'field__fina...
```python # -*- coding: utf-8 -*- import leancloud from leancloud import Object from leancloud import LeanCloudError from leancloud import Query from leancloud import User from wsgi import signer not_binary_label_dict = {'field':['field__manufacture', 'field__financial', 'field__infotech', 'field__law', 'field__agriculture', 'field__human_resource', 'field__commerce', 'field__natural', 'field__service', 'field__humanities', 'field__medical', 'field__architecture', 'field__athlete'], 'age':['age__16to35', 'age__35to55', 'age__55up', 'age__16down'], 'sport':['sport__basketball', 'sport__bicycling', 'sport__tabel_tennis', 'sport__football', 'sport__jogging', 'sport__badminton', 'sport__fitness'],'consumption': ['consumption__10000to20000', 'consumption__20000up', 'consumption__5000to10000', 'consumption__5000down'], 'occupation':['occupation__freelancer', 'occupation__supervisor', 'occupation__student', 'occupation__others', 'occupation__official', 'occupation__salesman', 'occupation__teacher', 'occupation__soldier', 'occupation__engineer']} binary_label_list = [u'ACG', u'indoorsman', u'game_show', u'has_car', u'game_news', u'entertainment_news', u'health', u'online_shopping', u'variety_show', u'business_news', u'tvseries_show', u'current_news', u'sports_news', u'tech_news', u'offline_shopping', u'pregnant', u'gender', u'study', u'married', u'sports_show', u'gamer', u'social', u'has_pet'] query_limit = 1000 class Dashboard: def __init__(self): self.app_id = None pass def get_all_tracker(self): try: # 这里不能认为终端用户的数量少于1000 Application = Object.extend('Application') query = Query(Application) query.equal_to('app_id',self.app_id) query.ascending('createdAt') query.limit(1000) result_list = query.find() all_tracker_dict = {} if result_list: for result in result_list: all_tracker_dict[result.get('username')] = result.id self.all_tracker_dict = all_tracker_dict return 1 except LeanCloudError,e: print e return 0 def get_age_and_gender_data_dict(self,table_name='AppStaticInfo',filed_name = 'app'): try: Application = Object.extend('Application') query = Query(Application) query.equal_to('app_id',self.app_id) result_list = query.find() length = len(result_list) if length==0: print 'error: application not exists in table Applicaiton' return 0 elif length != 1: print 'error: multi application exists in table Applicaiton' return 0 else: app = result_list[0] DbTable = Object.extend(table_name) query = Query(DbTable) query.equal_to(filed_name,app) result_list = query.find() length = len(result_list) if length==0: print 'error: application not exists in table %s' %(str(table_name)) return 0 elif length != 1: print 'error: multi application exists in table %s' %(str(table_name)) return 0 else: app_static_info = result_list[0] age_and_gender_dict = app_static_info.get('age_and_gender') return age_and_gender_dict # WeightedStaticInfo = Object.extend('WeightedStaticInfo') # query = Query(WeightedStaticInfo) # query.exists('objectId') # query.select('age','gender') # staticInfoList = query.find() # gender_type_list =['man','woman'] # age_type_list = ['16down','16to35','35to55','55up'] # dataDict ={gender_type:{age_type:0 for age_type in age_type_list} for gender_type in gender_type_list} # # for staticInfo in staticInfoList: # gender = 'man' if staticInfo.get('gender') >0 else 'woman' # age_info_dict= staticInfo.get('age') # dataDict[gender][age_info_dict.keys()[0]] += 1 # # dataDict ={'man' if staticInfo.get('gender') >0 else 'woman':dataDict['man' if staticInfo.get('gender') >0 else 
'woman'][staticInfo.get('age').keys()[0]] +=1 for staticInfo in staticInfoList} # new_data_dict = {key:[0 for i in range(4)] for key in dataDict.keys()} # for index ,age_type in enumerate(age_type_list): # for gender_type in dataDict.keys(): # new_data_dict[gender_type][index] = dataDict[gender_type][age_type] except LeanCloudError, e: raise e return age_and_gender_dict def get_occupation_data_dict(self): try: WeightedStaticInfo = Object.extend('WeightedStaticInfo') query = Query(WeightedStaticInfo) query.exists('objectId') staticInfoList = query.find() dataDict ={gender_type:{age_type:0 for age_type in age_type_list} for gender_type in gender_type_list} for staticInfo in staticInfoList: gender = 'man' if staticInfo.get('gender') >0 else 'woman' age_info_dict= staticInfo.get('age') dataDict[gender][age_info_dict.keys()[0]] += 1 # dataDict ={'man' if staticInfo.get('gender') >0 else 'woman':dataDict['man' if staticInfo.get('gender') >0 else 'woman'][staticInfo.get('age').keys()[0]] +=1 for staticInfo in staticInfoList} new_data_dict = {key:[0 for i in range(4)] for key in dataDict.keys()} for index ,age_type in enumerate(age_type_list): for gender_type in dataDict.keys(): new_data_dict[gender_type][index] = dataDict[gender_type][age_type] except LeanCloudError, e: raise e return new_data_dict #下面三个函数的代码可以优化合并 def get_location_distribution_data_dict(self): field = 'location' k = 5 unknown = 'unknown' try: WeightedStaticInfo = Object.extend('WeightedUserContext') query = Query(WeightedStaticInfo) query.exists('objectId') query.select(field) # 这个地方后面需要做根据applicationid查询 #另外也需要分组查询 resultList = query.find() seen_location_dict = {} user_count = len(resultList) for result in resultList: location_dict = result.get(field) for key, value in location_dict.items(): if key in seen_location_dict.keys(): seen_location_dict[key] += location_dict[key] else: seen_location_dict[key] = location_dict[key] total_unknown_location_value = seen_location_dict.get(unknown) #如果seen_location_dict中含有unknown字段的话,就删掉 if total_unknown_location_value: del seen_location_dict[unknown] sorted_seen_location = sorted(seen_location_dict.items(), key=lambda l: l[1], reverse=True) sorted_frequent_location = sorted_seen_location[0:k] total_known_time = user_count - total_unknown_location_value sorted_frequent_location_percentage = [(str(kv[0]),(kv[1]/total_known_time)) for kv in sorted_frequent_location] sorted_frequent_location_percentage.append(('others',1-sum([kv[1] for kv in sorted_frequent_location_percentage]))) except LeanCloudError, e: raise e return sorted_frequent_location_percentage def get_motion_distribution_data_dict(self): field = 'motion' k = 5 unknown = 'unknown' try: WeightedStaticInfo = Object.extend('WeightedUserContext') query = Query(WeightedStaticInfo) query.exists('objectId') query.select(field) # 这个地方后面需要做根据applicationid查询 #另外也需要分组查询 resultList = query.find() seen_location_dict = {} user_count = len(resultList) for result in resultList: location_dict = result.get(field) for key, valu in location_dict.items(): if key in seen_location_dict.keys(): seen_location_dict[key] += location_dict[key] else: seen_location_dict[key] = location_dict[key] total_unknown_location_value = seen_location_dict.get(unknown) #如果seen_location_dict中含有unknown字段的话,就删掉 if total_unknown_location_value: del seen_location_dict[unknown] sorted_seen_location = sorted(seen_location_dict.items(), key=lambda l: l[1], reverse=True) sorted_frequent_location = sorted_seen_location[0:k] total_known_time = user_count - total_unknown_location_value 
sorted_frequent_location_percentage = [(str(kv[0]),(kv[1]/total_known_time)) for kv in sorted_frequent_location] sorted_frequent_location_percentage.append(('others',1-sum([kv[1] for kv in sorted_frequent_location_percentage]))) except LeanCloudError, e: raise e return sorted_frequent_location_percentage def get_sound_distribution_data_dict(self): field = 'sound' k = 5 unknown = 'unknown' try: WeightedStaticInfo = Object.extend('WeightedUserContext') query = Query(WeightedStaticInfo) query.exists('objectId') query.select(field) # 这个地方后面需要做根据applicationid查询 #另外也需要分组查询 resultList = query.find() seen_location_dict = {} user_count = len(resultList) for result in resultList: location_dict = result.get(field) for key, valu in location_dict.items(): if key in seen_location_dict.keys(): seen_location_dict[key] += location_dict[key] else: seen_location_dict[key] = location_dict[key] total_unknown_location_value = seen_location_dict.get(unknown) #如果seen_location_dict中含有unknown字段的话,就删掉 if total_unknown_location_value: del seen_location_dict[unknown] sorted_seen_location = sorted(seen_location_dict.items(), key=lambda l: l[1], reverse=True) sorted_frequent_location = sorted_seen_location[0:k] total_known_time = user_count - total_unknown_location_value sorted_frequent_location_percentage = [(str(kv[0]),(kv[1]/total_known_time)) for kv in sorted_frequent_location] sorted_frequent_location_percentage.append(('others',1-sum([kv[1] for kv in sorted_frequent_location_percentage]))) except LeanCloudError, e: raise e return sorted_frequent_location_percentage def get_event_to_activity_data(self,application_id,event_name,db_name='EventActivity'): try: DbTable = Object.extend(db_name) query = Query(DbTable) #这里只是测试知道是少于1K条的 query.equal_to('event_name',event_name) # query.equal_to('application_id',application_id) query.descending('createdAt') query.limit(1) result = query.find() activity_statistics_dict = result[0].get('activity_dict') except LeanCloudError, e: raise e return activity_statistics_dict # query.select('user','timestamp') # resultList = query.find() # DBTable = Object.extend('MergedUserContext') # activity_dict = {} # total_count = len(resultList) # print 'the length of resultList is : %s' %(str(total_count)) # for index1,result in enumerate(resultList): # query = Query(DBTable) # query.equal_to('user',result.get('user')) # query.less_than_or_equal_to('startTime',result.get('timestamp')) # query.greater_than_or_equal_to('endTime',result.get('timestamp')) # resultList1 = query.find() # if len(resultList1) == 1 or len(resultList1) == 2 : # activity = resultList1[0].get('event')[0] # if activity in activity_dict.keys(): # activity_dict[activity]+=1 # else: # activity_dict[activity] =1 # else: # print 'length of resultList1: %s' %(str(len(resultList1))) # print 'Seems to be an error,index: %s,user: %s; timestamp: %s \n' %(str(index1),str(result.get('user').id),str(result.get('timestamp'))) # # activity_dict['others'] = total_count-sum(activity_dict.values()) #注意这里的Developer并没有继承自User class Developer: def __init__(self,user_id=None): self.user = User() self.user_id = user_id @classmethod def is_valid_email(self,email): query = Query(User) query.exists('email',email) return 0 if query.find() else 1; def signup(self,email,username,password): self.user.set('email',email) self.user.set('username',username) self.user.set('password',password) try: result = self.user.sign_up() print result return 1 except LeanCloudError,e: print e return 0 def login_with_email(self,email,password): # 
self.user.login(email,password) pass def login_with_username(self,username,password): try: self.user.login(username,password) self.user_id = self.user.id self.session_token = self.user.get_session_token() print 'user.id: %s' %(str(self.user_id)) print 'session_token: %s' %(str(self.session_token)) return 1 except LeanCloudError,e: print e return 0 def init_developer_with_user_id(self,user_id): query = Query(User) query.equal_to('objectId',user_id) result = query.find() if len(result)==1: return result[0] else: print len(result) print user_id def get_all_application(self): try: # 这里认为应用的数量少于1000 Application = Object.extend('Application') query = Query(Application) query.equal_to('user',self.user.become(self.session_token)) query.ascending('createdAt') query.limit(1000) result_list = query.find() all_application_dict = {} if result_list: for result in result_list: all_application_dict[result.get('app_name')] = result.get('app_id') self.all_application_dict = all_application_dict return 1 except LeanCloudError,e: print e return 0 def get_all_tracker(self): try: # 这里认为应用的数量少于1000 Tracker = Object.extend('Tracker') query = Query(Tracker) query.exists('objectId') query.ascending('createdAt') query.limit(1000) result_list = query.find() all_tracker_dict = {} if result_list: for result in result_list: all_tracker_dict[result.get('username')] = result.id self.all_tracker_dict = all_tracker_dict return 1 except LeanCloudError,e: print e return 0 def create_new_app(self,app_name): try: user = self.user.become(self.session_token) Application = Object.extend('Application') application = Application() query = Query(Application) query.equal_to('user',user) query.equal_to('app_name',app_name) if query.find(): print 'Application name exists!' return 0 else: application.set('app_name',app_name) application.set('user',user) application.save() app_id = application.id app_key = (signer.sign(app_id).split(app_id+'.'))[1] # app_key = app_id+"this is app_key" application.set('app_id',app_id) application.set('app_key',app_key) application.save() return 1 except LeanCloudError,e: print e return 0 # query = Query(Application) # app_id = signer.sign(app_name).split(app_name+'.')[1] # query.equal_to('user',user) # query.equal_to('app_id',app_id) # if query.find(): # print 'Application name exists' # return 0 # else: # application.set('app_name',app_name) # application.set('app_id',app_id) # application.set('user',user) # application.save() def connect_new_tracker(self,tracker_id='',app_id=''): try: user = self.user.become(self.session_token) Application = Object.extend('Application') query = Query(Application) query.equal_to('user',user) query.equal_to('app_id',app_id) app_list = query.find() if len(app_list)!=1: print 'error with the length of app_list: %s' %(str(len(app_list))) return 0 else: the_app = app_list[0] print 'successfully get the app with app_id: %s' %(str(the_app.id)) Tracker = Object.extend('Tracker') query = Query(Tracker) query.equal_to('objectId',tracker_id) tracker_list = query.find() if len(tracker_list) != 1: print "error with the length of tracker_list: %s" %(str(len(tracker_list))) return 0 else: the_tracker = tracker_list[0] print 'successfully get the tracker with object_id: %s' %(str(the_tracker.id)) app_relation_to_tracker = the_app.relation('tracker') # tracker_relation_to_app = the_tracker.relation('application') app_relation_to_tracker.add(the_tracker) # tracker_relation_to_app.add(the_app) print 'ready to save' # the_tracker.save() # print 'successful save the_tracker' the_app.save() 
print 'successful save the_app' return 1 except LeanCloudError,e: print e return 0 # # -*- coding: utf-8 -*- # import leancloud # from leancloud import Object # from leancloud import LeanCloudError # from leancloud import Query # from leancloud import User # import time # import datetime # import operator # import numpy as np # from logentries import LogentriesHandler # import logging # # from flask import current_app # # from wsgi import signer # # # not_binary_label_dict = {'field':['field__manufacture', 'field__financial', 'field__infotech', 'field__law', 'field__agriculture', 'field__human_resource', 'field__commerce', 'field__natural', 'field__service', 'field__humanities', 'field__medical', 'field__architecture', 'field__athlete'], 'age':['age__16to35', 'age__35to55', 'age__55up', 'age__16down'], 'sport':['sport__basketball', 'sport__bicycling', 'sport__tabel_tennis', 'sport__football', 'sport__jogging', 'sport__badminton', 'sport__fitness'],'consumption': ['consumption__10000to20000', 'consumption__20000up', 'consumption__5000to10000', 'consumption__5000down'], 'occupation':['occupation__freelancer', 'occupation__supervisor', 'occupation__student', 'occupation__others', 'occupation__official', 'occupation__salesman', 'occupation__teacher', 'occupation__soldier', 'occupation__engineer']} # binary_label_list = [u'ACG', u'indoorsman', u'game_show', u'has_car', u'game_news', u'entertainment_news', u'health', u'online_shopping', u'variety_show', u'business_news', u'tvseries_show', u'current_news', u'sports_news', u'tech_news', u'offline_shopping', u'pregnant', u'gender', u'study', u'married', u'sports_show', u'gamer', u'social', u'has_pet'] # # class Dashboard: # def get_age_and_gender_data_dict(self): # try: # WeightedStaticInfo = Object.extend('WeightedStaticInfo') # query = Query(WeightedStaticInfo) # query.exists('objectId') # query.select('age','gender') # staticInfoList = query.find() # gender_type_list =['man','woman'] # age_type_list = ['16down','16to35','35to55','55up'] # dataDict ={gender_type:{age_type:0 for age_type in age_type_list} for gender_type in gender_type_list} # # for staticInfo in staticInfoList: # gender = 'man' if staticInfo.get('gender') >0 else 'woman' # age_info_dict= staticInfo.get('age') # dataDict[gender][age_info_dict.keys()[0]] += 1 # # dataDict ={'man' if staticInfo.get('gender') >0 else 'woman':dataDict['man' if staticInfo.get('gender') >0 else 'woman'][staticInfo.get('age').keys()[0]] +=1 for staticInfo in staticInfoList} # new_data_dict = {key:[0 for i in range(4)] for key in dataDict.keys()} # for index ,age_type in enumerate(age_type_list): # for gender_type in dataDict.keys(): # new_data_dict[gender_type][index] = dataDict[gender_type][age_type] # # except LeanCloudError, e: # # raise e # return new_data_dict # def get_occupation_data_dict(self): # try: # WeightedStaticInfo = Object.extend('WeightedStaticInfo') # query = Query(WeightedStaticInfo) # query.exists('objectId') # staticInfoList = query.find() # dataDict ={gender_type:{age_type:0 for age_type in age_type_list} for gender_type in gender_type_list} # # for staticInfo in staticInfoList: # gender = 'man' if staticInfo.get('gender') >0 else 'woman' # age_info_dict= staticInfo.get('age') # dataDict[gender][age_info_dict.keys()[0]] += 1 # # dataDict ={'man' if staticInfo.get('gender') >0 else 'woman':dataDict['man' if staticInfo.get('gender') >0 else 'woman'][staticInfo.get('age').keys()[0]] +=1 for staticInfo in staticInfoList} # new_data_dict = {key:[0 for i in range(4)] for key in 
dataDict.keys()} # for index ,age_type in enumerate(age_type_list): # for gender_type in dataDict.keys(): # new_data_dict[gender_type][index] = dataDict[gender_type][age_type] # # except LeanCloudError, e: # # raise e # return new_data_dict # # #下面三个函数的代码可以优化合并 # def get_location_distribution_data_dict(self): # field = 'location' # k = 5 # unknown = 'unknown' # try: # WeightedStaticInfo = Object.extend('WeightedUserContext') # query = Query(WeightedStaticInfo) # query.exists('objectId') # query.select(field) # # 这个地方后面需要做根据applicationid查询 # #另外也需要分组查询 # resultList = query.find() # seen_location_dict = {} # user_count = len(resultList) # # for result in resultList: # location_dict = result.get(field) # for key, valu in location_dict.items(): # if key in seen_location_dict.keys(): # seen_location_dict[key] += location_dict[key] # else: # seen_location_dict[key] = location_dict[key] # total_unknown_location_value = seen_location_dict.get(unknown) # #如果seen_location_dict中含有unknown字段的话,就删掉 # if total_unknown_location_value: # del seen_location_dict[unknown] # # sorted_seen_location = sorted(seen_location_dict.items(), key=lambda l: l[1], reverse=True) # sorted_frequent_location = sorted_seen_location[0:k] # total_known_time = user_count - total_unknown_location_value # sorted_frequent_location_percentage = [(str(kv[0]),(kv[1]/total_known_time)) for kv in sorted_frequent_location] # sorted_frequent_location_percentage.append(('others',1-sum([kv[1] for kv in sorted_frequent_location_percentage]))) # # # # except LeanCloudError, e: # # raise e # return sorted_frequent_location_percentage # def get_motion_distribution_data_dict(self): # field = 'motion' # k = 5 # unknown = 'unknown' # try: # WeightedStaticInfo = Object.extend('WeightedUserContext') # query = Query(WeightedStaticInfo) # query.exists('objectId') # query.select(field) # # 这个地方后面需要做根据applicationid查询 # #另外也需要分组查询 # resultList = query.find() # seen_location_dict = {} # user_count = len(resultList) # # for result in resultList: # location_dict = result.get(field) # for key, valu in location_dict.items(): # if key in seen_location_dict.keys(): # seen_location_dict[key] += location_dict[key] # else: # seen_location_dict[key] = location_dict[key] # total_unknown_location_value = seen_location_dict.get(unknown) # #如果seen_location_dict中含有unknown字段的话,就删掉 # if total_unknown_location_value: # del seen_location_dict[unknown] # # sorted_seen_location = sorted(seen_location_dict.items(), key=lambda l: l[1], reverse=True) # sorted_frequent_location = sorted_seen_location[0:k] # total_known_time = user_count - total_unknown_location_value # sorted_frequent_location_percentage = [(str(kv[0]),(kv[1]/total_known_time)) for kv in sorted_frequent_location] # sorted_frequent_location_percentage.append(('others',1-sum([kv[1] for kv in sorted_frequent_location_percentage]))) # # # # except LeanCloudError, e: # # raise e # return sorted_frequent_location_percentage # def get_sound_distribution_data_dict(self): # field = 'sound' # k = 5 # unknown = 'unknown' # try: # WeightedStaticInfo = Object.extend('WeightedUserContext') # query = Query(WeightedStaticInfo) # query.exists('objectId') # query.select(field) # # 这个地方后面需要做根据applicationid查询 # #另外也需要分组查询 # resultList = query.find() # seen_location_dict = {} # user_count = len(resultList) # # for result in resultList: # location_dict = result.get(field) # for key, valu in location_dict.items(): # if key in seen_location_dict.keys(): # seen_location_dict[key] += location_dict[key] # else: # seen_location_dict[key] = 
location_dict[key] # total_unknown_location_value = seen_location_dict.get(unknown) # #如果seen_location_dict中含有unknown字段的话,就删掉 # if total_unknown_location_value: # del seen_location_dict[unknown] # # sorted_seen_location = sorted(seen_location_dict.items(), key=lambda l: l[1], reverse=True) # sorted_frequent_location = sorted_seen_location[0:k] # total_known_time = user_count - total_unknown_location_value # sorted_frequent_location_percentage = [(str(kv[0]),(kv[1]/total_known_time)) for kv in sorted_frequent_location] # sorted_frequent_location_percentage.append(('others',1-sum([kv[1] for kv in sorted_frequent_location_percentage]))) # # except LeanCloudError, e: # # raise e # return sorted_frequent_location_percentage # # # # # # def get_event_to_activity_data(self,application_id,event_name,db_name='EventActivity'): # try: # DbTable = Object.extend(db_name) # query = Query(DbTable) # #这里只是测试知道是少于1K条的 # query.equal_to('event_name',event_name) # # query.equal_to('application_id',application_id) # query.descending('createdAt') # query.limit(1) # result = query.find() # activity_statistics_dict = result[0].get('activity_dict') # # # except LeanCloudError, e: # # raise e # return activity_statistics_dict # # # query.select('user','timestamp') # # resultList = query.find() # # DBTable = Object.extend('MergedUserContext') # # activity_dict = {} # # total_count = len(resultList) # # print 'the length of resultList is : %s' %(str(total_count)) # # for index1,result in enumerate(resultList): # # query = Query(DBTable) # # query.equal_to('user',result.get('user')) # # query.less_than_or_equal_to('startTime',result.get('timestamp')) # # query.greater_than_or_equal_to('endTime',result.get('timestamp')) # # resultList1 = query.find() # # if len(resultList1) == 1 or len(resultList1) == 2 : # # activity = resultList1[0].get('event')[0] # # if activity in activity_dict.keys(): # # activity_dict[activity]+=1 # # else: # # activity_dict[activity] =1 # # else: # # print 'length of resultList1: %s' %(str(len(resultList1))) # # print 'Seems to be an error,index: %s,user: %s; timestamp: %s \n' %(str(index1),str(result.get('user').id),str(result.get('timestamp'))) # # # # activity_dict['others'] = total_count-sum(activity_dict.values()) # # # # # # # # class Developer: # def __init__(self,user_id=None): # self.user = User() # self.user_id = user_id # # @classmethod # def is_valid_email(self,email): # query = Query(User) # query.exists('email',email) # return 0 if query.find() else 1; # # def signup(self,email,username,password): # self.user.set('email',email) # self.user.set('username',username) # self.user.set('password',password) # try: # result = self.user.sign_up() # print result # return 1 # except LeanCloudError,e: # print e # return 0 # # def login_with_email(self,email,password): # # self.user.login(email,password) # pass # # def login_with_username(self,username,password): # try: # self.user.login(username,password) # self.user_id = self.user.id # self.session_token = self.user.get_session_token() # print 'user.id: %s' %(str(self.user_id)) # print 'session_token: %s' %(str(self.session_token)) # return 1 # except LeanCloudError,e: # print e # return 0 # # def init_developer_with_user_id(self,user_id): # query = Query(User) # query.equal_to('objectId',user_id) # result = query.find() # if len(result)==1: # return result[0] # else: # print len(result) # print user_id # # def create_new_app(self,app_name): # try: # developer = self.init_developer_with_user_id(self.user_id) # signed_key = 
signer.sign(app_name) # Application = Object.extend('Application') # application = Application() # application.set('application_name',app_name) # application.set('user',developer) # application.save() # app_id = application.id # app_key = signer.sign(app_id).split(app_id+'.')[1] # application.set('app_id',app_id) # application.set('app_key',app_key) # application.save() # return 1 # except LeanCloudError,e: # print e # return 0 # pass # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # ```
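The three commented-out `get_*_distribution_data_dict` helpers above differ only in the `field` constant (`'location'`, `'motion'`, `'sound'`); the author's own comment says as much ("下面三个函数的代码可以优化合并" — "the code of the three functions below can be optimized and merged"). A minimal Python 3 sketch of that merge, assuming the same LeanCloud SDK (`leancloud.Object`, `leancloud.Query`) and the `WeightedUserContext` class from the original:

```python
# Sketch only: one parameterized helper replacing the three copy-pasted
# functions. Assumes the leancloud SDK and a 'WeightedUserContext' class
# whose `field` attribute holds a {label: count} dict, as in the original.
from leancloud import Object, Query


def get_distribution(field, k=5, unknown='unknown'):
    """Top-k (label, share) pairs for `field`, plus an 'others' bucket."""
    WeightedUserContext = Object.extend('WeightedUserContext')
    query = Query(WeightedUserContext)
    query.exists('objectId')
    query.select(field)
    results = query.find()

    totals = {}
    for result in results:
        for label, count in (result.get(field) or {}).items():
            totals[label] = totals.get(label, 0) + count

    unknown_total = totals.pop(unknown, 0)
    known_total = len(results) - unknown_total  # same accounting as the original

    top = sorted(totals.items(), key=lambda kv: kv[1], reverse=True)[:k]
    shares = [(str(label), count / known_total) for label, count in top]
    shares.append(('others', 1 - sum(share for _, share in shares)))
    return shares
```

The `0` default in `totals.pop(unknown, 0)` also covers the case the original left implicit, where no `unknown` entries were seen at all.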
[ { "content": "Provide a verbatim copy of the code:\n```python\n# ----------------------------------------------------------------------------\n# Copyright (c) 2013--, scikit-bio development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, di...
[ { "content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n# ----------------------------------------------------------------------------\n# Copyright (c) 2013--, scikit-bio development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file...
```python # ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function from six import StringIO, string_types import csv import warnings from copy import deepcopy import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt from IPython.core.pylabtools import print_figure from IPython.core.display import Image, SVG import numpy as np import pandas as pd from scipy.spatial.distance import squareform from skbio._base import SkbioObject from skbio.stats import p_value_to_str from skbio.stats._misc import _pprint_strs class DissimilarityMatrixError(Exception): """General error for dissimilarity matrix validation failures.""" pass class DistanceMatrixError(DissimilarityMatrixError): """General error for distance matrix validation failures.""" pass class MissingIDError(DissimilarityMatrixError): """Error for ID lookup that doesn't exist in the dissimilarity matrix.""" def __init__(self, missing_id): super(MissingIDError, self).__init__() self.args = ("The ID '%s' is not in the dissimilarity matrix." % missing_id,) class DissimilarityMatrix(SkbioObject): """Store dissimilarities between objects. A `DissimilarityMatrix` instance stores a square, hollow, two-dimensional matrix of dissimilarities between objects. Objects could be, for example, samples or DNA sequences. A sequence of IDs accompanies the dissimilarities. Methods are provided to load and save dissimilarity matrices from/to disk, as well as perform common operations such as extracting dissimilarities based on object ID. Parameters ---------- data : array_like or DissimilarityMatrix Square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities (floats), or a structure that can be converted to a ``numpy.ndarray`` using ``numpy.asarray``. Can instead be a `DissimilarityMatrix` (or subclass) instance, in which case the instance's data will be used. Data will be converted to a float ``dtype`` if necessary. A copy will *not* be made if already a ``numpy.ndarray`` with a float ``dtype``. ids : sequence of str, optional Sequence of strings to be used as object IDs. Must match the number of rows/cols in `data`. If ``None`` (the default), IDs will be monotonically-increasing integers cast as strings, with numbering starting from zero, e.g., ``('0', '1', '2', '3', ...)``. Attributes ---------- data ids dtype shape size T png svg See Also -------- DistanceMatrix Notes ----- The dissimilarities are stored in redundant (square-form) format [1]_. The data are not checked for symmetry, nor guaranteed/assumed to be symmetric. References ---------- .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html """ default_write_format = 'lsmat' # Used in __str__ _matrix_element_name = 'dissimilarity' @classmethod def from_file(cls, lsmat_f, delimiter='\t'): """Load dissimilarity matrix from delimited text file. .. note:: Deprecated in scikit-bio 0.2.0-dev ``from_file`` will be removed in scikit-bio 0.3.0. It is replaced by ``read``, which is a more general method for deserializing dissimilarity/distance matrices. ``read`` supports multiple file formats, automatic file format detection, etc. by taking advantage of scikit-bio's I/O registry system. 
See :mod:`skbio.io` for more details. Creates a ``DissimilarityMatrix`` (or subclass) instance from a ``lsmat`` formatted file. See :mod:`skbio.io.lsmat` for the format specification. Parameters ---------- lsmat_f: filepath or filehandle File to read from. delimiter : str, optional String delimiting elements in `lsmat_f`. Returns ------- DissimilarityMatrix Instance of type `cls` containing the parsed contents of `lsmat_f`. See Also -------- read """ warnings.warn( "DissimilarityMatrix.from_file and DistanceMatrix.from_file are " "deprecated and will be removed in scikit-bio 0.3.0. Please " "update your code to use DissimilarityMatrix.read and " "DistanceMatrix.read.", DeprecationWarning) return cls.read(lsmat_f, format='lsmat', delimiter=delimiter) def to_file(self, out_f, delimiter='\t'): """Save dissimilarity matrix to file as delimited text. .. note:: Deprecated in scikit-bio 0.2.0-dev ``to_file`` will be removed in scikit-bio 0.3.0. It is replaced by ``write``, which is a more general method for serializing dissimilarity/distance matrices. ``write`` supports multiple file formats by taking advantage of scikit-bio's I/O registry system. See :mod:`skbio.io` for more details. Serializes dissimilarity matrix as a ``lsmat`` formatted file. See :mod:`skbio.io.lsmat` for the format specification. Parameters ---------- out_f : filepath or filehandle File to write to. delimiter : str, optional Delimiter used to separate elements in output format. See Also -------- write """ warnings.warn( "DissimilarityMatrix.to_file and DistanceMatrix.to_file are " "deprecated and will be removed in scikit-bio 0.3.0. Please " "update your code to use DissimilarityMatrix.write and " "DistanceMatrix.write.", DeprecationWarning) self.write(out_f, format='lsmat', delimiter=delimiter) def __init__(self, data, ids=None): if isinstance(data, DissimilarityMatrix): data = data.data data = np.asarray(data, dtype='float') if ids is None: ids = (str(i) for i in range(data.shape[0])) ids = tuple(ids) self._validate(data, ids) self._data = data self._ids = ids self._id_index = self._index_list(self._ids) @property def data(self): """Array of dissimilarities. A square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities (floats). A copy is *not* returned. Notes ----- This property is not writeable. """ return self._data @property def ids(self): """Tuple of object IDs. A tuple of strings, one for each object in the dissimilarity matrix. Notes ----- This property is writeable, but the number of new IDs must match the number of objects in `data`. """ return self._ids @ids.setter def ids(self, ids_): ids_ = tuple(ids_) self._validate(self.data, ids_) self._ids = ids_ self._id_index = self._index_list(self._ids) @property def dtype(self): """Data type of the dissimilarities.""" return self.data.dtype @property def shape(self): """Two-element tuple containing the dissimilarity matrix dimensions. Notes ----- As the dissimilarity matrix is guaranteed to be square, both tuple entries will always be equal. """ return self.data.shape @property def size(self): """Total number of elements in the dissimilarity matrix. Notes ----- Equivalent to ``self.shape[0] * self.shape[1]``. """ return self.data.size @property def T(self): """Transpose of the dissimilarity matrix. See Also -------- transpose """ return self.transpose() def transpose(self): """Return the transpose of the dissimilarity matrix. Notes ----- A deep copy is returned. Returns ------- DissimilarityMatrix Transpose of the dissimilarity matrix. 
Will be the same type as `self`. """ return self.__class__(self.data.T.copy(), deepcopy(self.ids)) def index(self, lookup_id): """Return the index of the specified ID. Parameters ---------- lookup_id : str ID whose index will be returned. Returns ------- int Row/column index of `lookup_id`. Raises ------ MissingIDError If `lookup_id` is not in the dissimilarity matrix. """ if lookup_id in self: return self._id_index[lookup_id] else: raise MissingIDError(lookup_id) def redundant_form(self): """Return an array of dissimilarities in redundant format. As this is the native format that the dissimilarities are stored in, this is simply an alias for `data`. Returns ------- ndarray Two-dimensional ``numpy.ndarray`` of dissimilarities in redundant format. Notes ----- Redundant format is described in [1]_. Does *not* return a copy of the data. References ---------- .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html """ return self.data def copy(self): """Return a deep copy of the dissimilarity matrix. Returns ------- DissimilarityMatrix Deep copy of the dissimilarity matrix. Will be the same type as `self`. """ # We deepcopy IDs in case the tuple contains mutable objects at some # point in the future. return self.__class__(self.data.copy(), deepcopy(self.ids)) def filter(self, ids, strict=True): """Filter the dissimilarity matrix by IDs. Parameters ---------- ids : iterable of str IDs to retain. May not contain duplicates or be empty. Each ID must be present in the dissimilarity matrix. strict : bool, optional If `strict` is ``True`` and an ID that is not found in the distance matrix is found in `ids`, a ``MissingIDError`` exception will be raised, otherwise the ID will be ignored. Returns ------- DissimilarityMatrix Filtered dissimilarity matrix containing only the IDs specified in `ids`. IDs will be in the same order as they appear in `ids`. Raises ------ MissingIDError If an ID in `ids` is not in the object's list of IDs. """ if strict: idxs = [self.index(id_) for id_ in ids] else: # get the indices to slice the inner numpy array idxs = [] # save the IDs that were found in the distance matrix found_ids = [] for id_ in ids: try: idxs.append(self.index(id_)) found_ids.append(id_) except MissingIDError: pass ids = found_ids filtered_data = self._data[idxs][:, idxs] return self.__class__(filtered_data, ids) def plot(self, cmap=None, title=""): """Creates a heatmap of the dissimilarity matrix Parameters ---------- cmap: str or matplotlib.colors.Colormap, optional Sets the color scheme of the heatmap If ``None``, defaults to the colormap specified in the matplotlib rc file. title: str, optional Sets the title label of the heatmap (Default is blank) Returns ------- matplotlib.figure.Figure Figure containing the heatmap and colorbar of the plotted dissimilarity matrix. Examples -------- .. plot:: Define a dissimilarity matrix with five objects labeled A-E: >>> from skbio.stats.distance import DissimilarityMatrix >>> dm = DissimilarityMatrix([[0, 1, 2, 3, 4], [1, 0, 1, 2, 3], ... [2, 1, 0, 1, 2], [3, 2, 1, 0, 1], ... [4, 3, 2, 1, 0]], ... 
['A', 'B', 'C', 'D', 'E']) Plot the dissimilarity matrix as a heatmap: >>> fig = dm.plot(cmap='Reds', title='Example heatmap') """ # based on http://stackoverflow.com/q/14391959/3776794 fig, ax = plt.subplots() # use pcolormesh instead of pcolor for performance heatmap = ax.pcolormesh(self.data, cmap=cmap) fig.colorbar(heatmap) # center labels within each cell ticks = np.arange(0.5, self.shape[0]) ax.set_xticks(ticks, minor=False) ax.set_yticks(ticks, minor=False) # display data as it is stored in the dissimilarity matrix # (default is to have y-axis inverted) ax.invert_yaxis() ax.set_xticklabels(self.ids, rotation=90, minor=False) ax.set_yticklabels(self.ids, minor=False) ax.set_title(title) return fig def _repr_png_(self): return self._figure_data('png') def _repr_svg_(self): return self._figure_data('svg') @property def png(self): """Display heatmap in IPython Notebook as PNG. """ return Image(self._repr_png_(), embed=True) @property def svg(self): """Display heatmap in IPython Notebook as SVG. """ return SVG(self._repr_svg_()) def _figure_data(self, format): fig = self.plot() data = print_figure(fig, format) # We MUST close the figure, otherwise IPython's display machinery # will pick it up and send it as output, resulting in a double display plt.close(fig) return data def __str__(self): """Return a string representation of the dissimilarity matrix. Summary includes matrix dimensions, a (truncated) list of IDs, and (truncated) array of dissimilarities. Returns ------- str String representation of the dissimilarity matrix. .. shownumpydoc """ return '%dx%d %s matrix\nIDs:\n%s\nData:\n' % ( self.shape[0], self.shape[1], self._matrix_element_name, _pprint_strs(self.ids)) + str(self.data) def __eq__(self, other): """Compare this dissimilarity matrix to another for equality. Two dissimilarity matrices are equal if they have the same shape, IDs (in the same order!), and have data arrays that are equal. Checks are *not* performed to ensure that `other` is a `DissimilarityMatrix` instance. Parameters ---------- other : DissimilarityMatrix Dissimilarity matrix to compare to for equality. Returns ------- bool ``True`` if `self` is equal to `other`, ``False`` otherwise. .. shownumpydoc """ equal = True # The order these checks are performed in is important to be as # efficient as possible. The check for shape equality is not strictly # necessary as it should be taken care of in np.array_equal, but I'd # rather explicitly bail before comparing IDs or data. Use array_equal # instead of (a == b).all() because of this issue: # http://stackoverflow.com/a/10582030 try: if self.shape != other.shape: equal = False elif self.ids != other.ids: equal = False elif not np.array_equal(self.data, other.data): equal = False except AttributeError: equal = False return equal def __ne__(self, other): """Determine whether two dissimilarity matrices are not equal. Parameters ---------- other : DissimilarityMatrix Dissimilarity matrix to compare to. Returns ------- bool ``True`` if `self` is not equal to `other`, ``False`` otherwise. See Also -------- __eq__ .. shownumpydoc """ return not self == other def __contains__(self, lookup_id): """Check if the specified ID is in the dissimilarity matrix. Parameters ---------- lookup_id : str ID to search for. Returns ------- bool ``True`` if `lookup_id` is in the dissimilarity matrix, ``False`` otherwise. See Also -------- index .. shownumpydoc """ return lookup_id in self._id_index def __getitem__(self, index): """Slice into dissimilarity data by object ID or numpy indexing. 
Extracts data from the dissimilarity matrix by object ID, a pair of IDs, or numpy indexing/slicing. Parameters ---------- index : str, two-tuple of str, or numpy index `index` can be one of the following forms: an ID, a pair of IDs, or a numpy index. If `index` is a string, it is assumed to be an ID and a ``numpy.ndarray`` row vector is returned for the corresponding ID. Note that the ID's row of dissimilarities is returned, *not* its column. If the matrix is symmetric, the two will be identical, but this makes a difference if the matrix is asymmetric. If `index` is a two-tuple of strings, each string is assumed to be an ID and the corresponding matrix element is returned that represents the dissimilarity between the two IDs. Note that the order of lookup by ID pair matters if the matrix is asymmetric: the first ID will be used to look up the row, and the second ID will be used to look up the column. Thus, ``dm['a', 'b']`` may not be the same as ``dm['b', 'a']`` if the matrix is asymmetric. Otherwise, `index` will be passed through to ``DissimilarityMatrix.data.__getitem__``, allowing for standard indexing of a ``numpy.ndarray`` (e.g., slicing). Returns ------- ndarray or scalar Indexed data, where return type depends on the form of `index` (see description of `index` for more details). Raises ------ MissingIDError If the ID(s) specified in `index` are not in the dissimilarity matrix. Notes ----- The lookup based on ID(s) is quick. .. shownumpydoc """ if isinstance(index, string_types): return self.data[self.index(index)] elif self._is_id_pair(index): return self.data[self.index(index[0]), self.index(index[1])] else: return self.data.__getitem__(index) def _validate(self, data, ids): """Validate the data array and IDs. Checks that the data is at least 1x1 in size, 2D, square, hollow, and contains only floats. Also checks that IDs are unique and that the number of IDs matches the number of rows/cols in the data array. Subclasses can override this method to perform different/more specific validation (e.g., see `DistanceMatrix`). Notes ----- Accepts arguments instead of inspecting instance attributes to avoid creating an invalid dissimilarity matrix before raising an error. Otherwise, the invalid dissimilarity matrix could be used after the exception is caught and handled. """ num_ids = len(ids) if 0 in data.shape: raise DissimilarityMatrixError("Data must be at least 1x1 in " "size.") elif len(data.shape) != 2: raise DissimilarityMatrixError("Data must have exactly two " "dimensions.") elif data.shape[0] != data.shape[1]: raise DissimilarityMatrixError("Data must be square (i.e., have " "the same number of rows and " "columns).") elif data.dtype != np.double: raise DissimilarityMatrixError("Data must contain only floating " "point values.") elif np.trace(data) != 0: raise DissimilarityMatrixError("Data must be hollow (i.e., the " "diagonal can only contain zeros).") elif num_ids != len(set(ids)): raise DissimilarityMatrixError("IDs must be unique.") elif num_ids != data.shape[0]: raise DissimilarityMatrixError("The number of IDs must match the " "number of rows/columns in the " "data.") def _index_list(self, list_): return {id_: idx for idx, id_ in enumerate(list_)} def _is_id_pair(self, index): return (isinstance(index, tuple) and len(index) == 2 and all(map(lambda e: isinstance(e, string_types), index))) class DistanceMatrix(DissimilarityMatrix): """Store distances between objects. 
A `DistanceMatrix` is a `DissimilarityMatrix` with the additional requirement that the matrix data is symmetric. There are additional methods made available that take advantage of this symmetry. See Also -------- DissimilarityMatrix Notes ----- The distances are stored in redundant (square-form) format [1]_. To facilitate use with other scientific Python routines (e.g., scipy), the distances can be retrieved in condensed (vector-form) format using `condensed_form`. `DistanceMatrix` only requires that the distances it stores are symmetric. Checks are *not* performed to ensure the other three metric properties hold (non-negativity, identity of indiscernibles, and triangle inequality) [2]_. Thus, a `DistanceMatrix` instance can store distances that are not metric. References ---------- .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html .. [2] http://planetmath.org/metricspace """ # Override here, used in superclass __str__ _matrix_element_name = 'distance' def condensed_form(self): """Return an array of distances in condensed format. Returns ------- ndarray One-dimensional ``numpy.ndarray`` of distances in condensed format. Notes ----- Condensed format is described in [1]_. The conversion is not a constant-time operation, though it should be relatively quick to perform. References ---------- .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html """ return squareform(self._data, force='tovector', checks=False) def permute(self, condensed=False): """Randomly permute both rows and columns in the matrix. Randomly permutes the ordering of rows and columns in the matrix. The same permutation is applied to both rows and columns in order to maintain symmetry and hollowness. Only the rows/columns in the distance matrix are permuted; the IDs are *not* permuted. Parameters ---------- condensed : bool, optional If ``True``, return the permuted distance matrix in condensed format. Otherwise, return the permuted distance matrix as a new ``DistanceMatrix`` instance. Returns ------- DistanceMatrix or ndarray Permuted distances as a new ``DistanceMatrix`` or as a ``ndarray`` in condensed format. See Also -------- condensed_form Notes ----- This method does not modify the distance matrix that it is called on. It is more efficient to pass ``condensed=True`` than permuting the distance matrix and then converting to condensed format. """ order = np.random.permutation(self.shape[0]) permuted = self._data[order][:, order] if condensed: return squareform(permuted, force='tovector', checks=False) else: return self.__class__(permuted, self.ids) def _validate(self, data, ids): """Validate the data array and IDs. Overrides the superclass `_validate`. Performs a check for symmetry in addition to the checks performed in the superclass. """ super(DistanceMatrix, self)._validate(data, ids) if (data.T != data).any(): raise DistanceMatrixError("Data must be symmetric.") def randdm(num_objects, ids=None, constructor=None, random_fn=None): """Generate a distance matrix populated with random distances. Using the default `random_fn`, distances are randomly drawn from a uniform distribution over ``[0, 1)``. Regardless of `random_fn`, the resulting distance matrix is guaranteed to be symmetric and hollow. Parameters ---------- num_objects : int The number of objects in the resulting distance matrix. For example, if `num_objects` is 3, a 3x3 distance matrix will be returned. ids : sequence of str or None, optional A sequence of strings to be used as IDs. 
``len(ids)`` must be equal to `num_objects`. If not provided, IDs will be monotonically-increasing integers cast as strings (numbering starts at 1). For example, ``('1', '2', '3')``. constructor : type, optional `DissimilarityMatrix` or subclass constructor to use when creating the random distance matrix. The returned distance matrix will be of this type. If ``None`` (the default), a `DistanceMatrix` instance will be returned. random_fn : function, optional Function to generate random values. `random_fn` must accept two arguments (number of rows and number of columns) and return a 2D ``numpy.ndarray`` of floats (or something that can be cast to float). If ``None`` (the default), ``numpy.random.rand`` will be used. Returns ------- DissimilarityMatrix `DissimilarityMatrix` (or subclass) instance of random distances. Type depends on `constructor`. See Also -------- numpy.random.rand """ if constructor is None: constructor = DistanceMatrix if random_fn is None: random_fn = np.random.rand data = np.tril(random_fn(num_objects, num_objects), -1) data += data.T if not ids: ids = map(str, range(1, num_objects + 1)) return constructor(data, ids) # helper functions for anosim and permanova def _preprocess_input(distance_matrix, grouping, column): """Compute intermediate results not affected by permutations. These intermediate results can be computed a single time for efficiency, regardless of grouping vector permutations (i.e., when calculating the p-value). These intermediate results are used by both ANOSIM and PERMANOVA. Also validates and normalizes input (e.g., converting ``DataFrame`` column into grouping vector). """ if not isinstance(distance_matrix, DistanceMatrix): raise TypeError("Input must be a DistanceMatrix.") if isinstance(grouping, pd.DataFrame): if column is None: raise ValueError( "Must provide a column name if supplying a DataFrame.") else: grouping = _df_to_vector(distance_matrix, grouping, column) elif column is not None: raise ValueError( "Must provide a DataFrame if supplying a column name.") sample_size = distance_matrix.shape[0] if len(grouping) != sample_size: raise ValueError( "Grouping vector size must match the number of IDs in the " "distance matrix.") # Find the group labels and convert grouping to an integer vector # (factor). groups, grouping = np.unique(grouping, return_inverse=True) num_groups = len(groups) if num_groups == len(grouping): raise ValueError( "All values in the grouping vector are unique. This method cannot " "operate on a grouping vector with only unique values (e.g., " "there are no 'within' distances because each group of objects " "contains only a single object).") if num_groups == 1: raise ValueError( "All values in the grouping vector are the same. This method " "cannot operate on a grouping vector with only a single group of " "objects (e.g., there are no 'between' distances because there is " "only a single group).") tri_idxs = np.triu_indices(sample_size, k=1) distances = distance_matrix.condensed_form() return sample_size, num_groups, grouping, tri_idxs, distances def _df_to_vector(distance_matrix, df, column): """Return a grouping vector from a ``DataFrame`` column. Parameters ---------- distance_marix : DistanceMatrix Distance matrix whose IDs will be mapped to group labels. df : pandas.DataFrame ``DataFrame`` (indexed by distance matrix ID). column : str Column name in `df` containing group labels. Returns ------- list Grouping vector (vector of labels) based on the IDs in `distance_matrix`. 
Each ID's label is looked up in the ``DataFrame`` under the column specified by `column`. Raises ------ ValueError If `column` is not in the ``DataFrame``, or a distance matrix ID is not in the ``DataFrame``. """ if column not in df: raise ValueError("Column '%s' not in DataFrame." % column) grouping = df.loc[distance_matrix.ids, column] if grouping.isnull().any(): raise ValueError( "One or more IDs in the distance matrix are not in the data " "frame.") return grouping.tolist() def _run_monte_carlo_stats(test_stat_function, grouping, permutations): """Run stat test and compute significance with Monte Carlo permutations.""" if permutations < 0: raise ValueError( "Number of permutations must be greater than or equal to zero.") stat = test_stat_function(grouping) p_value = np.nan if permutations > 0: perm_stats = np.empty(permutations, dtype=np.float64) for i in range(permutations): perm_grouping = np.random.permutation(grouping) perm_stats[i] = test_stat_function(perm_grouping) p_value = ((perm_stats >= stat).sum() + 1) / (permutations + 1) return stat, p_value def _build_results(method_name, test_stat_name, sample_size, num_groups, stat, p_value, permutations): """Return ``pandas.Series`` containing results of statistical test.""" return pd.Series( data=[method_name, test_stat_name, sample_size, num_groups, stat, p_value, permutations], index=['method name', 'test statistic name', 'sample size', 'number of groups', 'test statistic', 'p-value', 'number of permutations'], name='%s results' % method_name) class CategoricalStats(object): """Base class for categorical statistical methods. Categorical statistical methods generally test for significant differences between discrete groups of objects, as determined by a categorical variable (grouping vector). See Also -------- ANOSIM PERMANOVA """ short_method_name = '' long_method_name = '' test_statistic_name = '' def __init__(self, distance_matrix, grouping, column=None): if not isinstance(distance_matrix, DistanceMatrix): raise TypeError("Input must be a DistanceMatrix.") if isinstance(grouping, pd.DataFrame): if column is None: raise ValueError("Must provide a column name if supplying a " "data frame.") else: grouping = self._df_to_vector(distance_matrix, grouping, column) elif column is not None: raise ValueError("Must provide a data frame if supplying a column " "name.") if len(grouping) != distance_matrix.shape[0]: raise ValueError("Grouping vector size must match the number of " "IDs in the distance matrix.") # Find the group labels and convert grouping to an integer vector # (factor). groups, grouping = np.unique(grouping, return_inverse=True) if len(groups) == len(grouping): raise ValueError("All values in the grouping vector are unique. " "This method cannot operate on a grouping vector " "with only unique values (e.g., there are no " "'within' distances because each group of " "objects contains only a single object).") if len(groups) == 1: raise ValueError("All values in the grouping vector are the same. " "This method cannot operate on a grouping vector " "with only a single group of objects (e.g., " "there are no 'between' distances because there " "is only a single group).") self._dm = distance_matrix self._grouping = grouping self._groups = groups self._tri_idxs = np.triu_indices(self._dm.shape[0], k=1) def _df_to_vector(self, distance_matrix, df, column): """Return a grouping vector from a data frame column. Parameters ---------- distance_marix : DistanceMatrix Distance matrix whose IDs will be mapped to group labels. 
df : pandas.DataFrame ``DataFrame`` (indexed by distance matrix ID). column : str Column name in `df` containing group labels. Returns ------- list Grouping vector (vector of labels) based on the IDs in `distance_matrix`. Each ID's label is looked up in the data frame under the column specified by `column`. Raises ------ ValueError If `column` is not in the data frame, or a distance matrix ID is not in the data frame. """ if column not in df: raise ValueError("Column '%s' not in data frame." % column) grouping = df.loc[distance_matrix.ids, column] if grouping.isnull().any(): raise ValueError("One or more IDs in the distance matrix are not " "in the data frame.") return grouping.tolist() def __call__(self, permutations=999): """Execute the statistical method. Parameters ---------- permutations : int, optional Number of permutations to use when calculating statistical significance. Must be >= 0. If 0, the resulting p-value will be ``None``. Returns ------- CategoricalStatsResults Results of the method, including test statistic and p-value. .. shownumpydoc """ if permutations < 0: raise ValueError("Number of permutations must be greater than or " "equal to zero.") stat = self._run(self._grouping) p_value = None if permutations > 0: perm_stats = np.empty(permutations, dtype=np.float64) for i in range(permutations): perm_grouping = np.random.permutation(self._grouping) perm_stats[i] = self._run(perm_grouping) p_value = ((perm_stats >= stat).sum() + 1) / (permutations + 1) return CategoricalStatsResults(self.short_method_name, self.long_method_name, self.test_statistic_name, self._dm.shape[0], self._groups, stat, p_value, permutations) def _run(self, grouping): raise NotImplementedError("Subclasses must implement _run().") class CategoricalStatsResults(object): """Statistical method results container. .. note:: Deprecated in scikit-bio 0.2.1-dev ``CategoricalStatsResults`` will be removed in scikit-bio 0.3.0. It is replaced by ``pandas.Series`` for storing statistical method results. Please update your code to use ``skbio.stats.distance.anosim`` or ``skbio.stats.distance.permanova``, which will return a ``pandas.Series``. Stores the results of running a `CategoricalStats` method a single time, and provides a way to format the results. Attributes ---------- short_method_name long_method_name test_statistic_name sample_size groups statistic p_value permutations Notes ----- Users will generally not directly instantiate objects of this class. The various categorical statistical methods will return an object of this type when they are run. """ def __init__(self, short_method_name, long_method_name, test_statistic_name, sample_size, groups, statistic, p_value, permutations): warnings.warn( "skbio.stats.distance.CategoricalStatsResults is deprecated and " "will be removed in scikit-bio 0.3.0. 
Please update your code to " "use either skbio.stats.distance.anosim or " "skbio.stats.distance.permanova, which will return a " "pandas.Series object.", DeprecationWarning) self.short_method_name = short_method_name self.long_method_name = long_method_name self.test_statistic_name = test_statistic_name self.sample_size = sample_size self.groups = groups self.statistic = statistic self.p_value = p_value self.permutations = permutations def __str__(self): """Return pretty-print (fixed width) string.""" rows = (self._format_header(), self._format_data()) max_widths = [] for col_idx in range(len(rows[0])): max_widths.append(max(map(lambda e: len(e[col_idx]), rows))) results = [] for row in rows: padded_row = [] for col_idx, val in enumerate(row): padded_row.append(val.rjust(max_widths[col_idx])) results.append(' '.join(padded_row)) return '\n'.join(results) + '\n' def _repr_html_(self): """Return a string containing an HTML table of results. This method will be called within the IPython Notebook instead of __repr__ to display results. """ header = self._format_header() data = self._format_data() return pd.DataFrame([data[1:]], columns=header[1:], index=[data[0]])._repr_html_() def summary(self, delimiter='\t'): """Return a formatted summary of results as a string. The string is formatted as delimited text. Parameters ---------- delimiter : str, optional String to delimit fields by in formatted output. Default is tab (TSV). Returns ------- str Delimited-text summary of results. """ summary = StringIO() csv_writer = csv.writer(summary, delimiter=delimiter, lineterminator='\n') csv_writer.writerow(self._format_header()) csv_writer.writerow(self._format_data()) return summary.getvalue() def _format_header(self): return ('Method name', 'Sample size', 'Number of groups', self.test_statistic_name, 'p-value', 'Number of permutations') def _format_data(self): p_value_str = p_value_to_str(self.p_value, self.permutations) return (self.short_method_name, '%d' % self.sample_size, '%d' % len(self.groups), str(self.statistic), p_value_str, '%d' % self.permutations) ```
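A short usage sketch for the matrix classes above, exercising only behavior documented in this module (ID-pair indexing, `condensed_form`, `filter`, `permute`); it assumes a scikit-bio of this vintage is importable:

```python
import numpy as np
from skbio.stats.distance import DistanceMatrix

data = np.array([[0.0, 0.5, 1.0],
                 [0.5, 0.0, 0.75],
                 [1.0, 0.75, 0.0]])
dm = DistanceMatrix(data, ids=['a', 'b', 'c'])

print(dm['a', 'c'])             # element lookup by ID pair -> 1.0
print(dm['b'])                  # row vector of dissimilarities for ID 'b'
print(dm.condensed_form())      # scipy vector-form: [0.5, 1.0, 0.75]

sub = dm.filter(['c', 'a'])     # 2x2 submatrix, reordered to ('c', 'a')
print(sub.ids, sub.shape)

print(dm.permute(condensed=True))  # random row/column permutation, condensed
```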
[ { "content": "Here is a code snippet:\n```python\n#!/usr/bin/python\r\n\r\n\"\"\"\r\nBenjamin Carr\r\nHomework #2 - MPCS 55001\r\nAnswers:\r\n(1) Program below.\r\n(2) My program is correct for all cases where both the numbers and the distances from the median\r\n are unique. I spent a lot of time (30+ hrs) ...
[ { "content": "Here is a code snippet:\n<|memory_start|>```python\n#!/usr/bin/python\r\n\r\n\"\"\"\r\nBenjamin Carr\r\nHomework #2 - MPCS 55001\r\nAnswers:\r\n(1) Program below.\r\n(2) My program is correct for all cases where both the numbers and the distances from the median\r\n are unique. I spent a lot of...
```python
#!/usr/bin/python

"""
Benjamin Carr
Homework #2 - MPCS 55001
Answers:
(1) Program below.
(2) My program is correct for all cases where both the numbers and the distances from the median
    are unique. I spent a lot of time (30+ hrs) trying to figure out other ways of doing this
    other than using a dictionary to store key,val pairs but didn't come up with anything that
    would work for all situations. So, it works for situations that meet that criteria. It also
    assumes the median is always equal to floor(n/2), which is a bit of a mathematical compromise.
(3) It should run in O(n) time - the worst running time is a function of the O(n) lookup to
    select() that is initially used to find the median. Both of the core FOR loops (lines 66 & 71)
    take O(n) as well.
"""

import sys
import math
from random import randint


def main():
    startFindClosest()


def startFindClosest():
    """Begins the closest search process by reading in the stdin file.

    Args:
        None. Reads from stdin for file.

    Returns:
        No value. Prints closest k values to median to stdout."""
    f = sys.stdin
    line1 = f.readline()
    while line1 != '':
        k = int(f.readline())
        array = form_array_from_string_line(line1)
        print findClosestKValues(array, 0, len(array)-1, k)
        line1 = f.readline()
        if not line1:
            break
    return


def findClosestKValues(array, l_index, r_index, k):
    """Finds the closest K values to the median.

    Args:
        array: List object containing unsorted list of values.
        k: The number of numbers closest to the median we wish to find.

    Returns:
        nums: a list object containing the closest k numbers to median."""
    nums = []
    temp_array = []
    pairing = {}
    """ Note: This is code I tried to use to get it work for varying lengths
    to accurately output the median value. It turned out to be more complex
    than imagined so I left it out.
    if (len(array) % 2) == 0:
        median_A = randomizedSelect(array, l_index, r_index, (len(array)/2))
        median_B = randomizedSelect(array, l_index, r_index, ((len(array)-1)/2))
        median = (median_A + median_B) / 2.0
    else:
        median = randomizedSelect(array, l_index, r_index, (len(array)/2))"""
    median = randomizedSelect(array, l_index, r_index, math.floor(len(array)/2))
    array.remove(median)
    array.append(median)
    for i in range(0, r_index+1):
        pairing[abs(array[i]-median)] = array[i]
        temp_array.append(abs(array[i] - median))
    kth_element = randomizedSelect(temp_array, l_index, len(temp_array)-1, k)
    for j in range(0, len(array)):
        if temp_array[j] <= kth_element:
            nums.append(pairing[temp_array[j]])
    return nums


def form_array_from_string_line(line):
    """Begins the inversion count process by reading in the stdin file.

    Args:
        line: A string of input line (usually from a text file) with integers
            contained within, separated by spaces.

    Returns:
        array: List object (Python's standard 'array' type) featuring each of
            the integers as a separate item in the list."""
    array = [int(n) for n in line.split()]
    return array


def randomizedSelect(array, l_index, r_index, i):
    """Uses the randomizedPartion method to find the specified i-th value.

    Args:
        array: List object containing unsorted list of values.
        l_index: Left index of the subarray we want to search in.
        r_index: Right index of the subarray we want to search in.
        i: The i-th sorted value we want to find.

    Returns:
        array: List object (Python's standard 'array' type) featuring each of
            the integers as a separate item in the list."""
    if l_index == r_index:
        return array[l_index]
    q = randomizedPartition(array, l_index, r_index)
    k = q - l_index + 1
    if i == k:
        return array[q]
    elif i < k:
        return randomizedSelect(array, l_index, q-1, i)
    else:
        return randomizedSelect(array, q+1, r_index, i-k)


def randomizedPartition(array, l_index, r_index):
    """Randomizes the partion method.

    Args:
        array: List object containing unsorted list of values.
        l_index: Left index of the subarray we want to search in.
        r_index: Right index of the subarray we want to search in.

    Returns:
        i+1: Integer value of the index of the partition."""
    i = randint(l_index, r_index)
    array = valueSwap(array, i, r_index)
    return partition(array, l_index, r_index)


def partition(array, l_index, r_index):
    """Identifies the partion index.

    Args:
        array: List object containing unsorted list of values.
        l_index: Left index of the subarray we want to search in.
        r_index: Right index of the subarray we want to search in.

    Returns:
        i+1: Integer value of the index of the partition."""
    pivot = array[r_index]
    i = l_index - 1
    j = l_index
    for j in range(l_index, r_index):
        if array[j] <= pivot:
            i += 1
            array = valueSwap(array, i, j)
    array = valueSwap(array, i+1, r_index)
    return i+1


def valueSwap(array, index_one, index_two):
    """Swaps two values in a given array.

    Args:
        array: List object containing unsorted list of values.
        index_one: Index of first item we want to swap.
        index_two: Index of second item we want to swap.

    Returns:
        array: List with the desired values swapped."""
    if len(array) <= 1:
        return array
    else:
        try:
            temp = array[index_one]
            array[index_one] = array[index_two]
            array[index_two] = temp
        except IndexError, e:
            print e
            print "Tried to swap index: " + str(index_one) + ' with index: ' + str(index_two)
    return array


if __name__ == '__main__':
    main()
```
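For checking the quickselect version above, a plain O(n log n) reference in Python 3 is useful; this sketch keeps the homework's convention that the "median" is the floor(n/2)-th smallest element:

```python
def closest_k_to_median(values, k):
    """Reference (sorting-based) answer: the k values nearest the median,
    where the median is the floor(n/2)-th order statistic, as in the
    homework code. Ties are broken by sort stability rather than by the
    dictionary trick above."""
    median = sorted(values)[len(values) // 2 - 1]
    return sorted(values, key=lambda v: abs(v - median))[:k]


print(closest_k_to_median([7, 14, 10, 12, 2, 11, 29, 3, 4], 5))
# -> [7, 10, 4, 11, 3]: median is 7, the four next-closest follow
```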
[ { "content": "Repeat the code precisely:\n```python\n# -*- coding: utf-8 -*-\n\n# Automatic provisioning of AWS S3 buckets.\n\nimport time\nimport botocore\nimport boto3\nimport nixops.util\nimport nixops.resources\nimport nixops.ec2_utils\n\n\nclass S3BucketDefinition(nixops.resources.ResourceDefinition):\n ...
[ { "content": "Repeat the code precisely:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n# Automatic provisioning of AWS S3 buckets.\n\nimport time\nimport botocore\nimport boto3\nimport nixops.util\nimport nixops.resources\nimport nixops.ec2_utils\n\n\nclass S3BucketDefinition(nixops.resources.ResourceD...
```python # -*- coding: utf-8 -*- # Automatic provisioning of AWS S3 buckets. import time import botocore import boto3 import nixops.util import nixops.resources import nixops.ec2_utils class S3BucketDefinition(nixops.resources.ResourceDefinition): """Definition of an S3 bucket.""" @classmethod def get_type(cls): return "s3-bucket" @classmethod def get_resource_type(cls): return "s3Buckets" def __init__(self, xml, config={}): nixops.resources.ResourceDefinition.__init__(self, xml, config) self.bucket_name = xml.find("attrs/attr[@name='name']/string").get("value") self.region = xml.find("attrs/attr[@name='region']/string").get("value") self.access_key_id = xml.find("attrs/attr[@name='accessKeyId']/string").get("value") self.policy = xml.find("attrs/attr[@name='policy']/string").get("value") self.website_enabled = self.config["website"]["enabled"] self.website_suffix = self.config["website"]["suffix"] self.website_error_document = self.config["website"]["errorDocument"] def show_type(self): return "{0} [{1}]".format(self.get_type(), self.region) class S3BucketState(nixops.resources.ResourceState): """State of an S3 bucket.""" state = nixops.util.attr_property("state", nixops.resources.ResourceState.MISSING, int) bucket_name = nixops.util.attr_property("ec2.bucketName", None) access_key_id = nixops.util.attr_property("ec2.accessKeyId", None) region = nixops.util.attr_property("ec2.region", None) @classmethod def get_type(cls): return "s3-bucket" def __init__(self, depl, name, id): nixops.resources.ResourceState.__init__(self, depl, name, id) self._conn = None def show_type(self): s = super(S3BucketState, self).show_type() if self.region: s = "{0} [{1}]".format(s, self.region) return s @property def resource_id(self): return self.bucket_name def get_definition_prefix(self): return "resources.s3Buckets." 
def connect(self): if self._conn: return (access_key_id, secret_access_key) = nixops.ec2_utils.fetch_aws_secret_key(self.access_key_id) self._conn = boto3.session.Session(region_name=self.region if self.region != "US" else "us-east-1", aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key) def create(self, defn, check, allow_reboot, allow_recreate): self.access_key_id = defn.access_key_id or nixops.ec2_utils.get_access_key_id() if not self.access_key_id: raise Exception("please set ‘accessKeyId’, $EC2_ACCESS_KEY or $AWS_ACCESS_KEY_ID") if len(defn.bucket_name) > 63: raise Exception("bucket name ‘{0}’ is longer than 63 characters.".format(defn.bucket_name)) self.connect() s3client = self._conn.client('s3') if check or self.state != self.UP: self.log("creating S3 bucket ‘{0}’...".format(defn.bucket_name)) try: ACL = 'private' # ..or: public-read, public-read-write, authenticated-read s3loc = region_to_s3_location(defn.region) if s3loc == "US": s3client.create_bucket(ACL = ACL, Bucket = defn.bucket_name) else: s3client.create_bucket(ACL = ACL, Bucket = defn.bucket_name, CreateBucketConfiguration = { 'LocationConstraint': s3loc }) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] != "BucketAlreadyOwnedByYou": raise with self.depl._db: self.state = self.UP self.bucket_name = defn.bucket_name self.region = defn.region if defn.policy: self.log("setting S3 bucket policy on ‘{0}’...".format(defn.bucket_name)) s3client.put_bucket_policy(Bucket = defn.bucket_name, Policy = defn.policy.strip()) else: try: s3client.delete_bucket_policy(Bucket = defn.bucket_name) except botocore.exceptions.ClientError as e: # This seems not to happen - despite docs indicating it should: # [http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html] if e.response['ResponseMetadata']['HTTPStatusCode'] != 204: raise # (204 : Bucket didn't have any policy to delete) if not defn.website_enabled: try: s3client.delete_bucket_website(Bucket = defn.bucket_name) except botocore.exceptions.ClientError as e: if e.response['ResponseMetadata']['HTTPStatusCode'] != 204: raise else: website_config = { 'IndexDocument': { 'Suffix': defn.website_suffix } } if defn.website_error_document != "": website_config['ErrorDocument'] = { 'Key': defn.website_error_document} s3client.put_bucket_website(Bucket = defn.bucket_name, WebsiteConfiguration = website_config) def destroy(self, wipe=False): if self.state == self.UP: self.connect() try: self.log("destroying S3 bucket ‘{0}’...".format(self.bucket_name)) bucket = self._conn.resource('s3').Bucket(self.bucket_name) try: bucket.delete() except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] != "BucketNotEmpty": raise if not self.depl.logger.confirm("are you sure you want to destroy S3 bucket ‘{0}’?".format(self.bucket_name)): return False bucket.objects.all().delete() bucket.delete() except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] != "NoSuchBucket": raise return True def region_to_s3_location(region): # S3 location names are identical to EC2 regions, except for # us-east-1 and eu-west-1. if region == "eu-west-1": return "EU" elif region == "us-east-1": return "US" else: return region ```
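The one S3 subtlety this module encodes is in `region_to_s3_location` plus the create branch: a us-east-1 bucket must be created *without* a `LocationConstraint`. A standalone boto3 sketch of that branch, with placeholder bucket and region names:

```python
import boto3


def region_to_s3_location(region):
    # Same mapping as the module above: S3 location names match EC2
    # regions except for the two legacy values.
    if region == "eu-west-1":
        return "EU"
    elif region == "us-east-1":
        return "US"
    else:
        return region


def create_bucket(bucket_name, region):
    s3 = boto3.session.Session(region_name=region).client("s3")
    s3loc = region_to_s3_location(region)
    if s3loc == "US":
        # us-east-1 rejects an explicit LocationConstraint.
        s3.create_bucket(ACL="private", Bucket=bucket_name)
    else:
        s3.create_bucket(ACL="private", Bucket=bucket_name,
                         CreateBucketConfiguration={"LocationConstraint": s3loc})


# create_bucket("my-example-bucket", "eu-central-1")  # names are placeholders
```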
[ { "content": "Write the code verbatim:\n```python\n#!/usr/bin/python\nimport sys\nfrom klampt import *\nfrom klampt import vis\nfrom klampt.vis.glrobotprogram import GLSimulationPlugin\nimport robotiq \n\n\nclass MyGLViewer(GLSimulationPlugin):\n def __init__(self,world):\n GLSimulationPlugin.__init__...
[ { "content": "Write the code verbatim:\n<|memory_start|>```python\n#!/usr/bin/python\nimport sys\nfrom klampt import *\nfrom klampt import vis\nfrom klampt.vis.glrobotprogram import GLSimulationPlugin\nimport robotiq \n\n\nclass MyGLViewer(GLSimulationPlugin):\n def __init__(self,world):\n GLSimulatio...
```python
#!/usr/bin/python
import sys
from klampt import *
from klampt import vis
from klampt.vis.glrobotprogram import GLSimulationPlugin
import robotiq


class MyGLViewer(GLSimulationPlugin):
    def __init__(self,world):
        GLSimulationPlugin.__init__(self,world)
        #Put any controller modules or sensor / actuator emulators here
        self.robotiqEmulator = robotiq.Emulator(self.sim)
        self.sim.addEmulator(0,self.robotiqEmulator)

    def control_loop(self):
        #Put your control handler here
        #right now, just sets g to an oscillation between 0 and maxval-1 (119)
        #TODO: build a BaseController that outputs qcmd to the emulator
        g = int(self.sim.getTime()*50.0)
        maxval = 120
        if int(g/maxval)%2 == 1:
            g = maxval-1 - g%maxval
        else:
            g = g % maxval
        #print g
        g = [g,g,g]
        self.robotiqEmulator.send_command(g,scissor=30)


if __name__ == "__main__":
    print """robotiqtest.py: A program to test the behavior of the RobotiQ
    emulator. Right now it just opens and closes the gripper repeatedly.

    Press s to toggle simulation."""
    world = WorldModel()
    if not world.readFile('robotiq.xml'):
        print "robotiq.xml couldn't be read, exiting"
        exit(1)
    viewer = MyGLViewer(world)
    vis.setWindowTitle("Robotiq gripper test")
    vis.run(viewer)
```
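`control_loop` above folds simulation time into a triangle wave so the gripper opens and closes forever. The waveform logic is independent of Klamp't, so it can be sanity-checked in isolation; a minimal sketch:

```python
def triangle(t, rate=50.0, maxval=120):
    """Same fold as control_loop: ramp 0..maxval-1, then back down."""
    g = int(t * rate)
    if int(g / maxval) % 2 == 1:
        return maxval - 1 - g % maxval
    return g % maxval


# One full open/close cycle takes 2*maxval/rate = 4.8 s at these defaults.
for t in (0.0, 1.2, 2.4, 3.6, 4.8):
    print(t, triangle(t))   # 0, 60, 119, 59, 0
```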
[ { "content": "Here is the code content:\n```python\n#!python\n\"\"\"Bootstrap setuptools installation\n\nIf you want to use setuptools in your package's setup.py, just include this\nfile in the same directory with it, and add this to the top of your setup.py::\n\n from ez_setup import use_setuptools\n use...
[ { "content": "Here is the code content:\n<|memory_start|>```python\n#!python\n\"\"\"Bootstrap setuptools installation\n\nIf you want to use setuptools in your package's setup.py, just include this\nfile in the same directory with it, and add this to the top of your setup.py::\n\n from ez_setup import use_set...
```python #!python """Bootstrap setuptools installation If you want to use setuptools in your package's setup.py, just include this file in the same directory with it, and add this to the top of your setup.py:: from ez_setup import use_setuptools use_setuptools() If you want to require a specific version of setuptools, set a download mirror, or use an alternate download directory, you can do so by supplying the appropriate options to ``use_setuptools()``. This file can also be run as a script to install or upgrade setuptools. """ import os import shutil import sys import tempfile import tarfile import optparse import subprocess import platform from distutils import log try: from site import USER_SITE except ImportError: USER_SITE = None DEFAULT_VERSION = "1.1.6" DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/" def _python_cmd(*args): args = (sys.executable,) + args return subprocess.call(args) == 0 def _check_call_py24(cmd, *args, **kwargs): res = subprocess.call(cmd, *args, **kwargs) class CalledProcessError(Exception): pass if not res == 0: msg = "Command '%s' return non-zero exit status %d" % (cmd, res) raise CalledProcessError(msg) vars(subprocess).setdefault('check_call', _check_call_py24) def _install(tarball, install_args=()): # extracting the tarball tmpdir = tempfile.mkdtemp() log.warn('Extracting in %s', tmpdir) old_wd = os.getcwd() try: os.chdir(tmpdir) tar = tarfile.open(tarball) _extractall(tar) tar.close() # going in the directory subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) os.chdir(subdir) log.warn('Now working in %s', subdir) # installing log.warn('Installing Setuptools') if not _python_cmd('setup.py', 'install', *install_args): log.warn('Something went wrong during the installation.') log.warn('See the error message above.') # exitcode will be 2 return 2 finally: os.chdir(old_wd) shutil.rmtree(tmpdir) def _build_egg(egg, tarball, to_dir): # extracting the tarball tmpdir = tempfile.mkdtemp() log.warn('Extracting in %s', tmpdir) old_wd = os.getcwd() try: os.chdir(tmpdir) tar = tarfile.open(tarball) _extractall(tar) tar.close() # going in the directory subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) os.chdir(subdir) log.warn('Now working in %s', subdir) # building an egg log.warn('Building a Setuptools egg in %s', to_dir) _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) finally: os.chdir(old_wd) shutil.rmtree(tmpdir) # returning the result log.warn(egg) if not os.path.exists(egg): raise IOError('Could not build the egg.') def _do_download(version, download_base, to_dir, download_delay): egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg' % (version, sys.version_info[0], sys.version_info[1])) if not os.path.exists(egg): tarball = download_setuptools(version, download_base, to_dir, download_delay) _build_egg(egg, tarball, to_dir) sys.path.insert(0, egg) # Remove previously-imported pkg_resources if present (see # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details). 
if 'pkg_resources' in sys.modules: del sys.modules['pkg_resources'] import setuptools setuptools.bootstrap_install_from = egg def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, download_delay=15): # making sure we use the absolute path to_dir = os.path.abspath(to_dir) was_imported = 'pkg_resources' in sys.modules or \ 'setuptools' in sys.modules try: import pkg_resources except ImportError: return _do_download(version, download_base, to_dir, download_delay) try: pkg_resources.require("setuptools>=" + version) return except pkg_resources.VersionConflict: e = sys.exc_info()[1] if was_imported: sys.stderr.write( "The required version of setuptools (>=%s) is not available,\n" "and can't be installed while this script is running. Please\n" "install a more recent version first, using\n" "'easy_install -U setuptools'." "\n\n(Currently using %r)\n" % (version, e.args[0])) sys.exit(2) else: del pkg_resources, sys.modules['pkg_resources'] # reload ok return _do_download(version, download_base, to_dir, download_delay) except pkg_resources.DistributionNotFound: return _do_download(version, download_base, to_dir, download_delay) def download_file_powershell(url, target): """ Download the file at url to target using Powershell (which will validate trust). Raise an exception if the command cannot complete. """ target = os.path.abspath(target) cmd = [ 'powershell', '-Command', "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(), ] subprocess.check_call(cmd) def has_powershell(): if platform.system() != 'Windows': return False cmd = ['powershell', '-Command', 'echo test'] devnull = open(os.path.devnull, 'wb') try: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except: return False finally: devnull.close() return True download_file_powershell.viable = has_powershell def download_file_curl(url, target): cmd = ['curl', url, '--silent', '--output', target] subprocess.check_call(cmd) def has_curl(): cmd = ['curl', '--version'] devnull = open(os.path.devnull, 'wb') try: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except: return False finally: devnull.close() return True download_file_curl.viable = has_curl def download_file_wget(url, target): cmd = ['wget', url, '--quiet', '--output-document', target] subprocess.check_call(cmd) def has_wget(): cmd = ['wget', '--version'] devnull = open(os.path.devnull, 'wb') try: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except: return False finally: devnull.close() return False download_file_wget.viable = has_wget def download_file_insecure(url, target): """ Use Python to download the file, even though it cannot authenticate the connection. """ try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen src = dst = None try: src = urlopen(url) # Read/write all in one block, so we don't create a corrupt file # if the download is interrupted. 
data = src.read() dst = open(target, "wb") dst.write(data) finally: if src: src.close() if dst: dst.close() download_file_insecure.viable = lambda: True def get_best_downloader(): downloaders = [ download_file_powershell, download_file_curl, download_file_wget, download_file_insecure, ] for dl in downloaders: if dl.viable(): return dl def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader): """Download setuptools from a specified location and return its filename `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where the egg will be downloaded. `delay` is the number of seconds to pause before an actual download attempt. ``downloader_factory`` should be a function taking no arguments and returning a function for downloading a URL to a target. """ # making sure we use the absolute path to_dir = os.path.abspath(to_dir) tgz_name = "setuptools-%s.tar.gz" % version url = download_base + tgz_name saveto = os.path.join(to_dir, tgz_name) if not os.path.exists(saveto): # Avoid repeated downloads log.warn("Downloading %s", url) downloader = downloader_factory() downloader(url, saveto) return os.path.realpath(saveto) def _extractall(self, path=".", members=None): """Extract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers(). """ import copy import operator from tarfile import ExtractError directories = [] if members is None: members = self for tarinfo in members: if tarinfo.isdir(): # Extract directories with a safe mode. directories.append(tarinfo) tarinfo = copy.copy(tarinfo) tarinfo.mode = 448 # decimal for oct 0700 self.extract(tarinfo, path) # Reverse sort directories. if sys.version_info < (2, 4): def sorter(dir1, dir2): return cmp(dir1.name, dir2.name) directories.sort(sorter) directories.reverse() else: directories.sort(key=operator.attrgetter('name'), reverse=True) # Set correct owner, mtime and filemode on directories. 
for tarinfo in directories: dirpath = os.path.join(path, tarinfo.name) try: self.chown(tarinfo, dirpath) self.utime(tarinfo, dirpath) self.chmod(tarinfo, dirpath) except ExtractError: e = sys.exc_info()[1] if self.errorlevel > 1: raise else: self._dbg(1, "tarfile: %s" % e) def _build_install_args(options): """ Build the arguments to 'python setup.py install' on the setuptools package """ install_args = [] if options.user_install: if sys.version_info < (2, 6): log.warn("--user requires Python 2.6 or later") raise SystemExit(1) install_args.append('--user') return install_args def _parse_args(): """ Parse the command line for options """ parser = optparse.OptionParser() parser.add_option( '--user', dest='user_install', action='store_true', default=False, help='install in user site package (requires Python 2.6 or later)') parser.add_option( '--download-base', dest='download_base', metavar="URL", default=DEFAULT_URL, help='alternative URL from where to download the setuptools package') parser.add_option( '--insecure', dest='downloader_factory', action='store_const', const=lambda: download_file_insecure, default=get_best_downloader, help='Use internal, non-validating downloader' ) options, args = parser.parse_args() # positional arguments are ignored return options def main(version=DEFAULT_VERSION): """Install or upgrade setuptools and EasyInstall""" options = _parse_args() tarball = download_setuptools(download_base=options.download_base, downloader_factory=options.downloader_factory) return _install(tarball, _build_install_args(options)) if __name__ == '__main__': sys.exit(main()) ```
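The module docstring spells out the intended integration: two lines at the top of a project's `setup.py`. A sketch of that documented hook, with obviously placeholder package metadata:

```python
# setup.py (sketch; name/version/module are placeholders)
from ez_setup import use_setuptools
use_setuptools()  # bootstraps setuptools if it is missing or too old

from setuptools import setup

setup(
    name="example-package",
    version="0.1",
    py_modules=["example"],
)
```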
[ { "content": "Recreate the entire code block with identical formatting:\n```python\n#!/usr/bin/env python\n\"\"\"\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ...
[ { "content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n#!/usr/bin/env python\n\"\"\"\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nrega...
```python
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Ambari Agent

"""

import os, sys
from mock.mock import patch
from mock.mock import MagicMock
from unittest import TestCase
from resource_management import *
from resource_management import Script
from ambari_commons.os_check import OSCheck
from update_repo import UpdateRepo


class TestUpdateRepo(TestCase):

  @patch.object(OSCheck, "is_suse_family")
  @patch.object(OSCheck, "is_ubuntu_family")
  @patch.object(OSCheck, "is_redhat_family")
  @patch.object(Script, 'get_config')
  @patch("resource_management.libraries.providers.repository.File")
  @patch("resource_management.libraries.script.Script.put_structured_out")
  @patch.object(System, "os_family", new='redhat')
  def testUpdateRepo(self, structured_out_mock, file_mock, mock_config,
                     is_redhat_mock, is_ubuntu_mock, is_suse_mock):
    ###### valid case
    is_suse_mock.return_value = False
    is_ubuntu_mock.return_value = False
    is_redhat_mock.return_value = True

    updateRepo = UpdateRepo()
    mock_config.return_value = {
      "configurations": {
        "cluster-env": {
          "repo_suse_rhel_template": "REPO_SUSE_RHEL_TEST_TEMPLATE",
          "repo_ubuntu_template": "REPO_UBUNTU_TEMPLATE"
        }
      },
      "repositoryFile": {
        "resolved": True,
        "repoVersion": "2.4.3.0-227",
        "repositories": [
          {
            "mirrorsList": None,
            "ambariManaged": True,
            "baseUrl": "http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.4.3.0/",
            "repoName": "HDP",
            "components": None,
            "osType": "redhat6",
            "distribution": None,
            "repoId": "HDP-2.4-repo-1"
          },
          {
            "mirrorsList": None,
            "ambariManaged": True,
            "baseUrl": "http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6",
            "repoName": "HDP-UTILS",
            "components": None,
            "osType": "redhat6",
            "distribution": None,
            "repoId": "HDP-UTILS-1.1.0.20-repo-1"
          }
        ],
        "feature": {
          "m_isScoped": False,
          "m_isPreInstalled": False
        },
        "stackName": "HDP",
        "repoVersionId": 1
      },
    }

    with Environment('/') as env:
      updateRepo.actionexecute(None)

    self.assertTrue(file_mock.called)
    self.assertEquals(file_mock.call_args[0][0], "/etc/yum.repos.d/HDP.repo")
    self.assertEquals(structured_out_mock.call_args[0][0],
                      {'repo_update': {'message': 'Repository files successfully updated!', 'exit_code': 0}})

    ###### invalid repo info
    file_mock.reset_mock()
    failed = False
    mock_config.return_value = {
      "configurations": {
        "cluster-env": {
          "repo_suse_rhel_template": "REPO_SUSE_RHEL_TEST_TEMPLATE",
          "repo_ubuntu_template": "REPO_UBUNTU_TEMPLATE"
        }
      },
      "repositoryFile": {}
    }

    try:
      with Environment('/') as env:
        updateRepo.actionexecute(None)
    except Exception, exception:
      failed = True

    self.assertFalse(file_mock.called)
    self.assertTrue(failed)
```
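For context, one way such a test case is typically executed on its own, outside a full test runner, is with the stock `unittest` machinery; this is a sketch only, and the module name `test_update_repo` is an assumption.

```python
# Illustrative only: load and run the TestUpdateRepo case above with unittest;
# the importable module name test_update_repo is hypothetical.
import unittest
from test_update_repo import TestUpdateRepo

suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateRepo)
unittest.TextTestRunner(verbosity=2).run(suite)
```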
[ { "content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\nimport setuptools\n\nwith open('VERSION.txt', 'r') as f:\n version = f.read().strip()\n\nsetuptools.setup(\n name=\"odoo12-addons-oca-purchase-workflow\",\n description=\"Meta package for oca-purchase-workflow...
[ { "content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\nimport setuptools\n\nwith open('VERSION.txt', 'r') as f:\n version = f.read().strip()\n\nsetuptools.setup(\n name=\"odoo12-addons-oca-purchase-workflow\",\n description=\"Meta package for oca-p...
```python
import setuptools

with open('VERSION.txt', 'r') as f:
    version = f.read().strip()

setuptools.setup(
    name="odoo12-addons-oca-purchase-workflow",
    description="Meta package for oca-purchase-workflow Odoo addons",
    version=version,
    install_requires=[
        'odoo12-addon-procurement_purchase_no_grouping',
        'odoo12-addon-product_form_purchase_link',
        'odoo12-addon-product_supplier_code_purchase',
        'odoo12-addon-purchase_allowed_product',
        'odoo12-addon-purchase_analytic_global',
        'odoo12-addon-purchase_blanket_order',
        'odoo12-addon-purchase_commercial_partner',
        'odoo12-addon-purchase_date_planned_manual',
        'odoo12-addon-purchase_default_terms_conditions',
        'odoo12-addon-purchase_delivery_split_date',
        'odoo12-addon-purchase_deposit',
        'odoo12-addon-purchase_discount',
        'odoo12-addon-purchase_exception',
        'odoo12-addon-purchase_force_invoiced',
        'odoo12-addon-purchase_invoice_plan',
        'odoo12-addon-purchase_landed_cost',
        'odoo12-addon-purchase_last_price_info',
        'odoo12-addon-purchase_line_procurement_group',
        'odoo12-addon-purchase_location_by_line',
        'odoo12-addon-purchase_manual_delivery',
        'odoo12-addon-purchase_minimum_amount',
        'odoo12-addon-purchase_open_qty',
        'odoo12-addon-purchase_order_analytic_search',
        'odoo12-addon-purchase_order_approval_block',
        'odoo12-addon-purchase_order_approved',
        'odoo12-addon-purchase_order_archive',
        'odoo12-addon-purchase_order_general_discount',
        'odoo12-addon-purchase_order_line_deep_sort',
        'odoo12-addon-purchase_order_line_description',
        'odoo12-addon-purchase_order_line_price_history',
        'odoo12-addon-purchase_order_line_price_history_discount',
        'odoo12-addon-purchase_order_line_sequence',
        'odoo12-addon-purchase_order_line_stock_available',
        'odoo12-addon-purchase_order_product_recommendation',
        'odoo12-addon-purchase_order_product_recommendation_brand',
        'odoo12-addon-purchase_order_product_recommendation_secondary_unit',
        'odoo12-addon-purchase_order_secondary_unit',
        'odoo12-addon-purchase_order_type',
        'odoo12-addon-purchase_order_uninvoiced_amount',
        'odoo12-addon-purchase_picking_state',
        'odoo12-addon-purchase_price_recalculation',
        'odoo12-addon-purchase_product_usage',
        'odoo12-addon-purchase_propagate_qty',
        'odoo12-addon-purchase_quick',
        'odoo12-addon-purchase_reception_notify',
        'odoo12-addon-purchase_reception_status',
        'odoo12-addon-purchase_request',
        'odoo12-addon-purchase_request_department',
        'odoo12-addon-purchase_request_order_approved',
        'odoo12-addon-purchase_request_product_usage',
        'odoo12-addon-purchase_request_tier_validation',
        'odoo12-addon-purchase_request_usage_department',
        'odoo12-addon-purchase_requisition_auto_rfq',
        'odoo12-addon-purchase_requisition_line_description',
        'odoo12-addon-purchase_requisition_tier_validation',
        'odoo12-addon-purchase_security',
        'odoo12-addon-purchase_stock_price_unit_sync',
        'odoo12-addon-purchase_stock_return_request',
        'odoo12-addon-purchase_tier_validation',
        'odoo12-addon-purchase_triple_discount',
        'odoo12-addon-purchase_work_acceptance',
        'odoo12-addon-subcontracted_service',
        'odoo12-addon-supplier_calendar',
    ],
    classifiers=[
        'Programming Language :: Python',
        'Framework :: Odoo',
    ]
)
```
[ { "content": "Repeat the code exactly as the original, including blank lines:\n```python\nimport os\nfrom setuptools import setup\n\nREADME = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__f...
[ { "content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\nimport os\nfrom setuptools import setup\n\nREADME = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os....
```python
import os
from setuptools import setup

README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-rahit-user',
    version='0.0.1',
    description='Registration and Auth module',
    long_description=README,
    author='Tahsin Hassan Rahit',
    author_email='tahsin.rahit@gmail.com',
    url='http://github.com/rahit/django-rahit-user',
    packages=['mymen'],
    install_requires=['Django >=1.5'],
    include_package_data=True,
    license='MIT License',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
```
[ { "content": "```python\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distrib...
[ { "content": "<|memory_start|>```python\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This pr...
```python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2017 ScyllaDB

# pylint: disable=too-many-lines
import itertools
import os
import logging
import random
import socket
import time
import datetime
import errno
import threading
import select
import shutil
import copy
from functools import wraps
from enum import Enum
from collections import defaultdict
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from urlparse import urlparse
import hashlib

import boto3
import libcloud.storage.providers
import libcloud.storage.types
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider

LOGGER = logging.getLogger('utils')


def _remote_get_hash(remoter, file_path):
    try:
        result = remoter.run('md5sum {}'.format(file_path), verbose=True)
        return result.stdout.strip().split()[0]
    except Exception as details:  # pylint: disable=broad-except
        LOGGER.error(str(details))
        return None


def _remote_get_file(remoter, src, dst, user_agent=None):
    cmd = 'curl -L {} -o {}'.format(src, dst)
    if user_agent:
        cmd += ' --user-agent %s' % user_agent
    return remoter.run(cmd, ignore_status=True)


def remote_get_file(remoter, src, dst, hash_expected=None, retries=1, user_agent=None):  # pylint: disable=too-many-arguments
    _remote_get_file(remoter, src, dst, user_agent)
    if not hash_expected:
        return
    while retries > 0 and _remote_get_hash(remoter, dst) != hash_expected:
        _remote_get_file(remoter, src, dst, user_agent)
        retries -= 1
    assert _remote_get_hash(remoter, dst) == hash_expected


class retrying(object):  # pylint: disable=invalid-name,too-few-public-methods
    """
    Used as a decorator to retry a function run that can possibly fail with an allowed exceptions list
    """

    def __init__(self, n=3, sleep_time=1, allowed_exceptions=(Exception,), message=""):
        assert n > 0, "Number of retries parameter should be greater then 0 (current: %s)" % n
        self.n = n  # number of times to retry  # pylint: disable=invalid-name
        self.sleep_time = sleep_time  # number seconds to sleep between retries
        self.allowed_exceptions = allowed_exceptions  # if Exception is not allowed will raise
        self.message = message  # string that will be printed between retries

    def __call__(self, func):
        @wraps(func)
        def inner(*args, **kwargs):
            if self.n == 1:
                # there is no need to retry
                return func(*args, **kwargs)
            for i in xrange(self.n):
                try:
                    if self.message:
                        LOGGER.info("%s [try #%s]", self.message, i)
                    return func(*args, **kwargs)
                except self.allowed_exceptions as ex:
                    LOGGER.debug("'%s': failed with '%r', retrying [#%s]", func.func_name, ex, i)
                    time.sleep(self.sleep_time)
                    if i == self.n - 1:
                        LOGGER.error("'%s': Number of retries exceeded!", func.func_name)
                        raise

        return inner


def log_run_info(arg):
    """
    Decorator that prints BEGIN before the function runs and END when the function finished running.

    Uses the function name as the name of the action, or a string that can be given to the decorator.
    If the function is a method of a class object, the class name will be printed out.

    Usage examples:
        @log_run_info
        def foo(x, y=1):
            pass
        In: foo(1)
        Out:
            BEGIN: foo
            END: foo (ran 0.000164)s

        @log_run_info("Execute nemesis")
        def disrupt():
            pass
        In: disrupt()
        Out:
            BEGIN: Execute nemesis
            END: Execute nemesis (ran 0.000271)s
    """
    def _inner(func, msg=None):
        @wraps(func)
        def inner(*args, **kwargs):
            class_name = ""
            if args and func.__name__ in dir(args[0]):
                class_name = " <%s>" % args[0].__class__.__name__
            action = "%s%s" % (msg, class_name)
            start_time = datetime.datetime.now()
            LOGGER.debug("BEGIN: %s", action)
            res = func(*args, **kwargs)
            end_time = datetime.datetime.now()
            LOGGER.debug("END: %s (ran %ss)", action, (end_time - start_time).total_seconds())
            return res
        return inner

    if callable(arg):  # when decorator is used without a string message
        return _inner(arg, arg.__name__)
    else:
        return lambda f: _inner(f, arg)


class Distro(Enum):
    UNKNOWN = 0
    CENTOS7 = 1
    RHEL7 = 2
    UBUNTU14 = 3
    UBUNTU16 = 4
    UBUNTU18 = 5
    DEBIAN8 = 6
    DEBIAN9 = 7


def get_data_dir_path(*args):
    import sdcm
    sdcm_path = os.path.realpath(sdcm.__path__[0])
    data_dir = os.path.join(sdcm_path, "../data_dir", *args)
    return os.path.abspath(data_dir)


def get_job_name():
    return os.environ.get('JOB_NAME', 'local_run')


def verify_scylla_repo_file(content, is_rhel_like=True):
    LOGGER.info('Verifying Scylla repo file')
    if is_rhel_like:
        body_prefix = ['#', '[scylla', 'name=', 'baseurl=', 'enabled=', 'gpgcheck=', 'type=',
                       'skip_if_unavailable=', 'gpgkey=', 'repo_gpgcheck=', 'enabled_metadata=']
    else:
        body_prefix = ['#', 'deb']
    for line in content.split('\n'):
        valid_prefix = False
        for prefix in body_prefix:
            if line.startswith(prefix) or not line.strip():
                valid_prefix = True
                break
        LOGGER.debug(line)
        assert valid_prefix, 'Repository content has invalid line: {}'.format(line)


def remove_comments(data):
    """Remove comment lines from data

    Remove any line which starts with # in data

    Arguments:
        data {str} -- data expected to be command output, file contents
    """
    return '\n'.join([i.strip() for i in data.split('\n') if not i.startswith('#')])


class S3Storage(object):

    bucket_name = 'cloudius-jenkins-test'
    enable_multipart_threshold_size = 1024 * 1024 * 1024  # 1GB
    multipart_chunksize = 50 * 1024 * 1024  # 50 MB
    num_download_attempts = 5

    def __init__(self, bucket=None):
        if bucket:
            self.bucket_name = bucket
        self._bucket = boto3.resource("s3").Bucket(name=self.bucket_name)
        self.transfer_config = boto3.s3.transfer.TransferConfig(multipart_threshold=self.enable_multipart_threshold_size,
                                                                multipart_chunksize=self.multipart_chunksize,
                                                                num_download_attempts=self.num_download_attempts)

    def get_s3_fileobj(self, key):
        objects = []
        for obj in self._bucket.objects.filter(Prefix=key):
            objects.append(obj)
        return objects

    def search_by_path(self, path=''):
        files = []
        for obj in self._bucket.objects.filter(Prefix=path):
            files.append(obj.key)
        return files

    def generate_url(self, file_path, dest_dir=''):
        bucket_name = self.bucket_name
        file_name = os.path.basename(os.path.normpath(file_path))
        return "https://{bucket_name}.s3.amazonaws.com/{dest_dir}/{file_name}".format(dest_dir=dest_dir,
                                                                                      file_name=file_name,
                                                                                      bucket_name=bucket_name)

    def upload_file(self, file_path, dest_dir=''):
        s3_url = self.generate_url(file_path, dest_dir)
        s3_obj = "{}/{}".format(dest_dir, os.path.basename(file_path))
        try:
            LOGGER.info("Uploading '{file_path}' to {s3_url}".format(file_path=file_path, s3_url=s3_url))
            print "Uploading '{file_path}' to {s3_url}".format(file_path=file_path, s3_url=s3_url)
            self._bucket.upload_file(Filename=file_path,
                                     Key=s3_obj,
                                     Config=self.transfer_config)
            LOGGER.info("Uploaded to {0}".format(s3_url))
            LOGGER.info("Set public read access")
            self.set_public_access(key=s3_obj)
            return s3_url
        except Exception as details:  # pylint: disable=broad-except
            LOGGER.debug("Unable to upload to S3: %s", details)
            return ""

    def set_public_access(self, key):
        acl_obj = boto3.resource('s3').ObjectAcl(self.bucket_name, key)

        grants = copy.deepcopy(acl_obj.grants)
        grantees = {
            'Grantee': {
                "Type": "Group",
                "URI": "http://acs.amazonaws.com/groups/global/AllUsers"
            },
            'Permission': "READ"
        }
        grants.append(grantees)
        acl_obj.put(ACL='', AccessControlPolicy={'Grants': grants, 'Owner': acl_obj.owner})

    def download_file(self, link, dst_dir):
        key_name = link.replace("https://{0.bucket_name}.s3.amazonaws.com/".format(self), "")
        file_name = os.path.basename(key_name)
        try:
            LOGGER.info("Downloading {0} from {1}".format(key_name, self.bucket_name))
            self._bucket.download_file(Key=key_name,
                                       Filename=os.path.join(dst_dir, file_name),
                                       Config=self.transfer_config)
            LOGGER.info("Download finished")
            return os.path.join(os.path.abspath(dst_dir), file_name)
        except Exception as details:  # pylint: disable=broad-except
            LOGGER.warning("File {} is not downloaded by reason: {}".format(key_name, details))
            return ""


def get_latest_gemini_version():
    bucket_name = 'downloads.scylladb.com'

    results = S3Storage(bucket_name).search_by_path(path='gemini')
    versions = set()
    for result_file in results:
        versions.add(result_file.split('/')[1])

    return str(sorted(versions)[-1])


def list_logs_by_test_id(test_id):
    log_types = ['db-cluster', 'monitor-set', 'loader-set', 'sct-runner',
                 'prometheus', 'grafana',
                 'job', 'monitoring_data_stack', 'events']
    results = []

    if not test_id:
        return results

    def convert_to_date(date_str):
        try:
            t = datetime.datetime.strptime(date_str, "%Y%m%d_%H%M%S")  # pylint: disable=invalid-name
        except ValueError:
            try:
                t = datetime.datetime.strptime(date_str, "%Y_%m_%d_%H_%M_%S")  # pylint: disable=invalid-name
            except ValueError:
                t = datetime.datetime(1999, 1, 1, 1, 1, 1)  # pylint: disable=invalid-name

        return t  # pylint: disable=invalid-name

    log_files = S3Storage().search_by_path(path=test_id)
    for log_file in log_files:
        for log_type in log_types:
            if log_type in log_file:
                results.append({"file_path": log_file,
                                "type": log_type,
                                "link": "https://{}.s3.amazonaws.com/{}".format(S3Storage.bucket_name, log_file),
                                "date": convert_to_date(log_file.split('/')[1])
                                })
                break
    results = sorted(results, key=lambda x: x["date"])

    return results


def all_aws_regions():
    client = boto3.client('ec2')
    return [region['RegionName'] for region in client.describe_regions()['Regions']]


AWS_REGIONS = all_aws_regions()


class ParallelObject(object):  # pylint: disable=too-few-public-methods
    """
    Run a function with the supplied args on a list of objects in parallel, using threads.
    """

    def __init__(self, objects, timeout=6, num_workers=None, disable_logging=False):
        self.objects = objects
        self.timeout = timeout
        self.num_workers = num_workers
        self.disable_logging = disable_logging

    def run(self, func):

        def func_wrap(fun):
            def inner(*args, **kwargs):
                thread_name = threading.current_thread().name
                fun_args = args
                fun_kwargs = kwargs
                fun_name = fun.__name__
                LOGGER.debug("[{thread_name}] {fun_name}({fun_args}, {fun_kwargs})".format(thread_name=thread_name,
                                                                                           fun_name=fun_name,
                                                                                           fun_args=fun_args,
                                                                                           fun_kwargs=fun_kwargs))
                return_val = fun(*args, **kwargs)
                LOGGER.debug("[{thread_name}] Done.".format(thread_name=thread_name))
                return return_val
            return inner

        with ThreadPoolExecutor(max_workers=self.num_workers) as pool:
            LOGGER.debug("Executing in parallel: '{}' on {}".format(func.__name__, self.objects))
            if not self.disable_logging:
                func = func_wrap(func)
            return list(pool.map(func, self.objects, timeout=self.timeout))


def clean_cloud_instances(tags_dict):
    """
    Remove all instances with specific tags from both AWS/GCE

    :param tags_dict: a dict of the tag to select the instances, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
    :return: None
    """
    clean_instances_aws(tags_dict)
    clean_elastic_ips_aws(tags_dict)
    clean_instances_gce(tags_dict)


def aws_tags_to_dict(tags_list):
    tags_dict = {}
    if tags_list:
        for item in tags_list:
            tags_dict[item["Key"]] = item["Value"]
    return tags_dict


def list_instances_aws(tags_dict=None, region_name=None, running=False, group_as_region=False, verbose=False):
    """
    list all instances with specific tags AWS

    :param tags_dict: a dict of the tag to select the instances, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
    :param region_name: name of the region to list
    :param running: get all running instances
    :param group_as_region: if True the results would be grouped into regions
    :param verbose: if True will log progress information

    :return: instances dict where region is a key
    """
    instances = {}
    aws_regions = [region_name] if region_name else AWS_REGIONS

    def get_instances(region):
        if verbose:
            LOGGER.info('Going to list aws region "%s"', region)
        time.sleep(random.random())
        client = boto3.client('ec2', region_name=region)
        custom_filter = []
        if tags_dict:
            custom_filter = [{'Name': 'tag:{}'.format(key), 'Values': [value]} for key, value in tags_dict.items()]
        response = client.describe_instances(Filters=custom_filter)
        instances[region] = [instance for reservation in response['Reservations'] for instance in reservation[
            'Instances']]

        if verbose:
            LOGGER.info("%s: done [%s/%s]", region, len(instances.keys()), len(aws_regions))

    ParallelObject(aws_regions, timeout=100).run(get_instances)

    for curr_region_name in instances:
        if running:
            instances[curr_region_name] = [i for i in instances[curr_region_name] if i['State']['Name'] == 'running']
        else:
            instances[curr_region_name] = [i for i in instances[curr_region_name]
                                           if not i['State']['Name'] == 'terminated']
    if not group_as_region:
        instances = list(itertools.chain(*instances.values()))  # flatten the list of lists
        total_items = len(instances)
    else:
        total_items = sum([len(value) for _, value in instances.items()])

    if verbose:
        LOGGER.info("Found total of %s instances.", total_items)

    return instances


def clean_instances_aws(tags_dict):
    """
    Remove all instances with specific tags AWS

    :param tags_dict: a dict of the tag to select the instances, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
    :return: None
    """
    assert tags_dict, "tags_dict not provided (can't clean all instances)"
    aws_instances = list_instances_aws(tags_dict=tags_dict, group_as_region=True)

    for region, instance_list in aws_instances.items():
        client = boto3.client('ec2', region_name=region)
        for instance in instance_list:
            tags = aws_tags_to_dict(instance.get('Tags'))
            name = tags.get("Name", "N/A")
            instance_id = instance['InstanceId']
            LOGGER.info("Going to delete '{instance_id}' [name={name}] ".format(instance_id=instance_id, name=name))
            response = client.terminate_instances(InstanceIds=[instance_id])
            LOGGER.debug("Done. Result: %s\n", response['TerminatingInstances'])


def list_elastic_ips_aws(tags_dict=None, region_name=None, group_as_region=False, verbose=False):
    """
    list all elastic ips with specific tags AWS

    :param tags_dict: a dict of the tag to select the instances, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
    :param region_name: name of the region to list
    :param group_as_region: if True the results would be grouped into regions
    :param verbose: if True will log progress information

    :return: instances dict where region is a key
    """
    elastic_ips = {}
    aws_regions = [region_name] if region_name else AWS_REGIONS

    def get_elastic_ips(region):
        if verbose:
            LOGGER.info('Going to list aws region "%s"', region)
        time.sleep(random.random())
        client = boto3.client('ec2', region_name=region)
        custom_filter = []
        if tags_dict:
            custom_filter = [{'Name': 'tag:{}'.format(key), 'Values': [value]} for key, value in tags_dict.items()]
        response = client.describe_addresses(Filters=custom_filter)
        elastic_ips[region] = [ip for ip in response['Addresses']]

        if verbose:
            LOGGER.info("%s: done [%s/%s]", region, len(elastic_ips.keys()), len(aws_regions))

    ParallelObject(aws_regions, timeout=100).run(get_elastic_ips)

    if not group_as_region:
        elastic_ips = list(itertools.chain(*elastic_ips.values()))  # flatten the list of lists
        total_items = len(elastic_ips)
    else:
        total_items = sum([len(value) for _, value in elastic_ips.items()])

    if verbose:
        LOGGER.info("Found total of %s ips.", total_items)

    return elastic_ips


def clean_elastic_ips_aws(tags_dict):
    """
    Remove all elastic ips with specific tags AWS

    :param tags_dict: a dict of the tag to select the instances, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
    :return: None
    """
    assert tags_dict, "tags_dict not provided (can't clean all instances)"
    aws_instances = list_elastic_ips_aws(tags_dict=tags_dict, group_as_region=True)

    for region, eip_list in aws_instances.items():
        client = boto3.client('ec2', region_name=region)
        for eip in eip_list:
            association_id = eip.get('AssociationId', None)
            if association_id:
                response = client.disassociate_address(AssociationId=association_id)
                LOGGER.debug("disassociate_address. Result: %s\n", response)
            allocation_id = eip['AllocationId']
            LOGGER.info("Going to release '{allocation_id}' [public_ip={public_ip}] ".format(
                allocation_id=allocation_id, public_ip=eip['PublicIp']))
            response = client.release_address(AllocationId=allocation_id)
            LOGGER.debug("Done. Result: %s\n", response)


def get_all_gce_regions():
    from sdcm.keystore import KeyStore
    gcp_credentials = KeyStore().get_gcp_credentials()
    gce_driver = get_driver(Provider.GCE)

    compute_engine = gce_driver(gcp_credentials["project_id"] + "@appspot.gserviceaccount.com",
                                gcp_credentials["private_key"],
                                project=gcp_credentials["project_id"])
    all_gce_regions = [region_obj.name for region_obj in compute_engine.region_list]
    return all_gce_regions


def gce_meta_to_dict(metadata):
    meta_dict = {}
    data = metadata.get("items")
    if data:
        for item in data:
            key = item["key"]
            if key:  # sometimes key is empty string
                meta_dict[key] = item["value"]
    return meta_dict


def filter_gce_by_tags(tags_dict, instances):
    filtered_instances = []
    for instance in instances:
        tags = gce_meta_to_dict(instance.extra['metadata'])
        found_keys = set(k for k in tags_dict if k in tags and tags_dict[k] == tags[k])
        if found_keys == set(tags_dict.keys()):
            filtered_instances.append(instance)
    return filtered_instances


def list_instances_gce(tags_dict=None, running=False, verbose=False):
    """
    list all instances with specific tags GCE

    :param tags_dict: a dict of the tag to select the instances, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
    :return: None
    """
    # avoid cyclic dependency issues, since too many things import utils.py
    from sdcm.keystore import KeyStore
    gcp_credentials = KeyStore().get_gcp_credentials()
    gce_driver = get_driver(Provider.GCE)

    compute_engine = gce_driver(gcp_credentials["project_id"] + "@appspot.gserviceaccount.com",
                                gcp_credentials["private_key"],
                                project=gcp_credentials["project_id"])
    if verbose:
        LOGGER.info("Going to get all instances from GCE")
    all_gce_instances = compute_engine.list_nodes()
    # filter instances by tags since libcloud list_nodes() doesn't offer any filtering
    if tags_dict:
        instances = filter_gce_by_tags(tags_dict=tags_dict, instances=all_gce_instances)
    else:
        instances = all_gce_instances

    if running:
        # https://libcloud.readthedocs.io/en/latest/compute/api.html#libcloud.compute.types.NodeState
        instances = [i for i in instances if i.state == 'running']
    else:
        instances = [i for i in instances if not i.state == 'terminated']
    if verbose:
        LOGGER.info("Done. Found total of %s instances.", len(instances))
    return instances


def clean_instances_gce(tags_dict):
    """
    Remove all instances with specific tags GCE

    :param tags_dict: a dict of the tag to select the instances, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
    :return: None
    """
    assert tags_dict, "tags_dict not provided (can't clean all instances)"
    all_gce_instances = list_instances_gce(tags_dict=tags_dict)
    for instance in all_gce_instances:
        LOGGER.info("Going to delete: {}".format(instance.name))
        # https://libcloud.readthedocs.io/en/latest/compute/api.html#libcloud.compute.base.Node.destroy
        res = instance.destroy()
        LOGGER.info("{} deleted. res={}".format(instance.name, res))


_SCYLLA_AMI_CACHE = defaultdict(dict)


def get_scylla_ami_versions(region):
    """
    get the list of all the formal scylla ami from a specific region

    :param region: the aws region to look in
    :return: list of ami data
    :rtype: list
    """

    if _SCYLLA_AMI_CACHE[region]:
        return _SCYLLA_AMI_CACHE[region]

    ec2 = boto3.client('ec2', region_name=region)
    response = ec2.describe_images(
        Owners=['797456418907'],  # ScyllaDB
        Filters=[
            {'Name': 'name', 'Values': ['ScyllaDB *']},
        ],
    )

    _SCYLLA_AMI_CACHE[region] = sorted(response['Images'],
                                       key=lambda x: x['CreationDate'],
                                       reverse=True)

    return _SCYLLA_AMI_CACHE[region]


_S3_SCYLLA_REPOS_CACHE = defaultdict(dict)


def get_s3_scylla_repos_mapping(dist_type='centos', dist_version=None):
    """
    get the mapping from version prefixes to rpm .repo or deb .list file locations

    :param dist_type: which distro to look up centos/ubuntu/debian
    :param dist_version: family name of the distro version

    :return: a mapping of version prefixes to repos
    :rtype: dict
    """
    if (dist_type, dist_version) in _S3_SCYLLA_REPOS_CACHE:
        return _S3_SCYLLA_REPOS_CACHE[(dist_type, dist_version)]

    s3_client = boto3.client('s3')
    bucket = 'downloads.scylladb.com'

    if dist_type == 'centos':
        response = s3_client.list_objects(Bucket=bucket, Prefix='rpm/centos/', Delimiter='/')

        for repo_file in response['Contents']:
            filename = os.path.basename(repo_file['Key'])
            # only if the path looks like 'rpm/centos/scylla-1.3.repo', we deem it a formal one
            if filename.startswith('scylla-') and filename.endswith('.repo'):
                version_prefix = filename.replace('.repo', '').split('-')[-1]
                _S3_SCYLLA_REPOS_CACHE[(
                    dist_type, dist_version)][version_prefix] = "https://s3.amazonaws.com/{bucket}/{path}".format(bucket=bucket, path=repo_file['Key'])

    elif dist_type == 'ubuntu' or dist_type == 'debian':
        response = s3_client.list_objects(Bucket=bucket, Prefix='deb/{}/'.format(dist_type), Delimiter='/')
        for repo_file in response['Contents']:
            filename = os.path.basename(repo_file['Key'])
            # only if the path looks like 'deb/debian/scylla-3.0-jessie.list', we deem it a formal one
            if filename.startswith('scylla-') and filename.endswith('-{}.list'.format(dist_version)):
                version_prefix = filename.replace('-{}.list'.format(dist_version), '').split('-')[-1]
                _S3_SCYLLA_REPOS_CACHE[(
                    dist_type, dist_version)][version_prefix] = "https://s3.amazonaws.com/{bucket}/{path}".format(bucket=bucket, path=repo_file['Key'])

    else:
        raise NotImplementedError("[{}] is not yet supported".format(dist_type))

    return _S3_SCYLLA_REPOS_CACHE[(dist_type, dist_version)]


def pid_exists(pid):
    """
    Return True if a given PID exists.

    :param pid: Process ID number.
    """
    try:
        os.kill(pid, 0)
    except OSError as detail:
        if detail.errno == errno.ESRCH:
            return False
    return True


def safe_kill(pid, signal):
    """
    Attempt to send a signal to a given process that may or may not exist.

    :param signal: Signal number.
    """
    try:
        os.kill(pid, signal)
        return True
    except Exception:  # pylint: disable=broad-except
        return False


class FileFollowerIterator(object):  # pylint: disable=too-few-public-methods
    def __init__(self, filename, thread_obj):
        self.filename = filename
        self.thread_obj = thread_obj

    def __iter__(self):
        with open(self.filename, 'r') as input_file:
            line = ''
            while not self.thread_obj.stopped():
                poller = select.poll()  # pylint: disable=no-member
                poller.register(input_file, select.POLLIN)  # pylint: disable=no-member
                if poller.poll(100):
                    line += input_file.readline()
                if not line or not line.endswith('\n'):
                    time.sleep(0.1)
                    continue
                yield line
                line = ''
            yield line


class FileFollowerThread(object):
    def __init__(self):
        self.executor = concurrent.futures.ThreadPoolExecutor(1)
        self._stop_event = threading.Event()
        self.future = None

    def __enter__(self):
        self.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    def run(self):
        raise NotImplementedError()

    def start(self):
        self.future = self.executor.submit(self.run)
        return self.future

    def stop(self):
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()

    def follow_file(self, filename):
        return FileFollowerIterator(filename, self)


class ScyllaCQLSession(object):
    def __init__(self, session, cluster):
        self.session = session
        self.cluster = cluster

    def __enter__(self):
        return self.session

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.cluster.shutdown()


class MethodVersionNotFound(Exception):
    pass


class version(object):  # pylint: disable=invalid-name,too-few-public-methods
    VERSIONS = {}
    """
    Runs a method according to the version attribute of the class method

    Limitations: currently, can't work if the same method name in the same file is used in different classes

    Example:
        In [3]: class VersionedClass(object):
           ...:     def __init__(self, current_version):
           ...:         self.version = current_version
           ...:
           ...:     @version("1.2")
           ...:     def setup(self):
           ...:         return "1.2"
           ...:
           ...:     @version("2")
           ...:     def setup(self):
           ...:         return "2"

        In [4]: vc = VersionedClass("2")

        In [5]: vc.setup()
        Out[5]: '2'

        In [6]: vc = VersionedClass("1.2")

        In [7]: vc.setup()
        Out[7]: '1.2'
    """

    def __init__(self, ver):
        self.version = ver

    def __call__(self, func):
        self.VERSIONS[(self.version, func.func_name, func.func_code.co_filename)] = func

        @wraps(func)
        def inner(*args, **kwargs):
            cls_self = args[0]
            func_to_run = self.VERSIONS.get((cls_self.version, func.func_name, func.func_code.co_filename))
            if func_to_run:
                return func_to_run(*args, **kwargs)
            else:
                raise MethodVersionNotFound("Method '{}' with version '{}' not defined in '{}'!".format(
                    func.func_name,
                    cls_self.version,
                    cls_self.__class__.__name__))
        return inner


def get_free_port():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('', 0))
    addr = sock.getsockname()
    port = addr[1]
    sock.close()
    return port


def get_my_ip():
    hostname = socket.gethostname()
    ip = socket.gethostbyname(hostname)
    return ip


def get_branched_ami(ami_version, region_name):
    """
    Get a list of AMIs, based on version match

    :param ami_version: branch version to look for, ex. 'branch-2019.1:latest', 'branch-3.1:all'
    :param region_name: the region to look AMIs in
    :return: list of ec2.images
    """
    branch, build_id = ami_version.split(':')
    ec2 = boto3.resource('ec2', region_name=region_name)

    LOGGER.info("Looking for AMI match [%s]", ami_version)
    if build_id == 'latest' or build_id == 'all':
        filters = [{'Name': 'tag:branch', 'Values': [branch]}]
    else:
        filters = [{'Name': 'tag:branch', 'Values': [branch]}, {'Name': 'tag:build-id', 'Values': [build_id]}]

    amis = list(ec2.images.filter(Filters=filters))

    amis = sorted(amis, key=lambda x: x.creation_date, reverse=True)
    assert amis, "AMI matching [{}] wasn't found on {}".format(ami_version, region_name)
    if build_id == 'all':
        return amis
    else:
        return amis[:1]


def get_ami_tags(ami_id, region_name):
    """
    Get a list of tags of a specific AMI

    :param ami_id:
    :param region_name: the region to look AMIs in
    :return: dict of tags
    """
    ec2 = boto3.resource('ec2', region_name=region_name)
    test_image = ec2.Image(ami_id)

    if test_image.tags:
        return {i['Key']: i['Value'] for i in test_image.tags}
    else:
        return {}


def tag_ami(ami_id, tags_dict, region_name):
    tags = [{'Key': key, 'Value': value} for key, value in tags_dict.items()]

    ec2 = boto3.resource('ec2', region_name=region_name)
    test_image = ec2.Image(ami_id)
    tags += test_image.tags
    test_image.create_tags(Tags=tags)

    LOGGER.info("tagged %s with %s", ami_id, tags)


def get_non_system_ks_cf_list(loader_node, db_node, request_timeout=300, filter_out_table_with_counter=False,
                              filter_out_mv=False):
    """Get all non-system keyspace.table pairs

    Arguments:
        loader_node {BaseNode} -- LoaderNode to send the request from
        db_node {str} -- ip of db_node
    """
    # pylint: disable=too-many-locals

    def get_tables_columns_list(entity_type):
        if entity_type == 'view':
            cmd = "paging off; SELECT keyspace_name, view_name FROM system_schema.views"
        else:
            cmd = "paging off; SELECT keyspace_name, table_name, type FROM system_schema.columns"
        result = loader_node.run_cqlsh(cmd=cmd, timeout=request_timeout, verbose=False, target_db_node=db_node,
                                       split=True, connect_timeout=request_timeout)
        if not result:
            return []

        splitter_result = []
        for row in result[4:]:
            if '|' not in row:
                continue
            if row.startswith('system'):
                continue
            splitter_result.append(row.split('|'))

        return splitter_result

    views_list = set()
    if filter_out_mv:
        tables = get_tables_columns_list('view')
        for table in tables:
            views_list.add('.'.join([name.strip() for name in table[:2]]))
        views_list = list(views_list)

    result = get_tables_columns_list('column')
    if not result:
        return []

    available_ks_cf = defaultdict(list)
    for row in result:
        ks_cf_name = '.'.join([name.strip() for name in row[:2]])

        if filter_out_mv and ks_cf_name in views_list:
            continue

        column_type = row[2].strip()
        available_ks_cf[ks_cf_name].append(column_type)

    if filter_out_table_with_counter:
        for ks_cf, column_types in available_ks_cf.items():
            if 'counter' in column_types:
                available_ks_cf.pop(ks_cf)

    return available_ks_cf.keys()


def remove_files(path):
    LOGGER.debug("Remove path %s", path)
    try:
        if os.path.isdir(path):
            shutil.rmtree(path=path, ignore_errors=True)
        if os.path.isfile(path):
            os.remove(path)
    except Exception as details:  # pylint: disable=broad-except
        LOGGER.error("Error during removing archived logs %s", details)


def format_timestamp(timestamp):
    return datetime.datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')


def makedirs(path):
    """
    TODO: when moving to python3, this function will be replaced
    with the os.makedirs function:
        os.makedirs(name, mode=0o777, exist_ok=False)
    """
    try:
        os.makedirs(path)
    except OSError:
        if os.path.exists(path):
            return
        raise


def wait_ami_available(client, ami_id):
    """Wait while ami_id becomes available

    Wait while ami_id becomes available, after
    10 minutes return an error

    Arguments:
        client {boto3.EC2.Client} -- client of EC2 service
        ami_id {str} -- ami id to check availability
    """
    waiter = client.get_waiter('image_available')
    waiter.wait(ImageIds=[ami_id],
                WaiterConfig={
                    'Delay': 30,
                    'MaxAttempts': 20}
                )


def update_certificates():
    """
    Update the certificate of server encryption, which might be expired.
    """
    try:
        from sdcm.remote import LocalCmdRunner
        localrunner = LocalCmdRunner()
        localrunner.run('openssl x509 -req -in data_dir/ssl_conf/example/db.csr -CA data_dir/ssl_conf/cadb.pem -CAkey data_dir/ssl_conf/example/cadb.key -CAcreateserial -out data_dir/ssl_conf/db.crt -days 365')
        localrunner.run('openssl x509 -enddate -noout -in data_dir/ssl_conf/db.crt')
    except Exception as ex:
        raise Exception('Failed to update certificates by openssl: %s' % ex)


def s3_download_dir(bucket, path, target):
    """
    Downloads recursively the given S3 path to the target directory.

    :param bucket: the name of the bucket to download from
    :param path: The S3 directory to download.
    :param target: the local directory to download the files to.
    """

    client = boto3.client('s3')

    # Handle missing / at end of prefix
    if not path.endswith('/'):
        path += '/'

    if path.startswith('/'):
        path = path[1:]

    result = client.list_objects_v2(Bucket=bucket, Prefix=path)

    # Download each file individually
    for key in result['Contents']:
        # Calculate relative path
        rel_path = key['Key'][len(path):]
        # Skip paths ending in /
        if not key['Key'].endswith('/'):
            local_file_path = os.path.join(target, rel_path)
            # Make sure directories exist
            local_file_dir = os.path.dirname(local_file_path)
            makedirs(local_file_dir)
            LOGGER.info("Downloading %s from s3 to %s", key['Key'], local_file_path)
            client.download_file(bucket, key['Key'], local_file_path)


def gce_download_dir(bucket, path, target):
    """
    Downloads recursively the given google storage path to the target directory.

    :param bucket: the name of the bucket to download from
    :param path: The google storage directory to download.
    :param target: the local directory to download the files to.
    """

    from sdcm.keystore import KeyStore
    gcp_credentials = KeyStore().get_gcp_credentials()
    gce_driver = libcloud.storage.providers.get_driver(libcloud.storage.types.Provider.GOOGLE_STORAGE)

    driver = gce_driver(gcp_credentials["project_id"] + "@appspot.gserviceaccount.com",
                        gcp_credentials["private_key"],
                        project=gcp_credentials["project_id"])

    if not path.endswith('/'):
        path += '/'

    if path.startswith('/'):
        path = path[1:]

    container = driver.get_container(container_name=bucket)
    dir_listing = driver.list_container_objects(container, ex_prefix=path)
    for obj in dir_listing:
        rel_path = obj.name[len(path):]
        local_file_path = os.path.join(target, rel_path)

        local_file_dir = os.path.dirname(local_file_path)
        makedirs(local_file_dir)
        LOGGER.info("Downloading %s from gcp to %s", obj.name, local_file_path)
        obj.download(destination_path=local_file_path, overwrite_existing=True)


def download_dir_from_cloud(url):
    """
    download a directory from AWS S3 or from google storage

    :param url: a url that starts with `s3://` or `gs://`
    :return: the temp directory created with the downloaded content
    """
    if url is None:
        return url

    md5 = hashlib.md5()
    md5.update(url)
    tmp_dir = os.path.join('/tmp/download_from_cloud', md5.hexdigest())
    parsed = urlparse(url)
    LOGGER.info("Downloading [%s] to [%s]", url, tmp_dir)
    if os.path.isdir(tmp_dir) and os.listdir(tmp_dir):
        LOGGER.warning("[{}] already exists, skipping download".format(tmp_dir))
    else:
        if url.startswith('s3://'):
            s3_download_dir(parsed.hostname, parsed.path, tmp_dir)
        elif url.startswith('gs://'):
            gce_download_dir(parsed.hostname, parsed.path, tmp_dir)
        elif os.path.isdir(url):
            tmp_dir = url
        else:
            raise ValueError("Unsupported url schema or non-existing directory [{}]".format(url))
    if not tmp_dir.endswith('/'):
        tmp_dir += '/'
    LOGGER.info("Finished downloading [%s]", url)
    return tmp_dir


def filter_aws_instances_by_type(instances):
    filtered_instances = {
        "db_nodes": [],
        "loader_nodes": [],
        "monitor_nodes": []
    }

    for instance in instances:
        name = [tag['Value'] for tag in instance['Tags'] if tag['Key'] == 'Name']
        if 'db-node' in name[0]:
            filtered_instances["db_nodes"].append(instance)
        if 'monitor-node' in name[0]:
            filtered_instances["monitor_nodes"].append(instance)
        if 'loader-node' in name[0]:
            filtered_instances["loader_nodes"].append(instance)
    return filtered_instances


def filter_gce_instances_by_type(instances):
    filtered_instances = {
        "db_nodes": [],
        "loader_nodes": [],
        "monitor_nodes": []
    }

    for instance in instances:
        if 'db-nodes' in instance.name:
            filtered_instances["db_nodes"].append(instance)
        if 'monitor-node' in instance.name:
            filtered_instances["monitor_nodes"].append(instance)
        if 'loader-node' in instance.name:
            filtered_instances["loader_nodes"].append(instance)

    return filtered_instances


BUILDERS = [
    {
        "name": "aws-scylla-qa-builder3",
        "public_ip": "18.235.64.163",
        "user": "jenkins",
        "key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
    },
    {
        "name": "aws-eu-west1-qa-builder1",
        "public_ip": "18.203.132.87",
        "user": "jenkins",
        "key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
    },
    {
        "name": "aws-eu-west1-qa-builder2",
        "public_ip": "34.244.95.165",
        "user": "jenkins",
        "key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
    },
    {
        "name": "aws-eu-west1-qa-builder4",
        "public_ip": "34.253.184.117",
        "user": "jenkins",
        "key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
    },
    {
        "name": "aws-eu-west1-qa-builder4",
        "public_ip": "52.211.130.106",
        "user": "jenkins",
        "key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
    }
]


def get_builder_by_test_id(test_id):
    from sdcm.remote import RemoteCmdRunner

    base_path_on_builder = "/home/jenkins/slave/workspace"
    found_builders = []

    def search_test_id_on_builder(builder):
        remoter = RemoteCmdRunner(builder['public_ip'],
                                  user=builder['user'],
                                  key_file=builder['key_file'])

        LOGGER.info('Search on %s', builder['name'])
        result = remoter.run("find {where} -name test_id | xargs grep -rl {test_id}".format(where=base_path_on_builder,
                                                                                            test_id=test_id),
                             ignore_status=True, verbose=False)

        if not result.exited and not result.stderr:
            path = result.stdout.strip()
            LOGGER.info("Builder name %s, ip %s, folder %s", builder['name'], builder['public_ip'], path)
            return {"builder": builder, "path": os.path.dirname(path)}
        else:
            LOGGER.info("Nothing found")
            return None

    search_obj = ParallelObject(BUILDERS, timeout=30, num_workers=len(BUILDERS))
    results = search_obj.run(search_test_id_on_builder)
    found_builders = [builder for builder in results if builder]
    if not found_builders:
        LOGGER.info("Nothing found for %s", test_id)

    return found_builders


def get_post_behavior_actions(config):
    action_per_type = {
        "db_nodes": None,
        "monitor_nodes": None,
        "loader_nodes": None
    }

    for key in action_per_type:
        config_key = 'post_behavior_{}'.format(key)
        old_config_key = config.get('failure_post_behavior', 'destroy')
        action_per_type[key] = config.get(config_key, old_config_key)

    return action_per_type


def clean_aws_instances_according_post_behavior(params, config, logdir):  # pylint: disable=invalid-name
    status = get_testrun_status(params.get('TestId'), logdir)

    def apply_action(instances, action):
        if action == 'destroy':
            instances_ids = [instance['InstanceId'] for instance in instances]
            LOGGER.info('Clean next instances %s', instances_ids)
            client.terminate_instances(InstanceIds=instances_ids)
        elif action == 'keep-on-failure':
            if status:
                LOGGER.info('Run failed. Leave instances running')
            else:
                LOGGER.info('Run was Successful. Killing nodes')
                apply_action(instances, action='destroy')
        elif action == 'keep':
            LOGGER.info('Leave instances running')
        else:
            LOGGER.warning('Unsupported action %s', action)

    aws_instances = list_instances_aws(params, group_as_region=True)

    for region, instances in aws_instances.items():
        if not instances:
            continue
        client = boto3.client("ec2", region_name=region)
        filtered_instances = filter_aws_instances_by_type(instances)
        actions_per_type = get_post_behavior_actions(config)
        for instance_set_type, action in actions_per_type.items():
            LOGGER.info('Apply action "%s" for %s instances', action, instance_set_type)
            apply_action(filtered_instances[instance_set_type], action)


def clean_gce_instances_according_post_behavior(params, config, logdir):  # pylint: disable=invalid-name
    status = get_testrun_status(params.get('TestId'), logdir)

    def apply_action(instances, action):
        if action == 'destroy':
            for instance in instances:
                LOGGER.info('Destroying instance: %s', instance.name)
                instance.destroy()
                LOGGER.info('Destroyed instance: %s', instance.name)
        elif action == 'keep-on-failure':
            if status:
                LOGGER.info('Run failed. Leave instances running')
            else:
                LOGGER.info('Run was Successful. Killing nodes')
                apply_action(instances, action='destroy')
        elif action == 'keep':
            LOGGER.info('Leave instances running')
        else:
            LOGGER.warning('Unsupported action %s', action)

    gce_instances = list_instances_gce(params)
    filtered_instances = filter_gce_instances_by_type(gce_instances)
    actions_per_type = get_post_behavior_actions(config)
    for instance_set_type, action in actions_per_type.items():
        apply_action(filtered_instances[instance_set_type], action)


def search_test_id_in_latest(logdir):
    from sdcm.remote import LocalCmdRunner

    test_id = None
    result = LocalCmdRunner().run('cat {0}/latest/test_id'.format(logdir), ignore_status=True)
    if not result.exited and result.stdout:
        test_id = result.stdout.strip()
        LOGGER.info("Found latest test_id: {}".format(test_id))
        LOGGER.info("Collect logs for test-run with test-id: {}".format(test_id))
    else:
        LOGGER.error('test_id not found. Exit code: %s; Error details %s', result.exited, result.stderr)
    return test_id


def get_testrun_dir(base_dir, test_id=None):
    from sdcm.remote import LocalCmdRunner

    if not test_id:
        test_id = search_test_id_in_latest(base_dir)
    LOGGER.info('Search dir with logs locally for test id: %s', test_id)
    search_cmd = "find {base_dir} -name test_id | xargs grep -rl {test_id}".format(**locals())
    result = LocalCmdRunner().run(cmd=search_cmd, ignore_status=True)
    LOGGER.info("Search result %s", result)
    if result.exited == 0 and result.stdout:
        found_dirs = result.stdout.strip().split('\n')
        LOGGER.info(found_dirs)
        return os.path.dirname(found_dirs[0])
    LOGGER.info("No dirs found locally for the current test id")
    return None


def get_testrun_status(test_id=None, logdir=None):
    testrun_dir = get_testrun_dir(logdir, test_id)
    status = None
    if testrun_dir:
        with open(os.path.join(testrun_dir, 'events_log/critical.log')) as f:  # pylint: disable=invalid-name
            status = f.readlines()
    return status


def download_encrypt_keys():
    """
    Download certificate files of encryption at-rest from S3 KeyStore
    """
    from sdcm.keystore import KeyStore
    ks = KeyStore()
    if not os.path.exists('./data_dir/encrypt_conf/CA.pem'):
        ks.download_file('CA.pem', './data_dir/encrypt_conf/CA.pem')
    if not os.path.exists('./data_dir/encrypt_conf/SCYLLADB.pem'):
        ks.download_file('SCYLLADB.pem', './data_dir/encrypt_conf/SCYLLADB.pem')
```
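A brief illustrative sketch of the two decorators defined in the module above (`retrying` and `log_run_info`); the function body, exception type, and retry counts below are made up for the example and are not part of the original module.

```python
# Hypothetical usage of the helpers above: retry a flaky call up to 5 times,
# logging BEGIN/END lines around each run (Python 2, matching the module).
@retrying(n=5, sleep_time=2, allowed_exceptions=(IOError,), message="fetching resource")
@log_run_info("Fetch remote resource")
def fetch_resource():
    # ... any body that may raise IOError; `retrying` re-invokes it,
    # re-raising only after the last attempt fails
    pass

fetch_resource()
```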
[ { "content": "Return the code unaltered:\n```python\n# -*- coding: utf-8 -*-\n#\n# (c) Copyright 2003-2009 Hewlett-Packard Development Company, L.P.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Soft...
[ { "content": "Return the code unaltered:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# (c) Copyright 2003-2009 Hewlett-Packard Development Company, L.P.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\...
```python # -*- coding: utf-8 -*- # # (c) Copyright 2003-2009 Hewlett-Packard Development Company, L.P. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Author: Don Welch # # NOTE: This module is safe for 'from g import *' # # Std Lib import sys import os import os.path import ConfigParser import locale import pwd import stat import re # Local from codes import * import logger # System wide logger log = logger.Logger('', logger.Logger.LOG_LEVEL_INFO, logger.Logger.LOG_TO_CONSOLE) log.set_level('info') MINIMUM_PYQT_MAJOR_VER = 3 MINIMUM_PYQT_MINOR_VER = 14 MINIMUM_QT_MAJOR_VER = 3 MINIMUM_QT_MINOR_VER = 0 def to_bool(s, default=False): if isinstance(s, str) and s: if s[0].lower() in ['1', 't', 'y']: return True elif s[0].lower() in ['0', 'f', 'n']: return False elif isinstance(s, bool): return s return default # System wide properties class Properties(dict): def __getattr__(self, attr): if attr in self.keys(): return self.__getitem__(attr) else: return "" def __setattr__(self, attr, val): self.__setitem__(attr, val) prop = Properties() class ConfigBase(object): def __init__(self, filename): self.filename = filename self.conf = ConfigParser.ConfigParser() self.read() def get(self, section, key, default=u''): try: return self.conf.get(section, key) except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): return default def set(self, section, key, value): if not self.conf.has_section(section): self.conf.add_section(section) self.conf.set(section, key, value) self.write() def sections(self): return self.conf.sections() def has_section(self, section): return self.conf.has_section(section) def options(self, section): return self.conf.options(section) keys = options def read(self): if self.filename is not None: filename = self.filename if filename.startswith("/root/"): # Don't try opening a file in root's home directory. log.error("attempted to read from '%s'" % self.filename) return try: fp = open(self.filename, "r") self.conf.readfp(fp) fp.close() except (OSError, IOError): log.debug("Unable to open file %s for reading." % self.filename) def write(self): if self.filename is not None: filename = self.filename if filename.startswith("/root/") or filename.startswith("/etc/"): # Don't try writing a file in root's home directory or # the system-wide config file. # See bug #479178. log.error("attempted to write to '%s'" % self.filename) return try: fp = open(self.filename, "w") self.conf.write(fp) fp.close() except (OSError, IOError): log.debug("Unable to open file %s for writing." 
% self.filename) class SysConfig(ConfigBase): def __init__(self): ConfigBase.__init__(self, '/etc/hp/hplip.conf') class State(ConfigBase): def __init__(self): ConfigBase.__init__(self, '/var/lib/hp/hplip.state') class UserConfig(ConfigBase): def __init__(self): if not os.geteuid() == 0: prop.user_dir = os.path.expanduser('~/.hplip') try: if not os.path.exists(prop.user_dir): os.makedirs(prop.user_dir) except OSError: pass # This is sometimes OK, if running hpfax: for example prop.user_config_file = os.path.join(prop.user_dir, 'hplip.conf') if not os.path.exists(prop.user_config_file): try: file(prop.user_config_file, 'w').close() s = os.stat(os.path.dirname(prop.user_config_file)) os.chown(prop.user_config_file, s[stat.ST_UID], s[stat.ST_GID]) except IOError: pass ConfigBase.__init__(self, prop.user_config_file) else: # If running as root, conf file is None prop.user_dir = None prop.user_config_file = None ConfigBase.__init__(self, None) def workingDirectory(self): t = self.get('last_used', 'working_dir', os.path.expanduser("~")) try: t = t.decode('utf-8') except UnicodeError: log.error("Invalid unicode: %s" % t) log.debug("working directory: %s" % t) return t def setWorkingDirectory(self, t): self.set('last_used', 'working_dir', t.encode('utf-8')) log.debug("working directory: %s" % t.encode('utf-8')) os.umask(0037) # System Config File: Directories and build settings. Not altered after installation. sys_conf = SysConfig() # System State File: System-wide runtime settings sys_state = State() # Per-user Settings File: (Note: For Qt4 code, limit the use of this to non-GUI apps. only) user_conf = UserConfig() # Language settings try: prop.locale, prop.encoding = locale.getdefaultlocale() except ValueError: prop.locale = 'en_US' prop.encoding = 'UTF8' prop.version = sys_conf.get('hplip', 'version', '0.0.0') # e.g., 3.9.2b.10 _p, _x = re.compile(r'(\d\w*)', re.I), [] for _y in prop.version.split('.')[:3]: _z = _p.match(_y) if _z is not None: _x.append(_z.group(1)) prop.installed_version = '.'.join(_x) # e.g., '3.9.2' try: prop.installed_version_int = int(''.join(['%02x' % int(_y) for _y in _x]), 16) # e.g., 0x030902 -> 198914 except ValueError: prop.installed_version_int = 0 prop.home_dir = sys_conf.get('dirs', 'home', os.path.realpath(os.path.normpath(os.getcwd()))) prop.username = pwd.getpwuid(os.getuid())[0] pdb = pwd.getpwnam(prop.username) prop.userhome = pdb[5] prop.history_size = 50 prop.data_dir = os.path.join(prop.home_dir, 'data') prop.image_dir = os.path.join(prop.home_dir, 'data', 'images') prop.xml_dir = os.path.join(prop.home_dir, 'data', 'xml') prop.models_dir = os.path.join(prop.home_dir, 'data', 'models') prop.localization_dir = os.path.join(prop.home_dir, 'data', 'localization') prop.max_message_len = 8192 prop.max_message_read = 65536 prop.read_timeout = 90 prop.ppd_search_path = '/usr/share;/usr/local/share;/usr/lib;/usr/local/lib;/usr/libexec;/opt;/usr/lib64' prop.ppd_search_pattern = 'HP-*.ppd.*' prop.ppd_download_url = 'http://www.linuxprinting.org/ppd-o-matic.cgi' prop.ppd_file_suffix = '-hpijs.ppd' # Build and install configurations prop.gui_build = to_bool(sys_conf.get('configure', 'gui-build', '0')) prop.net_build = to_bool(sys_conf.get('configure', 'network-build', '0')) prop.par_build = to_bool(sys_conf.get('configure', 'pp-build', '0')) prop.usb_build = True prop.scan_build = to_bool(sys_conf.get('configure', 'scanner-build', '0')) prop.fax_build = to_bool(sys_conf.get('configure', 'fax-build', '0')) prop.doc_build = to_bool(sys_conf.get('configure', 'doc-build', 
'0')) prop.foomatic_xml_install = to_bool(sys_conf.get('configure', 'foomatic-xml-install', '0')) prop.foomatic_ppd_install = to_bool(sys_conf.get('configure', 'foomatic-ppd-install', '0')) prop.hpcups_build = to_bool(sys_conf.get('configure', 'hpcups-install', '0')) prop.hpijs_build = to_bool(sys_conf.get('configure', 'hpijs-install', '0')) # Spinner, ala Gentoo Portage spinner = "\|/-\|/-" spinpos = 0 def update_spinner(): global spinner, spinpos if not log.is_debug() and sys.stdout.isatty(): sys.stdout.write("\b" + spinner[spinpos]) spinpos=(spinpos + 1) % 8 sys.stdout.flush() def cleanup_spinner(): if not log.is_debug() and sys.stdout.isatty(): sys.stdout.write("\b \b") sys.stdout.flush() # Internal/messaging errors ERROR_STRINGS = { ERROR_SUCCESS : 'No error', ERROR_UNKNOWN_ERROR : 'Unknown error', ERROR_DEVICE_NOT_FOUND : 'Device not found', ERROR_INVALID_DEVICE_ID : 'Unknown/invalid device-id field', ERROR_INVALID_DEVICE_URI : 'Unknown/invalid device-uri field', ERROR_DATA_LENGTH_EXCEEDS_MAX : 'Data length exceeds maximum', ERROR_DEVICE_IO_ERROR : 'Device I/O error', ERROR_NO_PROBED_DEVICES_FOUND : 'No probed devices found', ERROR_DEVICE_BUSY : 'Device busy', ERROR_DEVICE_STATUS_NOT_AVAILABLE : 'DeviceStatus not available', ERROR_INVALID_SERVICE_NAME : 'Invalid service name', ERROR_ERROR_INVALID_CHANNEL_ID : 'Invalid channel-id (service name)', ERROR_CHANNEL_BUSY : 'Channel busy', ERROR_DEVICE_DOES_NOT_SUPPORT_OPERATION : 'Device does not support operation', ERROR_DEVICEOPEN_FAILED : 'Device open failed', ERROR_INVALID_DEVNODE : 'Invalid device node', ERROR_INVALID_HOSTNAME : "Invalid hostname ip address", ERROR_INVALID_PORT_NUMBER : "Invalid JetDirect port number", ERROR_NO_CUPS_QUEUE_FOUND_FOR_DEVICE : "No CUPS queue found for device.", ERROR_DATFILE_ERROR: "DAT file error", ERROR_INVALID_TIMEOUT: "Invalid timeout", ERROR_IO_TIMEOUT: "I/O timeout", ERROR_FAX_INCOMPATIBLE_OPTIONS: "Incompatible fax options", ERROR_FAX_INVALID_FAX_FILE: "Invalid fax file", ERROR_FAX_FILE_NOT_FOUND: "Fax file not found", ERROR_INTERNAL : 'Unknown internal error', } class Error(Exception): def __init__(self, opt=ERROR_INTERNAL): self.opt = opt self.msg = ERROR_STRINGS.get(opt, ERROR_STRINGS[ERROR_INTERNAL]) log.debug("Exception: %d (%s)" % (opt, self.msg)) Exception.__init__(self, self.msg, opt) # Make sure True and False are avail. in pre-2.2 versions try: True except NameError: True = (1==1) False = not True # as new translations are completed, add them here supported_locales = { 'en_US': ('us', 'en', 'en_us', 'american', 'america', 'usa', 'english'),} # Localization support was disabled in 3.9.2 #'zh_CN': ('zh', 'cn', 'zh_cn' , 'china', 'chinese', 'prc'), #'de_DE': ('de', 'de_de', 'german', 'deutsche'), #'fr_FR': ('fr', 'fr_fr', 'france', 'french', 'français'), #'it_IT': ('it', 'it_it', 'italy', 'italian', 'italiano'), #'ru_RU': ('ru', 'ru_ru', 'russian'), #'pt_BR': ('pt', 'br', 'pt_br', 'brazil', 'brazilian', 'portuguese', 'brasil', 'portuguesa'), #'es_MX': ('es', 'mx', 'es_mx', 'mexico', 'spain', 'spanish', 'espanol', 'español'), #} ```
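The version-packing expression above is dense; here is a minimal standalone Python 3 sketch of the same idea (the function name is illustrative, not part of HPLIP): each of the first three numeric version components becomes a two-digit hex byte, so version comparisons can be done on a single integer.

```python
# Illustrative sketch of the prop.installed_version_int computation above.
def pack_version(version):
    """Pack 'X.Y.Z' into one int, e.g. '3.9.2' -> 0x030902 == 198914."""
    parts = version.split('.')[:3]
    try:
        return int(''.join('%02x' % int(p) for p in parts), 16)
    except ValueError:
        return 0  # mirrors the fallback for non-numeric parts like '2b'

assert pack_version('3.9.2') == 198914
```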
```python #!/usr/bin/env python3 import sys print("QPID support NOT enabled! Will NOT connect to any broker, and messages will be lost!") """ Exceptions. """ class ProtonException(Exception): pass class SessionError(Exception): pass class Timeout(Exception): pass """ Messages. """ class Message(object): def __init__(self, content_type, durable): self.content = "" """ Communication. """ class Sender(object): def __init__(self, dest): self.dest = dest def send(self, msg): pass class Receiver(object): def __init__(self, source): self.capacity = 0 self.source = source def fetch(self): return None class Session(object): def sender(self, address): return Sender(address) def receiver(self, address): return Receiver(address) def next_receiver(self, timeout=0): return Receiver("unknown") def acknowledge(self, msg): pass class Connection(object): def __init__(self, broker): self.reconnect = False def open(self): pass def close(self, timeout=0): pass def session(self): return Session() ```
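Since every class above is a no-op stand-in, code written against the real broker API keeps running when the library is absent. A minimal usage sketch against the stub classes defined above (the broker URL and queue name are placeholders):

```python
# Exercises the stub API defined above; nothing is actually sent.
conn = Connection("amqp://broker.example.org:5672")
conn.open()
session = conn.session()
sender = session.sender("events.queue")
sender.send(Message(content_type="text/plain", durable=False))

receiver = session.receiver("events.queue")
assert receiver.fetch() is None  # the stub never yields a message
conn.close()
```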
Provide a verbatim copy of the code:
```python #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2016 University of Oslo, Norway # # This file is part of Cerebrum. # # Cerebrum is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Cerebrum is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Cerebrum; if not, write to the Free Software Foundation, # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Consumes events from SAP and updates Cerebrum.""" from __future__ import unicode_literals import time import datetime import requests import json from collections import OrderedDict from six import text_type, integer_types from mx import DateTime from aniso8601.exceptions import ISOFormatError import cereconf from Cerebrum import Errors from Cerebrum.utils.date import parse_date, date_to_datetime, apply_timezone from Cerebrum.Utils import Factory, read_password from Cerebrum.modules.event.mapping import CallbackMap from Cerebrum.modules.automatic_group.structure import (get_automatic_group, update_memberships) from Cerebrum.config.configuration import (ConfigDescriptor, Namespace, Configuration) from Cerebrum.config.settings import String from Cerebrum.config.loader import read, read_config from Cerebrum.modules.event_consumer import get_consumer from Cerebrum.modules.event_consumer.config import AMQPClientConsumerConfig from Cerebrum.modules.event_publisher.mock_client import MockClient from Cerebrum.modules.event_publisher.amqp_publisher import (PublisherConfig, AMQP091Publisher) logger = Factory.get_logger('cronjob') AccountClass = Factory.get('Account') callback_functions = CallbackMap() callback_filters = CallbackMap() # INTEGER_TYPE is long in in python2 and will be int in python3 INTEGER_TYPE = integer_types[-1] LEADER_GROUP_PREFIX = 'adm-leder-' def filter_meta(l): """Filter out the __metadata key of a dict.""" return dict(filter(lambda (k, _): k != '__metadata', l.items())) def translate_keys(d, m): """Translate keys in accordance to a LUT. :type d: dict :param d: The dict whose keys to convert. :type m: dict :param m: A lookup table. :rtype: dict :return: The converted dict.""" return map(lambda (k, v): (m.get(k, None), v), d.items()) def filter_elements(d): """Filter out all elements that do not evaluate to True. :type d: list(tuple(k, v)) :param d: A list of tuples to filter. 
:rtype: list(tuple(k, v)) :return: The filtered list.""" return filter(lambda (k, v): k and v, d) class RemoteSourceUnavailable(Exception): """Exception signaling that the remote system is out of service.""" class RemoteSourceError(Exception): """An error occured in the source system.""" class SourceSystemNotReachedError(Exception): """Package not received from source system.""" class EntityDoesNotExistInSourceSystemError(Exception): """Entity does not exist in source system.""" class ErroneousSourceData(Exception): """An error occured in the source system data.""" class EntityNotResolvableError(Exception): """Distinctive entity could not be resolved with supplied information.""" class SAPWSConsumerConfig(Configuration): """Configuration of the WebService connectivity.""" auth_user = ConfigDescriptor( String, default=u"webservice", doc=u"Username to use when connecting to the WS.") auth_system = ConfigDescriptor( String, default='sap_ws', doc=u"The system name used for the password file, for example 'test'.") class SAPConsumerConfig(Configuration): """Config combining class.""" ws = ConfigDescriptor(Namespace, config=SAPWSConsumerConfig) consumer = ConfigDescriptor(Namespace, config=AMQPClientConsumerConfig) def load_config(cls, name, filepath=None): """Load config for consumer or publisher""" config_cls = cls() if filepath: logger.info('Loading config file: %r', filepath) config_cls.load_dict(read_config(filepath)) else: read(config_cls, name) logger.info('no filepath, using defaults') logger.info('validating config_cls') config_cls.validate() return config_cls def parse_address(d): """Parse the data from SAP an return a diff-able structure. :type d: dict :param d: Data from SAP :rtype: tuple( (AddressCode, ('city', 'OSLO'), ('postal_number', 0316), ('address_text', 'Postboks 1059 Blindern'))) :return: A tuple with the fields that should be updated""" co = Factory.get('Constants') address_types = ('legalAddress', 'workMailingAddress', 'workVisitingAddress') m = {'legalAddress': co.address_post_private, 'workMailingAddress': co.address_post, 'workVisitingAddress': co.address_street, 'city': 'city', 'postalCode': 'postal_number', 'streetAndHouseNumber': 'address_text'} r = {x: d.get(x, {}) for x in address_types} logger.info('parsing %i addresses', len(r)) # Visiting address should be a concoction of real address and a # meta-location if r.get('workVisitingAddress'): r['workVisitingAddress']['streetAndHouseNumber'] = '{}\n{}'.format( r.get('workVisitingAddress').get('streetAndHouseNumber'), r.get('workVisitingAddress').get('location')) return tuple([(k, tuple(sorted(filter_elements(translate_keys(v, m))))) for (k, v) in filter_elements( translate_keys(filter_meta(r), m))]) def verify_sap_header(header): """Verify that the headers originate from SAP""" if 'sap-server' in header: return header['sap-server'] == 'true' return False def parse_names(d): """Parse data from SAP and return names. :type d: dict :param d: Data from SAP :rtype: tuple((PersonName('FIRST'), 'first'), (PersonName('FIRST'), 'last')) :return: A tuple with the fields that should be updated""" logger.info('parsing names') co = Factory.get('Constants') return ((co.name_first, d.get('firstName')), (co.name_last, d.get('lastName'))) def parse_contacts(d): """Parse data from SAP and return contact information. 
:type d: dict :param d: Data from SAP :rtype: ((ContactInfo('PHONE'), (('contact_pref', n), ('contact_value', v), ('description', None))),) :return: A tuple with the fields that should be updated""" logger.info('parsing contacts') co = Factory.get('Constants') # TODO: Validate/clean numbers with phonenumbers? m = {'workPhone': co.contact_phone, 'workMobile': co.contact_mobile_phone, 'privateMobile': co.contact_private_mobile, 'publicMobile': co.contact_private_mobile_visible} def expand(l, pref=0): if not l: return tuple() elif len(l) > 1: n = l[1:] else: n = None (k, v) = l[0] return ((k, (('contact_pref', pref), ('contact_value', v), ('description', None)),),) + expand(n, pref + 1) return expand( filter_elements( translate_keys({c: d.get(c) for c in m.keys()}, m))) def parse_titles(d): """Parse data from SAP and return person titles. :type d: dict :param d: Data from SAP :rtype: [tuple(('name_variant', EntityNameCode('PERSONALTITLE')), ('name_language', LanguageCode('en')), ('name', 'Over Engingineer'))] :return: A list of tuples with the fields that should be updated""" def make_tuple(variant, lang, name): return (('name_variant', variant), ('name_language', lang), ('name', name)) co = Factory.get('Constants') logger.info('parsing titles') titles = [] if d.get('personalTitle'): titles.extend( [make_tuple(co.personal_title, co.language_en, d.get('personalTitle', {}).get('en'))] + map(lambda lang: make_tuple( co.personal_title, lang, d.get('personalTitle', {}).get('nb')), [co.language_nb, co.language_nn])) # Select appropriate work title. assignment = None for e in d.get('assignments', {}).get('results', []): if not e.get('jobTitle'): continue if e.get('primaryAssignmentFlag'): assignment = e break if not assignment: assignment = e elif (float(e.get('agreedFTEPercentage')) > float(assignment.get('agreedFTEPercentage'))): assignment = e if assignment: titles.extend(map(lambda (lang_code, lang_str): make_tuple( co.work_title, lang_code, assignment.get('jobTitle').get(lang_str)), [(co.language_nb, 'nb'), (co.language_nn, 'nb'), (co.language_en, 'en')])) return filter(lambda ((vk, vn), (lk, lv), (nk, nv)): nv, titles) def parse_external_ids(d): """Parse data from SAP and return external ids (i.e. passnr). :type d: dict :param d: Data from SAP :rtype: [tuple(EntityExternalId('PASSNR'), '000001')] :return: A list of tuples with the external_ids""" co = Factory.get('Constants') external_ids = [(co.externalid_sap_ansattnr, unicode(d.get('personId')))] logger.info('parsing %i external ids', len(external_ids)) if d.get('passportIssuingCountry') and d.get('passportNumber'): external_ids.append( (co.externalid_pass_number, co.make_passport_number(d.get('passportIssuingCountry'), d.get('passportNumber')))) if d.get('norwegianIdentificationNumber'): external_ids.append( (co.externalid_fodselsnr, d.get('norwegianIdentificationNumber'))) return filter_elements(external_ids) def _get_ou(database, placecode=None): """Populate a Cerebrum-OU-object from the DB.""" if not placecode: return None ou = Factory.get('OU')(database) ou.clear() try: ou.find_stedkode( *map(''.join, zip(*[iter(str( placecode))] * 2)) + [cereconf.DEFAULT_INSTITUSJONSNR] ) return ou except Errors.NotFoundError: return None def _sap_assignments_to_affiliation_map(): co = Factory.get('Constants') return {'administrative': co.affiliation_status_ansatt_tekadm, 'academic': co.affiliation_status_ansatt_vitenskapelig} def parse_affiliations(database, d): """Parse data from SAP. Return affiliations and leader group ids. 
:type d: dict :param d: Data from SAP :rtype: [tuple(('ou_id': 3), ('affiliation', PersonAffiliation('ANSATT')), ('status', PersonAffStatus('ANSATT', 'tekadm')), (precedence', (50, 50)))] :return: A list of dicts with the fields that should be updated AND a list of leader group ids where the person should be a member""" co = Factory.get('Constants') affiliations = [] leader_group_ids = [] for x in d.get('assignments', {}).get('results', []): status = _sap_assignments_to_affiliation_map().get( x.get('jobCategory')) if not status: logger.warn('parse_affiliations: Unknown job category') # Unknown job category continue ou = _get_ou(database, placecode=x.get('locationId')) if not ou: logger.warn( 'OU {} does not exist, ' 'cannot parse affiliation {} for {}'.format( x.get('locationId'), status, x.get('personId'))) continue main = x.get('primaryAssignmentFlag') if x.get('managerFlag'): leader_group_ids.append(get_automatic_group( database, text_type(x.get('locationId')), LEADER_GROUP_PREFIX ).entity_id) affiliations.append({ 'ou_id': ou.entity_id, 'affiliation': co.affiliation_ansatt, 'status': status, 'precedence': ( (INTEGER_TYPE(50), INTEGER_TYPE(50)) if main else None) }) logger.info('parsed %i affiliations', len(affiliations)) logger.info('parsed %i leader groups', len(leader_group_ids)) return affiliations, leader_group_ids def _sap_roles_to_affiliation_map(): co = Factory.get('Constants') return OrderedDict( [('INNKJØPER', co.affiliation_tilknyttet_innkjoper), ('EF-FORSKER', co.affiliation_tilknyttet_ekst_forsker), ('EMERITUS', co.affiliation_tilknyttet_emeritus), ('BILAGSLØNN', co.affiliation_tilknyttet_bilag), ('GJ-FORSKER', co.affiliation_tilknyttet_gjesteforsker), ('ASSOSIERT', co.affiliation_tilknyttet_assosiert_person), ('EF-STIP', co.affiliation_tilknyttet_ekst_stip), ('GRP-LÆRER', co.affiliation_tilknyttet_grlaerer), ('EKST-KONS', co.affiliation_tilknyttet_ekst_partner), ('PCVAKT', co.affiliation_tilknyttet_pcvakt), ('EKST-PART', co.affiliation_tilknyttet_ekst_partner), ('KOMITEMEDLEM', co.affiliation_tilknyttet_komitemedlem), ('STEDOPPLYS', None), ('POLS-ANSAT', None)]) def parse_roles(database, data): """Parse data from SAP and return existing roles. 
:type d: dict :param d: Data from SAP :rtype: [tuple(('ou_id': 3), ('affiliation', PersonAffiliation('TILKNYTTET')), ('status', PersonAffStatus('TILKNYTTET', 'pcvakt')), (precedence', None))] :return: A list of tuples representing them roles.""" role2aff = _sap_roles_to_affiliation_map() r = [] for role in data.get('roles', {}).get('results', []): ou = _get_ou(database, placecode=role.get('locationId')) if not ou: logger.warn('OU %r does not exist, ' 'cannot parse affiliation %r for %r', role.get('locationId'), role2aff.get(role.get('roleName')), data.get('personId')) elif role2aff.get(role.get('roleName')): r.append({'ou_id': ou.entity_id, 'affiliation': role2aff.get( role.get('roleName')).affiliation, 'status': role2aff.get(role.get('roleName')), 'precedence': None}) logger.info('parsed %i roles', len(r)) return sorted(r, key=(lambda x: role2aff.values().index(x.get('status')) if x.get('status') in role2aff.values() else len(r)), reverse=True) def _parse_hr_person(database, source_system, data): """Collects parsed information from SAP.""" co = Factory.get('Constants') affiliations, leader_group_ids = parse_affiliations(database, data) return { 'id': data.get('personId'), 'addresses': parse_address(data), 'names': parse_names(data), 'birth_date': DateTime.DateFrom( data.get('dateOfBirth')), 'gender': {'Kvinne': co.gender_female, 'Mann': co.gender_male}.get( data.get('gender'), co.gender_unknown), 'external_ids': parse_external_ids(data), 'contacts': parse_contacts(data), 'leader_group_ids': leader_group_ids, 'affiliations': affiliations, 'roles': parse_roles(database, data), 'titles': parse_titles(data), 'reserved': not data.get('allowedPublicDirectoryFlag') } def _request_sap_data(config, url, params=None, ignore_read_password=False): if not params: params = {} if ignore_read_password: headers = {'Accept': 'application/json', 'X-Gravitee-API-Key': 'true'} else: headers = {'Accept': 'application/json', 'X-Gravitee-API-Key': read_password( user=config.auth_user, system=config.auth_system)} try: logger.debug4('Fetching %r', url) response = requests.get(url, headers=headers, params=params) logger.debug4('Fetch completed') except Exception as e: # Be polite on connection errors. Connection errors seldom fix # themselves quickly. import time time.sleep(1) raise RemoteSourceUnavailable(str(e)) if not verify_sap_header(response.headers): logger.warn('Source system not reached') raise SourceSystemNotReachedError return response def _parse_sap_data(response, url=None): if response.status_code == 200: return json.loads(response.text).get('d', None) elif response.status_code == 404: raise EntityDoesNotExistInSourceSystemError('404: Not Found') else: raise RemoteSourceError( 'Could not fetch {} from remote source: {}: {}'.format( url, response.status_code, response.reason)) SAP_ATTRIBUTE_NAMES = { 'assignments': {'start': 'originalHireDate', 'id': 'assignmentId'}, 'roles': {'start': 'effectiveStartDate', 'id': 'roleId'} } def _add_roles_and_assignments(person_data, config, ignore_read_password): """Add roles and assignments to person_data received from SAP The person_data does not include roles and assignments, but rather the uri to get it from. This method fetches it and adds it to person_data. 
:return reschedule_date: Date when the person should be reprocessed :rtype: datetime.date or None """ hire_date_offset = datetime.timedelta( days=cereconf.SAP_START_DATE_OFFSET) reschedule_date = None for key in person_data: if (isinstance(person_data.get(key), dict) and '__deferred' in person_data.get(key) and 'uri' in person_data.get(key).get('__deferred') and key in SAP_ATTRIBUTE_NAMES.keys()): # Fetch, unpack and store role/assignment data deferred_uri = person_data.get(key).get('__deferred').get( 'uri') # We filter by effectiveEndDate >= today to also get # future assignments and roles filter_param = { '$filter': "effectiveEndDate ge '{today}'".format( today=datetime.date.today()) } response = _request_sap_data( config, deferred_uri, params=filter_param, ignore_read_password=ignore_read_password) data = _parse_sap_data(response, url=deferred_uri) results_to_add = [] for result in data.get('results'): start_key = SAP_ATTRIBUTE_NAMES[key]['start'] try: effective_start_date = ( parse_date(result.get(start_key)) - hire_date_offset ) except (ValueError, AttributeError, ISOFormatError): logger.error('Invalid date %s', result.get(start_key)) results_to_add.append(result) else: if datetime.date.today() >= effective_start_date: results_to_add.append(result) elif (reschedule_date is None or effective_start_date < reschedule_date): reschedule_date = effective_start_date logger.info('%s: %s, %s: %s → reschedule_date: %s', SAP_ATTRIBUTE_NAMES[key]['id'], result.get(SAP_ATTRIBUTE_NAMES[key]['id']), start_key, result.get(start_key), reschedule_date) person_data.update({key: {'results': results_to_add}}) return reschedule_date def get_hr_person(config, database, source_system, url, ignore_read_password=False): """Collect a person entry from the remote source system and parse the data. If a person has assignments or roles which are not yet in effect, they will not be added to the hr_person. Instead the message will be rescheduled so that it can be reprocessed at a later time. :param config: Authentication data :param database: Database object :param source_system: The source system code :param url: The URL to contact for collection :param ignore_read_password: Do not include a valid api-key in header :rtype: tuple :return The parsed data from the remote source system and reschedule_date :raises: RemoteSourceUnavailable if the remote system can't be contacted""" def _get_person_data(): response = _request_sap_data(config, url, ignore_read_password=ignore_read_password) person_data = _parse_sap_data(response, url=url) reschedule_date = _add_roles_and_assignments(person_data, config, ignore_read_password) return person_data, reschedule_date person_data, reschedule_date = _get_person_data() return (_parse_hr_person(database, source_system, person_data), reschedule_date) def get_cerebrum_person(database, ids): """Get a person object from Cerebrum. If the person does not exist in Cerebrum, the returned object is clear()'ed""" pe = Factory.get('Person')(database) try: pe.find_by_external_ids(*ids) logger.info('Found existing person with id: %r', pe.entity_id) except Errors.NotFoundError: logger.info('Could not find existing person with one of ids: %r', ids) pe.clear() except Errors.TooManyRowsError as e: raise EntityNotResolvableError( 'Person in source system maps to multiple persons in Cerebrum. 
' 'Manual intervention required: {}'.format(e)) return pe def update_account_affs(method): def wrapper(database, cerebrum_person, ou_id, affiliation): """Calls method if a person's account satisfies certain conditions :param database: A database object :param cerebrum_person: The Person object to be updated. :param ou_id: The ou_id code :param affiliation: The affiliation code """ accounts = cerebrum_person.get_accounts() if len(accounts) != 1: logger.info( 'Person id %r does not have exactly one account', cerebrum_person.entity_id) return ac = Factory.get('Account')(database) ac.find(accounts[0]['account_id']) co = Factory.get('Constants')(database) account_types = ac.get_account_types() if method.__name__ is AccountClass.del_account_type.__name__: if len(account_types) == 1: logger.info('Cannot delete last account_type') return if not ac.list_accounts_by_type(ou_id=ou_id, affiliation=affiliation, account_id=ac.entity_id): logger.info('account_type already deleted ' '(aff: %r, ou_id: %i)', affiliation, ou_id) return if method.__name__ is AccountClass.set_account_type.__name__: for at in account_types: if (at['ou_id'], at['affiliation']) == (ou_id, affiliation): logger.info('account_type already exists ' '(aff: %r, ou_id: %i)', affiliation, ou_id) return for account_type in account_types: if not int(co.affiliation_ansatt) == account_type['affiliation']: logger.info('Account has affiliation(s) besides ' '%r', co.affiliation_ansatt) return aff_info = cerebrum_person.list_affiliations( person_id=account_type['person_id'], ou_id=account_type['ou_id'], affiliation=account_type['affiliation'], ) if aff_info: if not int(co.system_sap) == aff_info[0]['source_system']: logger.info('Account has affiliation from source(s) other ' 'than %r', co.system_sap) return logger.info('%r for account: %r', method.__name__, ac.entity_id) method(ac, ou_id, affiliation) return wrapper del_account_type = update_account_affs(AccountClass.del_account_type) set_account_type = update_account_affs(AccountClass.set_account_type) def _stringify_for_log(data): """Convert data to appropriate types for logging.""" from Cerebrum.Constants import _CerebrumCode import collections if isinstance(data, _CerebrumCode): return unicode(data) elif isinstance(data, basestring): return data elif isinstance(data, collections.Mapping): return dict(map(_stringify_for_log, data.iteritems())) elif isinstance(data, collections.Iterable): return type(data)(map(_stringify_for_log, data)) else: return data def update_person(database, source_system, hr_person, cerebrum_person): """Update person with birth date and gender.""" if not (cerebrum_person.gender and cerebrum_person.birth_date and cerebrum_person.gender == hr_person.get('gender') and cerebrum_person.birth_date == hr_person.get('birth_date')): cerebrum_person.populate( hr_person.get('birth_date'), hr_person.get('gender')) cerebrum_person.write_db() logger.info('Added birth date %r and gender %r for %i', hr_person.get('birth_date'), hr_person.get('gender'), cerebrum_person.entity_id) def _find_affiliations(cerebrum_person, hr_affs, affiliation_map, source_system, mode): consider_affiliations = filter(lambda x: x, affiliation_map().values()) cerebrum_affiliations = cerebrum_person.list_affiliations( person_id=cerebrum_person.entity_id, status=consider_affiliations, source_system=source_system) # Format of a hr_aff: { str: [] } in_hr = map( lambda d: tuple( sorted( filter( lambda (k, v): k != 'precedence', d.items() ) ) ), hr_affs) in_cerebrum = map( lambda x: tuple( sorted( filter_elements( 
translate_keys( x, {'ou_id': 'ou_id', 'affiliation': 'affiliation', 'status': 'status'} ) ) ) ), cerebrum_affiliations) if mode == 'remove': return [ dict(filter(lambda (k, v): k in ('ou_id', 'affiliation'), x) + (('source', source_system),)) for x in set(in_cerebrum) - set(in_hr)] elif mode == 'add': to_add = set(in_hr) - set(in_cerebrum) to_ensure = set(in_hr) & set(in_cerebrum) return [dict(x) for x in to_add | to_ensure] else: raise Errors.ProgrammingError( 'Invalid mode {} supplied to _find_affiliations'.format( repr(mode))) def update_affiliations(database, source_system, hr_person, cerebrum_person): """Update a person in Cerebrum with the latest affiliations. :param database: A database object :param source_system: The source system code :param hr_person: The parsed data from the remote source system :param cerebrum_person: The Person object to be updated. """ for affiliation in _find_affiliations( cerebrum_person, hr_person.get('affiliations'), _sap_assignments_to_affiliation_map, source_system, 'remove'): del_account_type(database, cerebrum_person, affiliation['ou_id'], affiliation['affiliation']) cerebrum_person.delete_affiliation(**affiliation) logger.info('Removing affiliation %r for id: %r', _stringify_for_log(affiliation), cerebrum_person.entity_id) for affiliation in _find_affiliations( cerebrum_person, hr_person.get('affiliations'), _sap_assignments_to_affiliation_map, source_system, 'add'): cerebrum_person.populate_affiliation(source_system, **affiliation) logger.info('Adding affiliation %r for id: %r', _stringify_for_log(affiliation), cerebrum_person.entity_id) cerebrum_person.write_db() for affiliation in _find_affiliations( cerebrum_person, hr_person.get('affiliations'), _sap_assignments_to_affiliation_map, source_system, 'add'): set_account_type(database, cerebrum_person, affiliation['ou_id'], affiliation['affiliation']) logger.info('Setting account type for id: %r', cerebrum_person.entity_id) def update_roles(database, source_system, hr_person, cerebrum_person): """Update a person in Cerebrum with the latest roles. :param database: A database object :param source_system: The source system code :param hr_person: The parsed data from the remote source system :param cerebrum_person: The Person object to be updated. """ for role in _find_affiliations( cerebrum_person, hr_person.get('roles'), _sap_roles_to_affiliation_map, source_system, 'remove'): cerebrum_person.delete_affiliation(**role) logger.info('Removing role %r for id: %r', _stringify_for_log(role), cerebrum_person.entity_id) for role in _find_affiliations( cerebrum_person, hr_person.get('roles'), _sap_roles_to_affiliation_map, source_system, 'add'): cerebrum_person.populate_affiliation(source_system, **role) logger.info('Ensuring role %r for id: %r', _stringify_for_log(role), cerebrum_person.entity_id) def update_names(database, source_system, hr_person, cerebrum_person): """Update a person in Cerebrum with fresh names. :param database: A database object :param source_system: The source system code :param hr_person: The parsed data from the remote source system :param cerebrum_person: The Person object to be updated. 
""" co = Factory.get('Constants')(database) try: names = set(map(lambda name_type: (name_type, cerebrum_person.get_name( source_system, name_type)), [co.name_first, co.name_last])) except Errors.NotFoundError: names = set() to_remove = names - set(hr_person.get('names')) to_add = set(hr_person.get('names')) - names if to_remove: logger.info('Purging names of types %r for id: %r', map(lambda (k, _): _stringify_for_log(k), to_remove), cerebrum_person.entity_id) cerebrum_person.affect_names( source_system, *map(lambda (k, _): k, to_remove | to_add)) for (k, v) in to_add: cerebrum_person.populate_name(k, v) logger.info('Adding name %r of type %r for id: %r', v, k, cerebrum_person.entity_id) # Transform list of db_rows to a set of (address_type, (('city': '', …))) row_transform = (lambda key_type, key_label, squash_keys, elements: set(map(lambda e: (key_type(e[key_label]), tuple(filter(lambda (k, _): k not in squash_keys, e.items()))), elements))) def update_external_ids(database, source_system, hr_person, cerebrum_person): """Update a person in Cerebrum with appropriate external ids. :param database: A database object :param source_system: The source system code :param hr_person: The parsed data from the remote source system :param cerebrum_person: The Person object to be updated. """ co = Factory.get('Constants')(database) external_ids = set(map(lambda e: (e['id_type'], e['external_id']), cerebrum_person.get_external_id( source_system=source_system))) to_remove = external_ids - set(hr_person.get('external_ids')) to_add = set(hr_person.get('external_ids')) - external_ids cerebrum_person.affect_external_id( source_system, *map(lambda (k, _): k, to_remove | to_add)) if to_remove: logger.info( 'Purging externalids of types %r for id: %r', map(lambda (k, _): _stringify_for_log(co.EntityExternalId(k)), to_remove), cerebrum_person.entity_id) for (k, v) in to_add: cerebrum_person.populate_external_id( source_system, k, v) logger.info('Adding externalid %r for id: %r', (_stringify_for_log(co.EntityExternalId(k)), v), cerebrum_person.entity_id) def update_addresses(database, source_system, hr_person, cerebrum_person): """Update a person in Cerebrum with addresses. :param database: A database object :param source_system: The source system code :param hr_person: The parsed data from the remote source system :param cerebrum_person: The Person object to be updated. """ co = Factory.get('Constants')(database) addresses = row_transform(co.Address, 'address_type', ('entity_id', 'source_system', 'address_type', 'p_o_box', 'country'), cerebrum_person.get_entity_address( source=source_system)) for (k, v) in addresses - set(hr_person.get('addresses')): cerebrum_person.delete_entity_address(source_system, k) logger.info('Removing address %r for id: %r', (_stringify_for_log(k), v), cerebrum_person.entity_id) for (k, v) in set(hr_person.get('addresses')) - addresses: cerebrum_person.add_entity_address(source_system, k, **dict(v)) logger.info('Adding address %r for id: %r', (_stringify_for_log(k), v), cerebrum_person.entity_id) def update_contact_info(database, source_system, hr_person, cerebrum_person): """Update a person in Cerebrum with contact information (telephone, etc.). :param database: A database object :param source_system: The source system code :param hr_person: The parsed data from the remote source system :param cerebrum_person: The Person object to be updated. 
""" co = Factory.get('Constants')(database) contacts = row_transform(co.ContactInfo, 'contact_type', ('entity_id', 'source_system', 'contact_type', 'contact_description', 'contact_alias', 'last_modified'), cerebrum_person.get_contact_info( source=source_system)) for (k, v) in contacts - set(hr_person.get('contacts')): (p, v, _d) = (value for (_, value) in v) cerebrum_person.delete_contact_info(source_system, k, p) logger.info('Removing contact (%r) of type %r with preference %r for ' 'id: %r', v, _stringify_for_log(k), p, cerebrum_person.entity_id) for (k, v) in set(hr_person.get('contacts')) - contacts: (p, v, _d) = (value for (_, value) in v) cerebrum_person.add_contact_info(source_system, k, v, p) logger.info('Adding contact %r of type %r with preference %r for ' 'id: %r', v, _stringify_for_log(k), p, cerebrum_person.entity_id) def update_titles(database, source_system, hr_person, cerebrum_person): """Update a person in Cerebrum with work and personal titles. :param database: A database object :param source_system: The source system code :param hr_person: The parsed data from the remote source system :param cerebrum_person: The Person object to be updated. """ co = Factory.get('Constants')(database) titles = set(map(lambda x: tuple(filter(lambda (k, v): k not in ('entity_id', 'entity_type'), x.items())), cerebrum_person.search_name_with_language( entity_id=cerebrum_person.entity_id, name_variant=[co.work_title, co.personal_title]))) for e in set(hr_person.get('titles')) - titles: cerebrum_person.add_name_with_language(**dict(e)) logger.info('Adding title %r for id: %r', _stringify_for_log(e), cerebrum_person.entity_id) for e in titles - set(hr_person.get('titles')): cerebrum_person.delete_name_with_language(**dict(e)) logger.info('Removing title %r for id: %r', _stringify_for_log(e), cerebrum_person.entity_id) def update_reservation(database, hr_person, cerebrum_person): """Manage reservation from public display for a person in Cerebrum. :param database: A database object :param source_system: The source system code :param hr_person: The parsed data from the remote source system :param cerebrum_person: The Person object to be updated. 
""" # TODO: Recode this function when we handle reservation on the fly gr = Factory.get('Group')(database) gr.find_by_name('SAP-elektroniske-reservasjoner') in_reserved_group = gr.has_member(cerebrum_person.entity_id) if hr_person.get('reserved') and not in_reserved_group: gr.add_member(cerebrum_person.entity_id) logger.info('Adding id: %r to reservation group', cerebrum_person.entity_id) elif not hr_person.get('reserved') and in_reserved_group: gr.remove_member(cerebrum_person.entity_id) logger.info('Removing id: %r from reservation group', cerebrum_person.entity_id) def cerebrum_leader_group_memberships(gr, co, cerebrum_person): return (r['group_id'] for r in gr.search(member_id=cerebrum_person.entity_id, name=LEADER_GROUP_PREFIX + '*', group_type=co.group_type_affiliation, filter_expired=True, fetchall=False)) def update_leader_group_memberships(database, hr_person, cerebrum_person): gr = Factory.get('Group')(database) co = Factory.get('Constants')(database) hr_memberships = set(hr_person.get('leader_group_ids')) cerebrum_memberships = set( cerebrum_leader_group_memberships(gr, co, cerebrum_person) ) logger.info('Assert (person: %s) is member of (leader_groups: %s)', cerebrum_person.entity_id, hr_memberships) update_memberships(gr, cerebrum_person.entity_id, cerebrum_memberships, hr_memberships) def perform_update(database, source_system, hr_person, cerebrum_person): """Update or create a person.""" logger.info('Starting perform_update for %r', hr_person.get('id')) update_person(database, source_system, hr_person, cerebrum_person) update_external_ids(database, source_system, hr_person, cerebrum_person) update_names(database, source_system, hr_person, cerebrum_person) update_addresses(database, source_system, hr_person, cerebrum_person) update_contact_info(database, source_system, hr_person, cerebrum_person) update_titles(database, source_system, hr_person, cerebrum_person) update_roles(database, source_system, hr_person, cerebrum_person) update_affiliations(database, source_system, hr_person, cerebrum_person) update_leader_group_memberships(database, hr_person, cerebrum_person) update_reservation(database, hr_person, cerebrum_person) logger.info('Perform_update for %r done', cerebrum_person.entity_id) def perform_delete(database, source_system, hr_person, cerebrum_person): """Delete a person.""" logger.info('Deleting: %r', cerebrum_person.entity_id) # Update person and external IDs if hr_person: update_person(database, source_system, hr_person, cerebrum_person) update_external_ids( database, source_system, hr_person, cerebrum_person) # Delete everything else update_names(database, source_system, {'names': []}, cerebrum_person) update_addresses(database, source_system, {'addresses': []}, cerebrum_person) update_contact_info(database, source_system, {'contacts': []}, cerebrum_person) update_titles(database, source_system, {'titles': []}, cerebrum_person) update_affiliations(database, source_system, {'affiliations': []}, cerebrum_person) update_roles(database, source_system, {'roles': []}, cerebrum_person) update_reservation(database, {'reserved': False}, cerebrum_person) logger.info('%r deleted', cerebrum_person.entity_id) def handle_person(database, source_system, url, datasource=get_hr_person): """Fetch info from the remote system, and perform changes. :param database: A database object :param source_system: The source system code :param url: The URL to the person object in the HR systems WS. :param datasource: The function used to fetch / parse the resource. 
:return reschedule_date: Date when the person should be reprocessed :rtype: datetime.date or None""" try: hr_person, reschedule_date = datasource(database, source_system, url) logger.info('Handling person %r from source system %r', _stringify_for_log(hr_person.get('names')), source_system) except EntityDoesNotExistInSourceSystemError: logger.warn('URL %s does not resolve in source system %r (404) - ' 'deleting from Cerebrum', url, source_system) hr_person = reschedule_date = None if hr_person: cerebrum_person = get_cerebrum_person( database, map(lambda (k, v): (k, v), hr_person.get('external_ids'))) else: # assume manual ticket employee_number = url.split('(')[-1].strip(')') co = Factory.get('Constants')(database) cerebrum_person = Factory.get('Person')(database) cerebrum_person.find_by_external_id( id_type=co.externalid_sap_ansattnr, external_id=employee_number, source_system=co.system_sap, entity_type=co.entity_person) if hr_person and (hr_person.get('affiliations') or hr_person.get('roles')): perform_update(database, source_system, hr_person, cerebrum_person) elif cerebrum_person.entity_type: # entity_type as indication of instance perform_delete(database, source_system, hr_person, cerebrum_person) else: logger.info('handle_person: no action performed') return reschedule_date logger.info('handle_person: commiting changes') cerebrum_person.write_db() database.commit() logger.info('handle_person: changes committed') return reschedule_date def _reschedule_message(publisher, routing_key, message, reschedule_date): logger.info('Reschedule the message for %s', reschedule_date) reschedule_time = apply_timezone(date_to_datetime(reschedule_date)) # Convert to timestamp and add to message message['nbf'] = int(time.mktime(reschedule_time.timetuple())) with publisher: publisher.publish(routing_key, message) def callback(database, source_system, routing_key, content_type, body, datasource=get_hr_person, publisher=None): """Call appropriate handler functions.""" try: message = json.loads(body) url = message.get('sub') except Exception as e: logger.warn('Received malformed message %r', body) return True message_processed = True try: reschedule_date = handle_person(database, source_system, url, datasource=datasource) logger.info('Successfully processed %r', body) except RemoteSourceUnavailable: message_processed = False except (RemoteSourceError, ErroneousSourceData) as e: logger.error('Failed processing %r:\n %r: %r', body, type(e).__name__, e) except EntityNotResolvableError as e: logger.critical('Failed processing %r:\n %r: %r', body, type(e).__name__, e) except Exception as e: logger.error('Failed processing %r:\n %r', body, e, exc_info=True) else: if reschedule_date is not None: try: _reschedule_message(publisher, routing_key, message, reschedule_date) except Exception as e: logger.error('Failed to reschedule message \n %r', e) finally: # Always rollback, since we do an implicit begin and we want to discard # possible outstanding changes. 
database.rollback() return message_processed def load_mock(mock_file): """Call appropriate handler functions.""" with open(mock_file) as f: data = json.load(f).get('d') import pprint logger.debug1( 'Using mock with data:\n%s', pprint.pformat(data, indent=4)) return data def main(args=None): """Start consuming messages.""" import argparse import functools parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-c', '--config', dest='configfile', metavar='FILE', default=None, help='Use a custom configuration file for AMPQ ' 'consumer') parser.add_argument('-p', '--publisher-config', dest='publisher_configfile', metavar='FILE', default=None, help='Use custom configuration for AMPQ publisher ' 'used to reschedule messages') parser.add_argument('-m', '--mock', dest='mock', metavar='FILE', default=None, help='Load person object from JSON file') parser.add_argument(u'-u', u'--url', action=type( str(''), (argparse.Action,), {'__call__': lambda s, p, ns, v, o=None: setattr( ns, s.dest, json.dumps({'sub': v}))}), dest=u'url', metavar='<url>', type=text_type, default=None, help=u'Load url manually') parser.add_argument('--dryrun', dest='dryrun', action='store_true', default=False, help='Do not commit changes') args = parser.parse_args(args) prog_name = parser.prog.rsplit('.', 1)[0] logger.info('Starting %r', prog_name) database = Factory.get('Database')() database.cl_init(change_program=prog_name) source_system = Factory.get('Constants')(database).system_sap config = load_config(SAPConsumerConfig, 'consumer_sap', filepath=args.configfile) publisher_config = load_config(PublisherConfig, 'sap_publisher', filepath=args.publisher_configfile) if args.dryrun: database.commit = database.rollback if args.mock: import pprint mock_data = load_mock(args.mock) parsed_mock_data = _parse_hr_person(database, source_system, mock_data) logger.debug1('Parsed mock data as:\n%r', pprint.pformat(parsed_mock_data)) body = json.dumps({'sub': None}) callback(database, source_system, '', '', body, datasource=lambda *x: (parsed_mock_data, None)) elif args.url: datasource = functools.partial(get_hr_person, config.ws, ignore_read_password=True) publisher = MockClient(publisher_config) callback( database, source_system, # An example of a routing key which will land in the queue # q_cerebrum_sap_consumer: 'no.uio.sap.scim.employees.modify', '', args.url, datasource=datasource, publisher=publisher, ) else: logger.info('Starting %r', prog_name) datasource = functools.partial( get_hr_person, config.ws) publisher = AMQP091Publisher(publisher_config) consumer = get_consumer( functools.partial( callback, database, source_system, datasource=datasource, publisher=publisher, ), config=config.consumer) with consumer: try: consumer.start() except KeyboardInterrupt: consumer.stop() consumer.close() logger.info('Stopping %r', prog_name) if __name__ == "__main__": main() ```
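Note that this module is Python 2 only: helpers such as `filter_meta`, `translate_keys` and `filter_elements` use tuple-parameter unpacking (`lambda (k, v): ...`), which is a syntax error on Python 3, and `unicode`/`basestring` appear elsewhere. A minimal sketch of Python 3 equivalents for the two list helpers, assuming the same semantics as the originals:

```python
def translate_keys(d, m):
    # Map each key of d through the lookup table m; unknown keys become None.
    return [(m.get(k), v) for (k, v) in d.items()]

def filter_elements(pairs):
    # Drop every (k, v) pair where either member is falsy.
    return [(k, v) for (k, v) in pairs if k and v]
```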
Here is the source code:
```python # -*- coding: utf-8 -*- from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('auth', '__first__')] operations = [ migrations.CreateModel( name='EighthActivity', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(unique=True, max_length=63)), ('description', models.CharField(max_length=255, blank=True)), ('presign', models.BooleanField(default=False)), ('one_a_day', models.BooleanField(default=False)), ('both_blocks', models.BooleanField(default=False)), ('sticky', models.BooleanField(default=False)), ('special', models.BooleanField(default=False)), ('restricted', models.BooleanField(default=False)), ('freshmen_allowed', models.BooleanField(default=False)), ('sophomores_allowed', models.BooleanField(default=False)), ('juniors_allowed', models.BooleanField(default=False)), ('seniors_allowed', models.BooleanField(default=False)), ('deleted', models.BooleanField(default=False)), ('groups_allowed', models.ManyToManyField(related_name='restricted_activity_set', to='auth.Group', blank=True)), ], options={'verbose_name_plural': 'eighth activities'}, bases=(models.Model,), ), migrations.CreateModel( name='EighthBlock', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('date', models.DateField()), ('block_letter', models.CharField(max_length=1)), ('locked', models.BooleanField(default=False)), ], options={'ordering': ('date', 'block_letter')}, bases=(models.Model,), ), migrations.CreateModel( name='EighthRoom', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=63)), ('capacity', models.SmallIntegerField(default=-1)), ], options={}, bases=(models.Model,), ), migrations.CreateModel( name='EighthScheduledActivity', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('comments', models.CharField(max_length=255, blank=True)), ('capacity', models.SmallIntegerField(null=True, blank=True)), ('attendance_taken', models.BooleanField(default=False)), ('cancelled', models.BooleanField(default=False)), ('activity', models.ForeignKey(to='eighth.EighthActivity', on_delete=models.CASCADE)), ('block', models.ForeignKey(to='eighth.EighthBlock', on_delete=models.CASCADE)), ], options={'verbose_name_plural': 'eighth scheduled activities'}, bases=(models.Model,), ), migrations.CreateModel( name='EighthSignup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('time', models.DateTimeField(auto_now=True)), ('after_deadline', models.BooleanField(default=False)), ('previous_activity_name', models.CharField(max_length=100, blank=True)), ('previous_activity_sponsors', models.CharField(max_length=100, blank=True)), ('pass_accepted', models.BooleanField(default=False)), ('was_absent', models.BooleanField(default=False)), ('scheduled_activity', models.ForeignKey(related_name='eighthsignup_set', to='eighth.EighthScheduledActivity', on_delete=models.CASCADE)), ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)), ], options={}, bases=(models.Model,), ), migrations.CreateModel( name='EighthSponsor', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), 
('first_name', models.CharField(max_length=63)), ('last_name', models.CharField(max_length=63)), ('online_attendance', models.BooleanField(default=True)), ('user', models.OneToOneField(null=True, blank=True, to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)), ], options={}, bases=(models.Model,), ), migrations.AlterUniqueTogether( name='eighthsponsor', unique_together={('first_name', 'last_name', 'user', 'online_attendance')}, ), migrations.AlterUniqueTogether( name='eighthsignup', unique_together={('user', 'scheduled_activity')}, ), migrations.AddField( model_name='eighthscheduledactivity', name='members', field=models.ManyToManyField(related_name='eighthscheduledactivity_set', through='eighth.EighthSignup', to=settings.AUTH_USER_MODEL), preserve_default=True, ), migrations.AddField( model_name='eighthscheduledactivity', name='rooms', field=models.ManyToManyField(to='eighth.EighthRoom', blank=True), preserve_default=True, ), migrations.AddField( model_name='eighthscheduledactivity', name='sponsors', field=models.ManyToManyField(to='eighth.EighthSponsor', blank=True), preserve_default=True, ), migrations.AlterUniqueTogether( name='eighthscheduledactivity', unique_together={('block', 'activity')}, ), migrations.AddField( model_name='eighthblock', name='activities', field=models.ManyToManyField(to='eighth.EighthActivity', through='eighth.EighthScheduledActivity', blank=True), preserve_default=True, ), migrations.AlterUniqueTogether( name='eighthblock', unique_together={('date', 'block_letter')}, ), migrations.AddField( model_name='eighthactivity', name='rooms', field=models.ManyToManyField(to='eighth.EighthRoom', blank=True), preserve_default=True, ), migrations.AddField( model_name='eighthactivity', name='sponsors', field=models.ManyToManyField(to='eighth.EighthSponsor', blank=True), preserve_default=True, ), migrations.AddField( model_name='eighthactivity', name='users_allowed', field=models.ManyToManyField(related_name='restricted_activity_set', to=settings.AUTH_USER_MODEL, blank=True), preserve_default=True, ), ] ```
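One detail worth calling out: `members` on `EighthScheduledActivity` goes through `EighthSignup`, so memberships are created on the through model rather than with `members.add()` (which Django only permits for custom through models from 2.2 onward). A hedged usage sketch, assuming the models live in an `eighth` app as the migration's app label suggests:

```python
# Hypothetical helper; the 'eighth.models' import path is an assumption.
from eighth.models import EighthSignup

def sign_up(user, scheduled_activity):
    # unique_together ('user', 'scheduled_activity') makes duplicates fail loudly.
    return EighthSignup.objects.create(
        user=user, scheduled_activity=scheduled_activity)
```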
Write out the code verbatim, preserving indentation and whitespace:
```python #!/usr/bin/env python # # Navigator.py # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307 USA import os import hashlib import logging from thug.Magic.Magic import Magic from .JSClass import JSClass from .MimeTypes import MimeTypes from .Plugins import Plugins from .HTTPSessionException import AboutBlank from .HTTPSessionException import FetchForbidden from .HTTPSessionException import InvalidUrl from .HTTPSessionException import ThresholdExpired log = logging.getLogger("Thug") class Navigator(JSClass): def __init__(self, personality, window = None): self.personality = log.ThugOpts.Personality[personality] self._plugins = Plugins() # An array of the plugins installed in the browser self._mimeTypes = MimeTypes() self._window = window for p in self._mimeTypes.values(): self._plugins.append(p['enabledPlugin']) self.__init_personality() self.filecount = 0 def __init_personality(self): if log.ThugOpts.Personality.isIE(): self.__init_personality_IE() return if log.ThugOpts.Personality.isFirefox(): self.__init_personality_Firefox() return if log.ThugOpts.Personality.isChrome(): self.__init_personality_Chrome() return if log.ThugOpts.Personality.isSafari(): self.__init_personality_Safari() return def __init_personality_IE(self): from .UserProfile import UserProfile self.mimeTypes = self._mimeTypes self.plugins = self._plugins self.taintEnabled = self._taintEnabled self.appMinorVersion = self._appMinorVersion self.cpuClass = self._cpuClass self.browserLanguage = self._browserLanguage self.systemLanguage = self._systemLanguage self.userLanguage = self._userLanguage if log.ThugOpts.Personality.browserMajorVersion < 9: self.userProfile = UserProfile() def __init_personality_Firefox(self): self.mimeTypes = self._mimeTypes self.plugins = self._plugins self.taintEnabled = self._taintEnabled self.oscpu = self._oscpu self.buildID = self._buildID self.product = self._product self.productSub = self._productSub self.vendor = self._vendor self.vendorSub = self._vendorSub self.language = self._language self.preference = self._preference self.registerContentHandler = self._registerContentHandler self.registerProtocolHandler = self._registerProtocolHandler def __init_personality_Chrome(self): self.mimeTypes = self._mimeTypes self.plugins = self._plugins self.product = self._product self.productSub = self._productSub self.vendor = self._vendor self.vendorSub = self._vendorSub self.language = self._language def __init_personality_Safari(self): self.mimeTypes = self._mimeTypes self.plugins = self._plugins self.product = self._product self.productSub = self._productSub self.vendor = self._vendor self.vendorSub = self._vendorSub self.language = self._language @property def window(self): return self._window @property def appCodeName(self): """ The internal "code" name of the current browser """ return self.personality['appCodeName'] @property def appName(self): """ The official name of the browser """ return self.personality['appName'] 
@property def appVersion(self): """ The version of the browser as a string """ return self.personality['appVersion'] @property def userAgent(self): """ The user agent string for the current browser """ return self.personality['userAgent'] @property def _buildID(self): """ The build identifier of the browser (e.g. "2006090803") """ return self.personality['buildID'] @property def cookieEnabled(self): """ A boolean indicating whether cookies are enabled """ return True @property def _language(self): """ A string representing the language version of the browser """ return "en" @property def onLine(self): """ A boolean indicating whether the browser is working online """ return True @property def _oscpu(self): """ A string that represents the current operating system """ return self.personality['oscpu'] @property def platform(self): """ A string representing the platform of the browser """ return self.personality['platform'] @property def _product(self): """ The product name of the current browser (e.g. "Gecko") """ return self.personality['product'] @property def _productSub(self): """ The build number of the current browser (e.g. "20060909") """ return self.personality['productSub'] @property def securityPolicy(self): """ An empty string. In Netscape 4.7x, returns "US & CA domestic policy" or "Export policy". """ return "" @property def _vendor(self): """ The vendor name of the current browser (e.g. "Netscape6") """ return self.personality['vendor'] @property def _vendorSub(self): """ The vendor name of the current browser (e.g. "Netscape6") """ return self.personality['vendorSub'] @property def _appMinorVersion(self): return self.personality['appMinorVersion'] @property def _browserLanguage(self): return "en" @property def _cpuClass(self): return "x86" @property def _systemLanguage(self): return "en" @property def _userLanguage(self): return "en" # Indicates whether the host browser is Java-enabled or not. def javaEnabled(self, *arg): return True # Lets code check to see if the document at a given URI is # available without using the network. def mozIsLocallyAvailable(self, *arg): return False # Sets a user preference. # self method is only available to privileged code, and you # should use XPCOM Preferences API instead. def _preference(self, *arg): pass # Allows web sites to register themselves as a possible handler # for a given MIME type. def _registerContentHandler(self, *arg): pass # New in Firefox 3 # Allows web sites to register themselves as a possible handler # for a given protocol. def _registerProtocolHandler(self, *arg): pass # Obsolete # JavaScript taint/untaint functions removed in JavaScript 1.2[1] def _taintEnabled(self, *arg): return True def fetch(self, url, method = "GET", headers = None, body = None, redirect_type = None, params = None, snippet = None): log.URLClassifier.classify(url) # The command-line option -x (--local-nofetch) prevents remote # content fetching so raise an exception and exit the method. if log.HTTPSession.no_fetch: raise FetchForbidden # Do not attempt to fetch content if the URL is "about:blank". if log.HTTPSession.about_blank(url): raise AboutBlank # URL normalization and fixing (if broken and the option is # enabled). 
url = log.HTTPSession.normalize_url(self._window, url) if url is None: raise InvalidUrl last_url = getattr(log, 'last_url', None) if last_url is None: last_url = self._window.url if redirect_type: log.ThugLogging.add_behavior_warn("[{} redirection] {} -> {}".format(redirect_type, last_url, url), snippet = snippet) log.ThugLogging.log_connection(last_url, url, redirect_type) else: log.ThugLogging.log_connection(last_url, url, "unknown") # The command-line option -t (--threshold) defines the maximum # number of pages to fetch. If the threshold is reached avoid # fetching the contents. if log.HTTPSession.threshold_expired(url): raise ThresholdExpired if headers is None: headers = dict() response = log.HTTPSession.fetch(url, method, self._window, self.userAgent, headers, body) if response is None: return None _url = log.ThugLogging.log_redirect(response, self._window) if _url: url = _url referer = response.request.headers.get('referer', 'None') log.ThugLogging.add_behavior_warn("[HTTP] URL: {} (Status: {}, Referer: {})".format(url, response.status_code, referer), snippet = snippet) ctype = response.headers.get('content-type', 'unknown') mime_base = os.path.join(log.ThugLogging.baseDir, ctype) md5 = hashlib.md5() md5.update(response.content) sha256 = hashlib.sha256() sha256.update(response.content) mtype = Magic(response.content).get_mime() data = { "content" : response.content, "status" : response.status_code, "md5" : md5.hexdigest(), "sha256" : sha256.hexdigest(), "fsize" : len(response.content), "ctype" : ctype, "mtype" : mtype } log.ThugLogging.add_behavior_warn("[HTTP] URL: {} (Content-type: {}, MD5: {})".format(response.url, ctype, data["md5"]), snippet = snippet) log.ThugLogging.log_location(url, data) if response.history: location = response.headers.get('location', None) if location and redirect_type not in ("URL found", "JNLP", "iframe", ): self._window.url = log.HTTPSession.normalize_url(self._window, location) if redirect_type in ("meta", ): self._window.url = log.HTTPSession.normalize_url(self._window, url) log.ThugLogging.store_content(mime_base, data["md5"], response.content) log.ThugLogging.log_file(response.content, response.url, params) if redirect_type in (None, 'window open', 'iframe', 'http-redirect', 'meta', ): log.last_url = response.url log.last_url_fetched = response.url if log.ThugOpts.features_logging: log.ThugLogging.Features.add_characters_count(len(response.text)) log.ThugLogging.Features.add_whitespaces_count(len([a for a in response.text if a.isspace()])) handler = log.MIMEHandler.get_handler(ctype) if handler: handler(response.url, response.content) return response ```
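The `Navigator` above resolves every browser property (`appName`, `appCodeName`, `userAgent`, ...) through a per-personality dictionary and binds browser-specific members only when the active personality matches. A minimal, self-contained sketch of that dispatch idea, with the personality keys and values invented purely for illustration (they are not Thug's real personality data):

```python
# Illustrative personality table -- keys/values are made up for this sketch.
PERSONALITIES = {
    "winxpie60": {"appCodeName": "Mozilla", "appName": "Microsoft Internet Explorer"},
    "linuxfirefox40": {"appCodeName": "Mozilla", "appName": "Netscape"},
}

class MiniNavigator:
    def __init__(self, personality):
        # Every property reads from this dict, as Navigator does above.
        self.personality = PERSONALITIES[personality]

    @property
    def appCodeName(self):
        return self.personality["appCodeName"]

    @property
    def appName(self):
        return self.personality["appName"]

nav = MiniNavigator("winxpie60")
print(nav.appName)   # Microsoft Internet Explorer
```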
[ { "content": "Return the code exactly, with no changes:\n```python\nfrom Server.Game import *\nfrom threading import Thread\nfrom CodIa.tuto.models import User\nfrom CodIa.tuto.app import db\nimport threading\nimport time\nimport random\n\nclass PlayerThread(Thread):\n\n def __init__(self, GameThread, userna...
[ { "content": "Return the code exactly, with no changes:\n<|memory_start|>```python\nfrom Server.Game import *\nfrom threading import Thread\nfrom CodIa.tuto.models import User\nfrom CodIa.tuto.app import db\nimport threading\nimport time\nimport random\n\nclass PlayerThread(Thread):\n\n def __init__(self, Ga...
```python
from Server.Game import *
from threading import Thread
from CodIa.tuto.models import User
from CodIa.tuto.app import db
import threading
import time
import random


class PlayerThread(Thread):

    def __init__(self, GameThread, username, ia):
        Thread.__init__(self)
        self.GameThread = GameThread
        self.username = username
        GameThread.barrierTours._parties += 1
        self.ia = ia
        self.joueur = Player(ia, username, GameThread.game.gamesize)
        # GameThread.game.joueurs[username] = Player(ia, username, GameThread.game.gamesize)
        GameThread.joueursAAdd.append(self.joueur)
        GameThread.nbth += 1

    def run(self):
        while True:
            # Wait for the start of the turn.
            self.GameThread.barrierTours.wait()
            # Run the AI code.
            self.executeIa()
            self.GameThread.barrierEtape.wait()
            self.calculePos()
            self.GameThread.barrierEtape.wait()
            # NOTE: this shadows Thread.join(); here it collects what was eaten.
            agraille = self.join()
            self.GameThread.barrierEtape.wait()
            self.GameThread.lockmanger.acquire()
            self.GameThread.aManger.append(agraille)
            self.GameThread.lockmanger.release()
            self.GameThread.barrierManger.wait()

            if self.joueur.poidTotal <= 0 and not self.joueur.end:
                self.joueur.end = True
                print("\033[91m Player " + self.joueur.username + " lost \033[0m")
                user = User.query.filter_by(pseudo=self.joueur.username).first()
                if user is not None:
                    user.score += self.joueur.score
                    db.session.commit()
            # time.sleep(1/60)
            # self.GameThread.nbth -= 1
            # self.GameThread.barrierTours._parties -= 1

    def executeIa(self):
        pass

    def calculePos(self):
        for sphere in self.joueur.spheres:
            # Advance the sphere, capping its speed at the allowed maximum.
            sphere.vectVitesse = sphere.vitesseNextTick()
            if sphere.normeVitesse() > sphere.normeVitesseMax():
                sphere.vectVitesse[0] *= 0.9
                sphere.vectVitesse[1] *= 0.9
            sphere.vectPos = sphere.posNextTick()
            # Large spheres have a 1-in-300 chance per tick of splitting.
            rand = random.randint(1, 300)
            if sphere.taille > 50000 and rand == 1:
                sphere.split(self.joueur)
            # Debug accounting: total weight across all players.
            somme = 0
            for joueur in self.GameThread.game.joueurs.values():
                somme += joueur.poidTotal
        self.joueur.updateScore()

    def join(self):
        try:
            listjoueur = dict()
            for sphere in self.joueur.spheres:
                for joueur2 in self.GameThread.game.joueurs.values():
                    for sphere2 in joueur2.spheres:
                        res = sphere.join(sphere2, joueur2)
                        if res is not None:
                            # Group eaten spheres by the username they belong to.
                            try:
                                listjoueur[res[0].username].append(res[1])
                            except KeyError:
                                listjoueur[res[0].username] = [res[1]]
        except RuntimeError:
            print("\033[91m Thread count: "
                  + str(self.GameThread.barrierManger._parties) + ", "
                  + str(self.GameThread.nbth) + " \033[0m")
        return listjoueur
```
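The thread above steps all players through the tick in lock-step phases (`barrierTours`, `barrierEtape`, `barrierManger`). A minimal sketch of that barrier choreography with `threading.Barrier`, reduced to two phases per tick and with invented player names:

```python
import threading

# Three workers advance together: nobody starts phase 2 of a tick until
# everyone has finished phase 1, mirroring PlayerThread's barrier usage.
N_PLAYERS = 3
phase = threading.Barrier(N_PLAYERS)   # a Barrier resets itself after each pass

def player(name, ticks=2):
    for t in range(ticks):
        phase.wait()                   # everyone starts the tick together
        print(f"{name}: compute move for tick {t}")
        phase.wait()                   # everyone finishes before results merge
        print(f"{name}: apply results of tick {t}")

threads = [threading.Thread(target=player, args=(f"p{i}",)) for i in range(N_PLAYERS)]
for th in threads:
    th.start()
for th in threads:
    th.join()
```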
[ { "content": "Here is the code content:\n```python\n\"\"\"\nPyStratum\n\"\"\"\nimport configparser\nfrom pydoc import locate\n\nfrom cleo import Command, Input, Output\nfrom pystratum.RoutineLoader import RoutineLoader\n\nfrom pystratum.style.PyStratumStyle import PyStratumStyle\n\n\nclass LoaderCommand(Command...
[ { "content": "Here is the code content:\n<|memory_start|>```python\n\"\"\"\nPyStratum\n\"\"\"\nimport configparser\nfrom pydoc import locate\n\nfrom cleo import Command, Input, Output\nfrom pystratum.RoutineLoader import RoutineLoader\n\nfrom pystratum.style.PyStratumStyle import PyStratumStyle\n\n\nclass Loade...
```python """ PyStratum """ import configparser from pydoc import locate from cleo import Command, Input, Output from pystratum.RoutineLoader import RoutineLoader from pystratum.style.PyStratumStyle import PyStratumStyle class LoaderCommand(Command): """ Command for loading stored routines into a MySQL/MsSQL/PgSQL instance from pseudo SQL files loader {config_file : The audit configuration file} {file_names?* : Sources with stored routines} """ # ------------------------------------------------------------------------------------------------------------------ def execute(self, input_object: Input, output_object: Output) -> int: """ Executes this command. """ self.input = input_object self.output = output_object return self.handle() # ------------------------------------------------------------------------------------------------------------------ def handle(self) -> int: """ Executes loader command. """ self.output = PyStratumStyle(self.input, self.output) config_file = self.argument('config_file') sources = self.argument('file_names') return self.run_command(config_file, sources) # ------------------------------------------------------------------------------------------------------------------ def run_command(self, config_file, sources) -> int: """ :param str config_file: The name of config file. :param list sources: The list with source files. """ config = configparser.ConfigParser() config.read(config_file) rdbms = config.get('database', 'rdbms').lower() loader = self.create_routine_loader(rdbms) status = loader.main(config_file, sources) return status # ------------------------------------------------------------------------------------------------------------------ def create_routine_loader(self, rdbms: str) -> RoutineLoader: """ Factory for creating a Routine Loader objects (i.e. objects for loading stored routines into a RDBMS instance from (pseudo) SQL files. :param str rdbms: The target RDBMS (i.e. mysql, mssql or pgsql). :rtype: RoutineLoader """ # Note: We load modules and classes dynamically such that on the end user's system only the required modules # and other dependencies for the targeted RDBMS must be installed (and required modules and other # dependencies for the other RDBMSs are not required). if rdbms == 'mysql': module = locate('pystratum_mysql.MySqlRoutineLoader') return module.MySqlRoutineLoader(self.output) if rdbms == 'mssql': module = locate('pystratum_mssql.MsSqlRoutineLoader') return module.MsSqlRoutineLoader(self.output) if rdbms == 'pgsql': module = locate('pystratum_pgsql.PgSqlRoutineLoader') return module.PgSqlRoutineLoader(self.output) raise Exception("Unknown RDBMS '{0!s}'.".format(rdbms)) # ---------------------------------------------------------------------------------------------------------------------- ```
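`create_routine_loader` above imports each backend lazily with `pydoc.locate`, so only the targeted RDBMS's dependencies need to be installed. A condensed sketch of that factory pattern; the dotted paths in `LOADERS` are illustrative stand-ins, not PyStratum's exact module layout:

```python
from pydoc import locate

# Hypothetical class paths -- the real packages resolve slightly differently.
LOADERS = {
    "mysql": "pystratum_mysql.MySqlRoutineLoader.MySqlRoutineLoader",
    "pgsql": "pystratum_pgsql.PgSqlRoutineLoader.PgSqlRoutineLoader",
}

def make_loader(rdbms: str):
    path = LOADERS.get(rdbms.lower())
    if path is None:
        raise ValueError(f"Unknown RDBMS {rdbms!r}")
    cls = locate(path)   # locate() returns None when the module is absent
    if cls is None:
        raise ImportError(f"Backend for {rdbms!r} is not installed")
    return cls
```

The payoff of the indirection is that `import pystratum_pgsql` never runs on a MySQL-only installation.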
[ { "content": "Repeat the code precisely:\n```python\n#!/usr/bin/env python\n\nimport json, argparse\n\nparser = argparse.ArgumentParser()\nparser.description = '''Generate an RST list documenting PISM's diagnostics.'''\nparser.add_argument(\"FILE\", nargs=1)\noptions = parser.parse_args()\n\ndef print_heading(t...
[ { "content": "Repeat the code precisely:\n<|memory_start|>```python\n#!/usr/bin/env python\n\nimport json, argparse\n\nparser = argparse.ArgumentParser()\nparser.description = '''Generate an RST list documenting PISM's diagnostics.'''\nparser.add_argument(\"FILE\", nargs=1)\noptions = parser.parse_args()\n\ndef...
```python #!/usr/bin/env python import json, argparse parser = argparse.ArgumentParser() parser.description = '''Generate an RST list documenting PISM's diagnostics.''' parser.add_argument("FILE", nargs=1) options = parser.parse_args() def print_heading(title, label, decoration): heading = """ .. _{label}: {title} {underline}""" print(heading.format(label=label, title=title, underline=decoration * len(title))) entry_start = """ #. ``{name}``""" template_single = """ :Units: {units} :Description: {long_name}""" std_name_template = """{padding}:Standard name: ``{standard_name}``""" comment_template = """{padding}:Comment: {comment}""" template_many = """ - ``{var_name}`` :Units: {units} :Description: {long_name}""" header = """.. -*- mode: rst -*- .. DO NOT EDIT. This file was generated using list_diagnostics.py. """ def print_diagnostics(diagnostics): def print_some(title, label, diagnostics): print_heading(title, label, "-") for name in sorted(diagnostics.keys()): print(entry_start.format(name=name)) if len(diagnostics[name]) == 1: template = template_single padding = " " * 3 else: template = template_many padding = " " * 5 for data in diagnostics[name]: var_name, units, long_name, standard_name, comment = data if len(units) == 0: units = "---" print(template.format(var_name=var_name, units=units, long_name=long_name)) if len(standard_name) > 0: print(std_name_template.format(padding=padding, standard_name=standard_name)) if len(comment) > 0: print(comment_template.format(padding=padding, comment=comment)) print(header) print_some("Spatially-variable fields", "sec-extra_vars", diagnostics["spatial"]) print_some("Scalar time-series", "sec-ts_vars", diagnostics["scalar"]) with open(options.FILE[0]) as f: print_diagnostics(json.load(f)) ```
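For readers of the script above, the JSON it consumes has a fixed shape: two top-level keys, `"spatial"` and `"scalar"`, each mapping diagnostic names to lists of five-element records. A tiny example input, with the values themselves invented:

```python
# Illustrative input for print_diagnostics(); each record is
# [var_name, units, long_name, standard_name, comment].
example = {
    "spatial": {
        "thk": [["thk", "m", "land ice thickness", "land_ice_thickness", ""]],
    },
    "scalar": {
        "ice_volume": [["ice_volume", "m3", "ice volume", "", ""]],
    },
}
# print_diagnostics(example) would emit the two RST sections shown above.
```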
[ { "content": "Return the code exactly, with no changes:\n```python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2012-2018, CRS4\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software...
[ { "content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2012-2018, CRS4\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in...
```python # -*- coding: utf-8 -*- # # Copyright (c) 2012-2018, CRS4 # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from __future__ import absolute_import import importlib from .messages import MESSAGES from .segments import SEGMENTS from .fields import FIELDS from .datatypes import DATATYPES, DATATYPES_STRUCTS from .groups import GROUPS from .tables import TABLES from hl7apy.exceptions import ChildNotFound ELEMENTS = {'Message': MESSAGES, 'Group': GROUPS, 'Segment': SEGMENTS, 'Field': FIELDS, 'Component': DATATYPES, 'SubComponent': DATATYPES, 'Datatypes_Structs': DATATYPES_STRUCTS, 'Table': TABLES} def get(name, element_type): try: return ELEMENTS[element_type][name] except KeyError: raise ChildNotFound(name) def find(name, where): """ >>> from hl7apy.core import Segment >>> from hl7apy import find_reference >>> find_reference('UNKNOWN', (Segment, ), '2.5.1') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ChildNotFound: No child named UNKNOWN """ for cls in where: try: return {'ref': get(name, cls.__name__), 'name': name, 'cls': cls} except ChildNotFound: pass raise ChildNotFound(name) def is_base_datatype(datatype): return datatype in BASE_DATATYPES def get_base_datatypes(): return BASE_DATATYPES def _load_base_datatypes(): base_datatypes = ('ST', 'ID', 'DT', 'DTM', 'FT', 'GTS', 'IS', 'NM', 'SI', 'TM', 'TX', 'WD') module = importlib.import_module("hl7apy.base_datatypes") dts = {} for cls in base_datatypes: cls = getattr(module, cls) dts[cls.__name__] = cls return dts BASE_DATATYPES = _load_base_datatypes() ST = BASE_DATATYPES['ST'] ID = BASE_DATATYPES['ID'] DT = BASE_DATATYPES['DT'] DTM = BASE_DATATYPES['DTM'] FT = BASE_DATATYPES['FT'] GTS = BASE_DATATYPES['GTS'] IS = BASE_DATATYPES['IS'] NM = BASE_DATATYPES['NM'] SI = BASE_DATATYPES['SI'] TM = BASE_DATATYPES['TM'] TX = BASE_DATATYPES['TX'] WD = BASE_DATATYPES['WD'] ```
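The `get`/`find` pair above is a registry lookup: each element class name keys into its own reference table, and `find` tries a sequence of candidate classes until one table knows the name. A self-contained sketch of that pattern, with the registry entries invented for illustration:

```python
class ChildNotFound(Exception):
    pass

# Illustrative registry -- real hl7apy tables are far larger.
REGISTRY = {
    "Segment": {"MSH": ("sequence", ...)},
    "Field":   {"MSH_9": ("leaf", ...)},
}

def find(name, where):
    # Try each candidate class's table in order, as hl7apy's find() does.
    for cls_name in where:
        ref = REGISTRY.get(cls_name, {}).get(name)
        if ref is not None:
            return {"ref": ref, "name": name, "cls": cls_name}
    raise ChildNotFound(name)

print(find("MSH_9", ("Segment", "Field"))["cls"])   # Field
```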
[ { "content": "Here is the code content:\n```python\n# Copyright (c) 2017 Ansible Tower by Red Hat\n# All Rights Reserved.\nimport sys\nimport six\n\nfrom awx.main.utils.pglock import advisory_lock\nfrom awx.main.models import Instance, InstanceGroup\n\nfrom django.core.management.base import BaseCommand, Comman...
[ { "content": "Here is the code content:\n<|memory_start|>```python\n# Copyright (c) 2017 Ansible Tower by Red Hat\n# All Rights Reserved.\nimport sys\nimport six\n\nfrom awx.main.utils.pglock import advisory_lock\nfrom awx.main.models import Instance, InstanceGroup\n\nfrom django.core.management.base import Bas...
```python # Copyright (c) 2017 Ansible Tower by Red Hat # All Rights Reserved. import sys import six from awx.main.utils.pglock import advisory_lock from awx.main.models import Instance, InstanceGroup from django.core.management.base import BaseCommand, CommandError class InstanceNotFound(Exception): def __init__(self, message, changed, *args, **kwargs): self.message = message self.changed = changed super(InstanceNotFound, self).__init__(*args, **kwargs) class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument('--queuename', dest='queuename', type=lambda s: six.text_type(s, 'utf8'), help='Queue to create/update') parser.add_argument('--hostnames', dest='hostnames', type=lambda s: six.text_type(s, 'utf8'), help='Comma-Delimited Hosts to add to the Queue (will not remove already assigned instances)') parser.add_argument('--controller', dest='controller', type=lambda s: six.text_type(s, 'utf8'), default='', help='The controlling group (makes this an isolated group)') parser.add_argument('--instance_percent', dest='instance_percent', type=int, default=0, help='The percentage of active instances that will be assigned to this group'), parser.add_argument('--instance_minimum', dest='instance_minimum', type=int, default=0, help='The minimum number of instance that will be retained for this group from available instances') def get_create_update_instance_group(self, queuename, instance_percent, instance_min): ig = InstanceGroup.objects.filter(name=queuename) created = False changed = False (ig, created) = InstanceGroup.objects.get_or_create(name=queuename) if ig.policy_instance_percentage != instance_percent: ig.policy_instance_percentage = instance_percent changed = True if ig.policy_instance_minimum != instance_min: ig.policy_instance_minimum = instance_min changed = True if changed: ig.save() return (ig, created, changed) def update_instance_group_controller(self, ig, controller): changed = False control_ig = None if controller: control_ig = InstanceGroup.objects.filter(name=controller).first() if control_ig and ig.controller_id != control_ig.pk: ig.controller = control_ig ig.save() changed = True return (control_ig, changed) def add_instances_to_group(self, ig, hostname_list): changed = False instance_list_unique = set([x.strip() for x in hostname_list if x]) instances = [] for inst_name in instance_list_unique: instance = Instance.objects.filter(hostname=inst_name) if instance.exists(): instances.append(instance[0]) else: raise InstanceNotFound(six.text_type("Instance does not exist: {}").format(inst_name), changed) ig.instances.add(*instances) instance_list_before = ig.policy_instance_list instance_list_after = instance_list_unique new_instances = set(instance_list_after) - set(instance_list_before) if new_instances: changed = True ig.policy_instance_list = ig.policy_instance_list + list(new_instances) ig.save() return (instances, changed) def handle(self, **options): instance_not_found_err = None queuename = options.get('queuename') if not queuename: raise CommandError("Specify `--queuename` to use this command.") ctrl = options.get('controller') inst_per = options.get('instance_percent') inst_min = options.get('instance_minimum') hostname_list = [] if options.get('hostnames'): hostname_list = options.get('hostnames').split(",") with advisory_lock(six.text_type('instance_group_registration_{}').format(queuename)): changed2 = False changed3 = False (ig, created, changed1) = self.get_create_update_instance_group(queuename, inst_per, inst_min) if created: 
print(six.text_type("Creating instance group {}".format(ig.name))) elif not created: print(six.text_type("Instance Group already registered {}").format(ig.name)) if ctrl: (ig_ctrl, changed2) = self.update_instance_group_controller(ig, ctrl) if changed2: print(six.text_type("Set controller group {} on {}.").format(ctrl, queuename)) try: (instances, changed3) = self.add_instances_to_group(ig, hostname_list) for i in instances: print(six.text_type("Added instance {} to {}").format(i.hostname, ig.name)) except InstanceNotFound as e: instance_not_found_err = e if any([changed1, changed2, changed3]): print('(changed: True)') if instance_not_found_err: print(instance_not_found_err.message) sys.exit(1) ```
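The management command above follows a create-or-update-and-report-changed idiom: fetch or create the row, diff each field, and only save (and print `(changed: True)`) when something actually moved. A dependency-free sketch of that idiom, with a plain dict standing in for the `InstanceGroup` table (no Django here):

```python
# Hypothetical in-memory stand-in for the InstanceGroup model.
GROUPS = {}

def get_create_update_group(name, percent, minimum):
    created = name not in GROUPS
    ig = GROUPS.setdefault(name, {"percent": None, "minimum": None})
    changed = False
    # Diff each tracked field; flip `changed` only on a real difference.
    for key, value in (("percent", percent), ("minimum", minimum)):
        if ig[key] != value:
            ig[key] = value
            changed = True
    return ig, created, changed

ig, created, changed = get_create_update_group("tower", 50, 2)
print(created, changed)                       # True True
_, created, changed = get_create_update_group("tower", 50, 2)
print(created, changed)                       # False False -- idempotent rerun
```

The same shape makes the real command safe to run repeatedly from provisioning scripts.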
[ { "content": "```python\n\"\"\"\n@author: Michael Guarino\n\"\"\"\n\nimport numpy as np\n\nnp.set_printoptions(threshold=np.nan)\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\nimport tensorflow.contrib.layers as layers\n\nclass HAN:\n def __init__(self, max_seq_len, max_sent_len, num_classes,\n ...
[ { "content": "<|memory_start|>```python\n\"\"\"\n@author: Michael Guarino\n\"\"\"\n\nimport numpy as np\n\nnp.set_printoptions(threshold=np.nan)\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\nimport tensorflow.contrib.layers as layers\n\nclass HAN:\n def __init__(self, max_seq_len, max_sent_len,...
```python """ @author: Michael Guarino """ import numpy as np np.set_printoptions(threshold=np.nan) import tensorflow as tf from tensorflow.contrib import rnn import tensorflow.contrib.layers as layers class HAN: def __init__(self, max_seq_len, max_sent_len, num_classes, vocab_size, embedding_size, max_grad_norm, dropout_keep_proba, learning_rate): ## Parameters self.learning_rate = learning_rate self.vocab_size = vocab_size self.num_classes = num_classes self.max_seq_len = max_seq_len self.embedding_size = embedding_size self.word_encoder_num_hidden = max_seq_len self.word_output_size = max_seq_len self.sentence_encoder_num_hidden = max_sent_len self.sentence_output_size = max_sent_len self.max_grad_norm = max_grad_norm self.dropout_keep_proba = dropout_keep_proba # tf graph input self.input_x = tf.placeholder(shape=[None, None, None], dtype=tf.int32, name="input_x") self.input_y = tf.placeholder(shape=[None, self.num_classes], dtype=tf.int32, name="input_y") self.word_lengths = tf.placeholder(shape=[None, None], dtype=tf.int32, name="word_lengths") self.sentence_lengths = tf.placeholder(shape=[None,], dtype=tf.int32, name="sentence_lengths") self.is_training = tf.placeholder(dtype=tf.bool, name="is_training") # input_x dims (self.document_size, self.sentence_size, self.word_size) = tf.unstack(tf.shape(self.input_x)) with tf.device("/gpu:0"), tf.name_scope("embedding_layer"): w = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), dtype=tf.float32, name="w") # TODO check if this needs to be marked as untrainable self.input_x_embedded = tf.nn.embedding_lookup(w, self.input_x) # reshape input_x after embedding self.input_x_embedded = tf.reshape(self.input_x_embedded, [self.document_size * self.sentence_size, self.word_size, self.embedding_size]) self.input_x_embedded_lengths = tf.reshape(self.word_lengths, [self.document_size * self.sentence_size]) with tf.variable_scope("word_level"): self.word_encoder_outputs = self.bidirectional_RNN(num_hidden=self.word_encoder_num_hidden, inputs=self.input_x_embedded) word_level_output = self.attention(inputs=self.word_encoder_outputs, output_size=self.word_output_size) with tf.variable_scope("dropout"): print('self.is_training: {}'.format(self.is_training)) word_level_output = layers.dropout(word_level_output, keep_prob=self.dropout_keep_proba, is_training=self.is_training) # reshape word_level output self.sentence_encoder_inputs = tf.reshape(word_level_output, [self.document_size, self.sentence_size, self.word_output_size]) with tf.variable_scope("sentence_level"): self.sentence_encoder_outputs = self.bidirectional_RNN(num_hidden=self.sentence_encoder_num_hidden, inputs=self.sentence_encoder_inputs) sentence_level_output = self.attention(inputs=self.sentence_encoder_outputs, output_size=self.sentence_output_size) with tf.variable_scope("dropout"): sentence_level_output = layers.dropout(sentence_level_output, keep_prob=self.dropout_keep_proba, is_training=self.is_training) # Final model prediction with tf.variable_scope("classifier_output"): self.logits = layers.fully_connected(sentence_level_output, self.num_classes, activation_fn=None) #trainable=self.is_training) self.predictions = tf.argmax(self.logits, axis=1, name="predictions") # Calculate mean cross-entropy loss with tf.variable_scope("loss"): losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.logits) self.loss = tf.reduce_mean(losses) tf.summary.scalar("Loss", self.loss) # Accuracy with tf.variable_scope("accuracy"): 
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, axis=1)) self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") tf.summary.scalar("Accuracy", self.accuracy) def bidirectional_RNN(self, num_hidden, inputs): """ desc: create bidirectional rnn layer args: num_hidden: number of hidden units inputs: input word or sentence returns: concatenated encoder and decoder outputs """ with tf.name_scope("bidirectional_RNN"): encoder_fw_cell = rnn.GRUCell(num_hidden) encoder_bw_cell = rnn.GRUCell(num_hidden) ((encoder_fw_outputs, encoder_bw_outputs), (_, _)) = tf.nn.bidirectional_dynamic_rnn(cell_fw=encoder_fw_cell, cell_bw=encoder_bw_cell, inputs=inputs, dtype=tf.float32, time_major=True) encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2) return encoder_outputs # end def attention(self, inputs, output_size): """ desc: create attention mechanism args: inputs: input which is sentence or document level output from bidirectional rnn layer output_size: specify the dimensions of the output returns: output from attention distribution """ with tf.variable_scope("attention"): attention_context_vector_uw = tf.get_variable(name="attention_context_vector", shape=[output_size], #trainable=self.is_training, initializer=layers.xavier_initializer(), dtype=tf.float32) input_projection_u = layers.fully_connected(inputs, output_size, #trainable=self.is_training, activation_fn=tf.tanh) vector_attn = tf.reduce_sum(tf.multiply(input_projection_u, attention_context_vector_uw), axis=2, keep_dims=True) attention_weights = tf.nn.softmax(vector_attn, dim=1) weighted_projection = tf.multiply(input_projection_u, attention_weights) outputs = tf.reduce_sum(weighted_projection, axis=1) return outputs # end # end ```
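The `attention()` method above projects the encoder outputs, scores them against a learned context vector, softmaxes the scores over the time axis, and returns the weighted sum. The same computation in plain NumPy, with random weights as stand-ins for the learned parameters:

```python
import numpy as np

def attention_pool(inputs, W, b, context):
    # inputs: (batch, time, dim); W: (dim, dim); b, context: (dim,)
    proj = np.tanh(inputs @ W + b)                  # projected inputs
    scores = (proj * context).sum(axis=2, keepdims=True)
    scores -= scores.max(axis=1, keepdims=True)     # numerical stability
    weights = np.exp(scores)
    weights /= weights.sum(axis=1, keepdims=True)   # softmax over time axis
    return (proj * weights).sum(axis=1)             # (batch, dim)

rng = np.random.default_rng(0)
x = rng.normal(size=(2, 5, 8))
out = attention_pool(x, rng.normal(size=(8, 8)), np.zeros(8), rng.normal(size=8))
print(out.shape)   # (2, 8)
```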
[ { "content": "Repeat the code exactly as the original, including blank lines:\n```python\n##########\n# pattern_impl.py\n##########\nfrom installmethod import installmethod # the installmethod from recipe: 223613\n\nclass ObserverPattern:\n \"\"\"\n A reusable implementation of the Observer pattern.\n ...
[ { "content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n##########\n# pattern_impl.py\n##########\nfrom installmethod import installmethod # the installmethod from recipe: 223613\n\nclass ObserverPattern:\n \"\"\"\n A reusable implementation of the Observe...
```python
##########
# pattern_impl.py
##########
from installmethod import installmethod  # the installmethod from recipe: 223613

class ObserverPattern:
    """
    A reusable implementation of the Observer pattern.
    """
    theSubject = None
    observers = {}

    class Subject:
        def __init__(self):
            self.observers = []
        def attach(self, observer):
            self.observers.append(observer)
        def detach(self, observer):
            self.observers.remove(observer)
        def notify(self):
            for observer in self.observers:
                observer.update(self)
        def decoration(self):
            self.decorated_trigger()
            self.notify()

    class Observer:
        def __init__(self, subject):
            subject.attach(self)
        def update(self, observer):
            currentState = observer.get_current_state()
            self.react_to_observation(currentState)

    def specify_subject(self, subject):
        self.theSubject = subject
        self.make_generalization(subject, self.Subject)

    def add_observer(self, observer):
        self.observers[observer.__name__] = observer
        self.make_generalization(observer, self.Observer)

    def make_generalization(self, childClass, parentClass):
        # NOTE: rebinding __bases__ at runtime relied on Python 2 classic-class
        # semantics; new-style classes may reject this assignment.
        bases = list(childClass.__bases__)
        bases.append(parentClass)
        childClass.__bases__ = tuple(bases)

    def make_observation(self, changeObservation, changeReaction):
        func = getattr(self.theSubject, changeObservation)
        installmethod(func, self.theSubject, "get_current_state")
        for observer in self.observers.keys():
            func = getattr(self.observers[observer], changeReaction)
            installmethod(func, self.observers[observer], "react_to_observation")

    def add_trigger(self, trigger):
        func = getattr(self.theSubject, trigger)
        installmethod(func, self.theSubject, "decorated_trigger")
        func = getattr(self.theSubject, "decoration")
        installmethod(func, self.theSubject, trigger)


##########
# example.py
##########
class ClockTimer:
    def get_time(self):
        # get current state of the subject
        return self.currentTime

    def tick(self):
        # update internal time-keeping state
        import time
        self.currentTime = time.ctime()

class DigitalClock:
    def draw(self, currentTime):
        # display currentTime as a digital clock
        print("DigitalClock: current time is", currentTime)

class AnalogClock:
    def draw(self, currentTime):
        # display currentTime as an analog clock
        print("AnalogClock: current time is", currentTime)

if __name__ == '__main__':
    from pattern_impl import ObserverPattern

    observerPattern = ObserverPattern()
    observerPattern.specify_subject(ClockTimer)
    observerPattern.add_observer(DigitalClock)
    observerPattern.add_observer(AnalogClock)
    observerPattern.make_observation("get_time", "draw")
    observerPattern.add_trigger("tick")

    aTimer = ClockTimer()
    dClock = DigitalClock(aTimer)
    aClock = AnalogClock(aTimer)

    import time
    for i in range(10):
        print("\nTick!")
        aTimer.tick()
        time.sleep(1)
```
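For contrast, the same attach/notify flow in ordinary Python 3, without the runtime `__bases__` surgery the recipe performs; observers are plain callables:

```python
class Subject:
    def __init__(self):
        self._observers = []

    def attach(self, obs):
        self._observers.append(obs)

    def notify(self, state):
        # Push the new state to every registered observer.
        for obs in self._observers:
            obs(state)

clock = Subject()
clock.attach(lambda t: print("digital:", t))
clock.attach(lambda t: print("analog: ", t))
clock.notify("12:00")
```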
[ { "content": "Repeat the code precisely:\n```python\n\"\"\"\nThe :mod:`sklearn.model_selection._split` module includes classes and\nfunctions to split the data based on a preset strategy.\n\"\"\"\n\n# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,\n# Gael Varoquaux <gael.varoquaux@normalesup....
[ { "content": "Repeat the code precisely:\n<|memory_start|>```python\n\"\"\"\nThe :mod:`sklearn.model_selection._split` module includes classes and\nfunctions to split the data based on a preset strategy.\n\"\"\"\n\n# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,\n# Gael Varoquaux <gael.varoq...
```python """ The :mod:`sklearn.model_selection._split` module includes classes and functions to split the data based on a preset strategy. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>, # Gael Varoquaux <gael.varoquaux@normalesup.org>, # Olivier Girsel <olivier.grisel@ensta.org> # Raghav R V <rvraghav93@gmail.com> # License: BSD 3 clause from __future__ import division from __future__ import print_function import numbers import warnings from abc import ABCMeta, abstractmethod from collections import Iterable from itertools import chain, combinations from math import ceil, floor import numpy as np from scipy.misc import comb from ..externals.six.moves import zip from ..base import _pprint from ..externals.six import with_metaclass from ..gaussian_process.kernels import Kernel as GPKernel from ..utils import indexable, check_random_state, safe_indexing from ..utils.fixes import bincount from ..utils.fixes import signature from ..utils.multiclass import type_of_target from ..utils.validation import _num_samples, column_or_1d __all__ = ['BaseCrossValidator', 'KFold', 'LabelKFold', 'LeaveOneLabelOut', 'LeaveOneOut', 'LeavePLabelOut', 'LeavePOut', 'ShuffleSplit', 'LabelShuffleSplit', 'StratifiedKFold', 'StratifiedShuffleSplit', 'PredefinedSplit', 'train_test_split', 'check_cv'] class BaseCrossValidator(with_metaclass(ABCMeta)): """Base class for all cross-validators Implementations must define `_iter_test_masks` or `_iter_test_indices`. """ def __init__(self): # We need this for the build_repr to work properly in py2.7 # see #6304 pass def split(self, X, y=None, labels=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : array-like, of length n_samples The target variable for supervised learning problems. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. Returns ------- train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ X, y, labels = indexable(X, y, labels) indices = np.arange(_num_samples(X)) for test_index in self._iter_test_masks(X, y, labels): train_index = indices[np.logical_not(test_index)] test_index = indices[test_index] yield train_index, test_index # Since subclasses must implement either _iter_test_masks or # _iter_test_indices, neither can be abstract. def _iter_test_masks(self, X=None, y=None, labels=None): """Generates boolean masks corresponding to test sets. By default, delegates to _iter_test_indices(X, y, labels) """ for test_index in self._iter_test_indices(X, y, labels): test_mask = np.zeros(_num_samples(X), dtype=np.bool) test_mask[test_index] = True yield test_mask def _iter_test_indices(self, X=None, y=None, labels=None): """Generates integer indices corresponding to test sets.""" raise NotImplementedError @abstractmethod def get_n_splits(self, X=None, y=None, labels=None): """Returns the number of splitting iterations in the cross-validator""" def __repr__(self): return _build_repr(self) class LeaveOneOut(BaseCrossValidator): """Leave-One-Out cross-validator Provides train/test indices to split data in train/test sets. Each sample is used once as a test set (singleton) while the remaining samples form the training set. 
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_folds=n)`` and ``LeavePOut(p=1)`` where ``n`` is the number of samples. Due to the high number of test sets (which is the same as the number of samples) this cross-validation method can be very costly. For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit` or :class:`StratifiedKFold`. Read more in the :ref:`User Guide <cross_validation>`. Examples -------- >>> from sklearn.model_selection import LeaveOneOut >>> X = np.array([[1, 2], [3, 4]]) >>> y = np.array([1, 2]) >>> loo = LeaveOneOut() >>> loo.get_n_splits(X) 2 >>> print(loo) LeaveOneOut() >>> for train_index, test_index in loo.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) TRAIN: [1] TEST: [0] [[3 4]] [[1 2]] [2] [1] TRAIN: [0] TEST: [1] [[1 2]] [[3 4]] [1] [2] See also -------- LeaveOneLabelOut For splitting the data according to explicit, domain-specific stratification of the dataset. LabelKFold: K-fold iterator variant with non-overlapping labels. """ def _iter_test_indices(self, X, y=None, labels=None): return range(_num_samples(X)) def get_n_splits(self, X, y=None, labels=None): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : object Always ignored, exists for compatibility. labels : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator. """ if X is None: raise ValueError("The X parameter should not be None") return _num_samples(X) class LeavePOut(BaseCrossValidator): """Leave-P-Out cross-validator Provides train/test indices to split data in train/test sets. This results in testing on all distinct samples of size p, while the remaining n - p samples form the training set in each iteration. Note: ``LeavePOut(p)`` is NOT equivalent to ``KFold(n_folds=n_samples // p)`` which creates non-overlapping test sets. Due to the high number of iterations which grows combinatorically with the number of samples this cross-validation method can be very costly. For large datasets one should favor :class:`KFold`, :class:`StratifiedKFold` or :class:`ShuffleSplit`. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- p : int Size of the test sets. Examples -------- >>> from sklearn.model_selection import LeavePOut >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 3, 4]) >>> lpo = LeavePOut(2) >>> lpo.get_n_splits(X) 6 >>> print(lpo) LeavePOut(p=2) >>> for train_index, test_index in lpo.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... 
y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [1 3] TEST: [0 2] TRAIN: [1 2] TEST: [0 3] TRAIN: [0 3] TEST: [1 2] TRAIN: [0 2] TEST: [1 3] TRAIN: [0 1] TEST: [2 3] """ def __init__(self, p): self.p = p def _iter_test_indices(self, X, y=None, labels=None): for combination in combinations(range(_num_samples(X)), self.p): yield np.array(combination) def get_n_splits(self, X, y=None, labels=None): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : object Always ignored, exists for compatibility. labels : object Always ignored, exists for compatibility. """ if X is None: raise ValueError("The X parameter should not be None") return int(comb(_num_samples(X), self.p, exact=True)) class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)): """Base class for KFold and StratifiedKFold""" @abstractmethod def __init__(self, n_folds, shuffle, random_state): if not isinstance(n_folds, numbers.Integral): raise ValueError('The number of folds must be of Integral type. ' '%s of type %s was passed.' % (n_folds, type(n_folds))) n_folds = int(n_folds) if n_folds <= 1: raise ValueError( "k-fold cross-validation requires at least one" " train/test split by setting n_folds=2 or more," " got n_folds={0}.".format(n_folds)) if not isinstance(shuffle, bool): raise TypeError("shuffle must be True or False;" " got {0}".format(shuffle)) self.n_folds = n_folds self.shuffle = shuffle self.random_state = random_state def split(self, X, y=None, labels=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) The target variable for supervised learning problems. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. Returns ------- train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ X, y, labels = indexable(X, y, labels) n_samples = _num_samples(X) if self.n_folds > n_samples: raise ValueError( ("Cannot have number of folds n_folds={0} greater" " than the number of samples: {1}.").format(self.n_folds, n_samples)) for train, test in super(_BaseKFold, self).split(X, y, labels): yield train, test def get_n_splits(self, X=None, y=None, labels=None): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. labels : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator. """ return self.n_folds class KFold(_BaseKFold): """K-Folds cross-validator Provides train/test indices to split data in train/test sets. Split dataset into k consecutive folds (without shuffling by default). Each fold is then used once as a validation while the k - 1 remaining folds form the training set. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n_folds : int, default=3 Number of folds. Must be at least 2. shuffle : boolean, optional Whether to shuffle the data before splitting into batches. 
random_state : None, int or RandomState When shuffle=True, pseudo-random number generator state used for shuffling. If None, use default numpy RNG for shuffling. Examples -------- >>> from sklearn.model_selection import KFold >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([1, 2, 3, 4]) >>> kf = KFold(n_folds=2) >>> kf.get_n_splits(X) 2 >>> print(kf) # doctest: +NORMALIZE_WHITESPACE KFold(n_folds=2, random_state=None, shuffle=False) >>> for train_index, test_index in kf.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [0 1] TEST: [2 3] Notes ----- The first ``n_samples % n_folds`` folds have size ``n_samples // n_folds + 1``, other folds have size ``n_samples // n_folds``, where ``n_samples`` is the number of samples. See also -------- StratifiedKFold Takes label information into account to avoid building folds with imbalanced class distributions (for binary or multiclass classification tasks). LabelKFold: K-fold iterator variant with non-overlapping labels. """ def __init__(self, n_folds=3, shuffle=False, random_state=None): super(KFold, self).__init__(n_folds, shuffle, random_state) def _iter_test_indices(self, X, y=None, labels=None): n_samples = _num_samples(X) indices = np.arange(n_samples) if self.shuffle: check_random_state(self.random_state).shuffle(indices) n_folds = self.n_folds fold_sizes = (n_samples // n_folds) * np.ones(n_folds, dtype=np.int) fold_sizes[:n_samples % n_folds] += 1 current = 0 for fold_size in fold_sizes: start, stop = current, current + fold_size yield indices[start:stop] current = stop class LabelKFold(_BaseKFold): """K-fold iterator variant with non-overlapping labels. The same label will not appear in two different folds (the number of distinct labels has to be at least equal to the number of folds). The folds are approximately balanced in the sense that the number of distinct labels is approximately the same in each fold. Parameters ---------- n_folds : int, default=3 Number of folds. Must be at least 2. Examples -------- >>> from sklearn.model_selection import LabelKFold >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 3, 4]) >>> labels = np.array([0, 0, 2, 2]) >>> label_kfold = LabelKFold(n_folds=2) >>> label_kfold.get_n_splits(X, y, labels) 2 >>> print(label_kfold) LabelKFold(n_folds=2) >>> for train_index, test_index in label_kfold.split(X, y, labels): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) ... TRAIN: [0 1] TEST: [2 3] [[1 2] [3 4]] [[5 6] [7 8]] [1 2] [3 4] TRAIN: [2 3] TEST: [0 1] [[5 6] [7 8]] [[1 2] [3 4]] [3 4] [1 2] See also -------- LeaveOneLabelOut For splitting the data according to explicit domain-specific stratification of the dataset. """ def __init__(self, n_folds=3): super(LabelKFold, self).__init__(n_folds, shuffle=False, random_state=None) def _iter_test_indices(self, X, y, labels): if labels is None: raise ValueError("The labels parameter should not be None") unique_labels, labels = np.unique(labels, return_inverse=True) n_labels = len(unique_labels) if self.n_folds > n_labels: raise ValueError("Cannot have number of folds n_folds=%d greater" " than the number of labels: %d." 
% (self.n_folds, n_labels)) # Weight labels by their number of occurrences n_samples_per_label = np.bincount(labels) # Distribute the most frequent labels first indices = np.argsort(n_samples_per_label)[::-1] n_samples_per_label = n_samples_per_label[indices] # Total weight of each fold n_samples_per_fold = np.zeros(self.n_folds) # Mapping from label index to fold index label_to_fold = np.zeros(len(unique_labels)) # Distribute samples by adding the largest weight to the lightest fold for label_index, weight in enumerate(n_samples_per_label): lightest_fold = np.argmin(n_samples_per_fold) n_samples_per_fold[lightest_fold] += weight label_to_fold[indices[label_index]] = lightest_fold indices = label_to_fold[labels] for f in range(self.n_folds): yield np.where(indices == f)[0] class StratifiedKFold(_BaseKFold): """Stratified K-Folds cross-validator Provides train/test indices to split data in train/test sets. This cross-validation object is a variation of KFold that returns stratified folds. The folds are made by preserving the percentage of samples for each class. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n_folds : int, default=3 Number of folds. Must be at least 2. shuffle : boolean, optional Whether to shuffle each stratification of the data before splitting into batches. random_state : None, int or RandomState When shuffle=True, pseudo-random number generator state used for shuffling. If None, use default numpy RNG for shuffling. Examples -------- >>> from sklearn.model_selection import StratifiedKFold >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> skf = StratifiedKFold(n_folds=2) >>> skf.get_n_splits(X, y) 2 >>> print(skf) # doctest: +NORMALIZE_WHITESPACE StratifiedKFold(n_folds=2, random_state=None, shuffle=False) >>> for train_index, test_index in skf.split(X, y): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 3] TEST: [0 2] TRAIN: [0 2] TEST: [1 3] Notes ----- All the folds have size ``trunc(n_samples / n_folds)``, the last one has the complementary. """ def __init__(self, n_folds=3, shuffle=False, random_state=None): super(StratifiedKFold, self).__init__(n_folds, shuffle, random_state) def _make_test_folds(self, X, y=None, labels=None): if self.shuffle: rng = check_random_state(self.random_state) else: rng = self.random_state y = np.asarray(y) n_samples = y.shape[0] unique_y, y_inversed = np.unique(y, return_inverse=True) y_counts = bincount(y_inversed) min_labels = np.min(y_counts) if np.all(self.n_folds > y_counts): raise ValueError("All the n_labels for individual classes" " are less than %d folds." % (self.n_folds)) if self.n_folds > min_labels: warnings.warn(("The least populated class in y has only %d" " members, which is too few. The minimum" " number of labels for any class cannot" " be less than n_folds=%d." % (min_labels, self.n_folds)), Warning) # pre-assign each sample to a test fold index using individual KFold # splitting strategies for each class so as to respect the balance of # classes # NOTE: Passing the data corresponding to ith class say X[y==class_i] # will break when the data is not 100% stratifiable for all classes. 
# So we pass np.zeroes(max(c, n_folds)) as data to the KFold per_cls_cvs = [ KFold(self.n_folds, shuffle=self.shuffle, random_state=rng).split(np.zeros(max(count, self.n_folds))) for count in y_counts] test_folds = np.zeros(n_samples, dtype=np.int) for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)): for cls, (_, test_split) in zip(unique_y, per_cls_splits): cls_test_folds = test_folds[y == cls] # the test split can be too big because we used # KFold(...).split(X[:max(c, n_folds)]) when data is not 100% # stratifiable for all the classes # (we use a warning instead of raising an exception) # If this is the case, let's trim it: test_split = test_split[test_split < len(cls_test_folds)] cls_test_folds[test_split] = test_fold_indices test_folds[y == cls] = cls_test_folds return test_folds def _iter_test_masks(self, X, y=None, labels=None): test_folds = self._make_test_folds(X, y) for i in range(self.n_folds): yield test_folds == i def split(self, X, y, labels=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) The target variable for supervised learning problems. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. Returns ------- train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ return super(StratifiedKFold, self).split(X, y, labels) class LeaveOneLabelOut(BaseCrossValidator): """Leave One Label Out cross-validator Provides train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. Read more in the :ref:`User Guide <cross_validation>`. Examples -------- >>> from sklearn.model_selection import LeaveOneLabelOut >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 1, 2]) >>> labels = np.array([1, 1, 2, 2]) >>> lol = LeaveOneLabelOut() >>> lol.get_n_splits(X, y, labels) 2 >>> print(lol) LeaveOneLabelOut() >>> for train_index, test_index in lol.split(X, y, labels): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) TRAIN: [2 3] TEST: [0 1] [[5 6] [7 8]] [[1 2] [3 4]] [1 2] [1 2] TRAIN: [0 1] TEST: [2 3] [[1 2] [3 4]] [[5 6] [7 8]] [1 2] [1 2] """ def _iter_test_masks(self, X, y, labels): if labels is None: raise ValueError("The labels parameter should not be None") # We make a copy of labels to avoid side-effects during iteration labels = np.array(labels, copy=True) unique_labels = np.unique(labels) for i in unique_labels: yield labels == i def get_n_splits(self, X, y, labels): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. 
Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator. """ if labels is None: raise ValueError("The labels parameter should not be None") return len(np.unique(labels)) class LeavePLabelOut(BaseCrossValidator): """Leave P Labels Out cross-validator Provides train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. The difference between LeavePLabelOut and LeaveOneLabelOut is that the former builds the test sets with all the samples assigned to ``p`` different values of the labels while the latter uses samples all assigned the same labels. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n_labels : int Number of labels (``p``) to leave out in the test split. Examples -------- >>> from sklearn.model_selection import LeavePLabelOut >>> X = np.array([[1, 2], [3, 4], [5, 6]]) >>> y = np.array([1, 2, 1]) >>> labels = np.array([1, 2, 3]) >>> lpl = LeavePLabelOut(n_labels=2) >>> lpl.get_n_splits(X, y, labels) 3 >>> print(lpl) LeavePLabelOut(n_labels=2) >>> for train_index, test_index in lpl.split(X, y, labels): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) TRAIN: [2] TEST: [0 1] [[5 6]] [[1 2] [3 4]] [1] [1 2] TRAIN: [1] TEST: [0 2] [[3 4]] [[1 2] [5 6]] [2] [1 1] TRAIN: [0] TEST: [1 2] [[1 2]] [[3 4] [5 6]] [1] [2 1] See also -------- LabelKFold: K-fold iterator variant with non-overlapping labels. """ def __init__(self, n_labels): self.n_labels = n_labels def _iter_test_masks(self, X, y, labels): if labels is None: raise ValueError("The labels parameter should not be None") labels = np.array(labels, copy=True) unique_labels = np.unique(labels) combi = combinations(range(len(unique_labels)), self.n_labels) for indices in combi: test_index = np.zeros(_num_samples(X), dtype=np.bool) for l in unique_labels[np.array(indices)]: test_index[labels == l] = True yield test_index def get_n_splits(self, X, y, labels): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator. """ if labels is None: raise ValueError("The labels parameter should not be None") return int(comb(len(np.unique(labels)), self.n_labels, exact=True)) class BaseShuffleSplit(with_metaclass(ABCMeta)): """Base class for ShuffleSplit and StratifiedShuffleSplit""" def __init__(self, n_iter=10, test_size=0.1, train_size=None, random_state=None): _validate_shuffle_split_init(test_size, train_size) self.n_iter = n_iter self.test_size = test_size self.train_size = train_size self.random_state = random_state def split(self, X, y=None, labels=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. 
y : array-like, shape (n_samples,) The target variable for supervised learning problems. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. Returns ------- train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ X, y, labels = indexable(X, y, labels) for train, test in self._iter_indices(X, y, labels): yield train, test @abstractmethod def _iter_indices(self, X, y=None, labels=None): """Generate (train, test) indices""" def get_n_splits(self, X=None, y=None, labels=None): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. labels : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator. """ return self.n_iter def __repr__(self): return _build_repr(self) class ShuffleSplit(BaseShuffleSplit): """Random permutation cross-validator Yields indices to split data into training and test sets. Note: contrary to other cross-validation strategies, random splits do not guarantee that all folds will be different, although this is still very likely for sizeable datasets. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n_iter : int (default 10) Number of re-shuffling & splitting iterations. test_size : float, int, or None, default 0.1 If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Examples -------- >>> from sklearn.model_selection import ShuffleSplit >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 1, 2]) >>> rs = ShuffleSplit(n_iter=3, test_size=.25, random_state=0) >>> rs.get_n_splits(X) 3 >>> print(rs) ShuffleSplit(n_iter=3, random_state=0, test_size=0.25, train_size=None) >>> for train_index, test_index in rs.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) ... # doctest: +ELLIPSIS TRAIN: [3 1 0] TEST: [2] TRAIN: [2 1 3] TEST: [0] TRAIN: [0 2 1] TEST: [3] >>> rs = ShuffleSplit(n_iter=3, train_size=0.5, test_size=.25, ... random_state=0) >>> for train_index, test_index in rs.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) ... 
# doctest: +ELLIPSIS TRAIN: [3 1] TEST: [2] TRAIN: [2 1] TEST: [0] TRAIN: [0 2] TEST: [3] """ def _iter_indices(self, X, y=None, labels=None): n_samples = _num_samples(X) n_train, n_test = _validate_shuffle_split(n_samples, self.test_size, self.train_size) rng = check_random_state(self.random_state) for i in range(self.n_iter): # random partition permutation = rng.permutation(n_samples) ind_test = permutation[:n_test] ind_train = permutation[n_test:(n_test + n_train)] yield ind_train, ind_test class LabelShuffleSplit(ShuffleSplit): '''Shuffle-Labels-Out cross-validation iterator Provides randomized train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. The difference between LeavePLabelOut and LabelShuffleSplit is that the former generates splits using all subsets of size ``p`` unique labels, whereas LabelShuffleSplit generates a user-determined number of random test splits, each with a user-determined fraction of unique labels. For example, a less computationally intensive alternative to ``LeavePLabelOut(p=10)`` would be ``LabelShuffleSplit(test_size=10, n_iter=100)``. Note: The parameters ``test_size`` and ``train_size`` refer to labels, and not to samples, as in ShuffleSplit. Parameters ---------- n_iter : int (default 5) Number of re-shuffling & splitting iterations. test_size : float (default 0.2), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the labels to include in the test split. If int, represents the absolute number of test labels. If None, the value is automatically set to the complement of the train size. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the labels to include in the train split. If int, represents the absolute number of train labels. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. ''' def __init__(self, n_iter=5, test_size=0.2, train_size=None, random_state=None): super(LabelShuffleSplit, self).__init__( n_iter=n_iter, test_size=test_size, train_size=train_size, random_state=random_state) def _iter_indices(self, X, y, labels): if labels is None: raise ValueError("The labels parameter should not be None") classes, label_indices = np.unique(labels, return_inverse=True) for label_train, label_test in super( LabelShuffleSplit, self)._iter_indices(X=classes): # these are the indices of classes in the partition # invert them into data indices train = np.flatnonzero(np.in1d(label_indices, label_train)) test = np.flatnonzero(np.in1d(label_indices, label_test)) yield train, test class StratifiedShuffleSplit(BaseShuffleSplit): """Stratified ShuffleSplit cross-validator Provides train/test indices to split data in train/test sets. This cross-validation object is a merge of StratifiedKFold and ShuffleSplit, which returns stratified randomized folds. The folds are made by preserving the percentage of samples for each class. Note: like the ShuffleSplit strategy, stratified random splits do not guarantee that all folds will be different, although this is still very likely for sizeable datasets. Read more in the :ref:`User Guide <cross_validation>`. 
Parameters ---------- n_iter : int (default 10) Number of re-shuffling & splitting iterations. test_size : float (default 0.1), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Examples -------- >>> from sklearn.model_selection import StratifiedShuffleSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> sss = StratifiedShuffleSplit(n_iter=3, test_size=0.5, random_state=0) >>> sss.get_n_splits(X, y) 3 >>> print(sss) # doctest: +ELLIPSIS StratifiedShuffleSplit(n_iter=3, random_state=0, ...) >>> for train_index, test_index in sss.split(X, y): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 2] TEST: [3 0] TRAIN: [0 2] TEST: [1 3] TRAIN: [0 2] TEST: [3 1] """ def __init__(self, n_iter=10, test_size=0.1, train_size=None, random_state=None): super(StratifiedShuffleSplit, self).__init__( n_iter, test_size, train_size, random_state) def _iter_indices(self, X, y, labels=None): n_samples = _num_samples(X) n_train, n_test = _validate_shuffle_split(n_samples, self.test_size, self.train_size) classes, y_indices = np.unique(y, return_inverse=True) n_classes = classes.shape[0] class_counts = bincount(y_indices) if np.min(class_counts) < 2: raise ValueError("The least populated class in y has only 1" " member, which is too few. The minimum" " number of labels for any class cannot" " be less than 2.") if n_train < n_classes: raise ValueError('The train_size = %d should be greater or ' 'equal to the number of classes = %d' % (n_train, n_classes)) if n_test < n_classes: raise ValueError('The test_size = %d should be greater or ' 'equal to the number of classes = %d' % (n_test, n_classes)) rng = check_random_state(self.random_state) p_i = class_counts / float(n_samples) n_i = np.round(n_train * p_i).astype(int) t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)) for _ in range(self.n_iter): train = [] test = [] for i, class_i in enumerate(classes): permutation = rng.permutation(class_counts[i]) perm_indices_class_i = np.where((y == class_i))[0][permutation] train.extend(perm_indices_class_i[:n_i[i]]) test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]]) # Because of rounding issues (as n_train and n_test are not # dividers of the number of elements per class), we may end # up here with less samples in train and test than asked for. 
            if len(train) < n_train or len(test) < n_test:
                # We complete by affecting randomly the missing indexes
                missing_indices = np.where(bincount(train + test,
                                                    minlength=len(y)) == 0)[0]
                missing_indices = rng.permutation(missing_indices)
                train.extend(missing_indices[:(n_train - len(train))])
                test.extend(missing_indices[-(n_test - len(test)):])

            train = rng.permutation(train)
            test = rng.permutation(test)

            yield train, test

    def split(self, X, y, labels=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.

        labels : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        return super(StratifiedShuffleSplit, self).split(X, y, labels)


def _validate_shuffle_split_init(test_size, train_size):
    """Validation helper to check the test_size and train_size at init

    NOTE This does not take into account the number of samples which
    is known only at split
    """
    if test_size is None and train_size is None:
        raise ValueError('test_size and train_size can not both be None')

    if test_size is not None:
        if np.asarray(test_size).dtype.kind == 'f':
            if test_size >= 1.:
                raise ValueError(
                    'test_size=%f should be smaller '
                    'than 1.0 or be an integer' % test_size)
        elif np.asarray(test_size).dtype.kind != 'i':
            # int values are checked during split based on the input
            raise ValueError("Invalid value for test_size: %r" % test_size)

    if train_size is not None:
        if np.asarray(train_size).dtype.kind == 'f':
            if train_size >= 1.:
                raise ValueError("train_size=%f should be smaller "
                                 "than 1.0 or be an integer" % train_size)
            elif (np.asarray(test_size).dtype.kind == 'f' and
                    (train_size + test_size) > 1.):
                raise ValueError('The sum of test_size and train_size = %f, '
                                 'should be smaller than 1.0. Reduce '
                                 'test_size and/or train_size.' %
                                 (train_size + test_size))
        elif np.asarray(train_size).dtype.kind != 'i':
            # int values are checked during split based on the input
            raise ValueError("Invalid value for train_size: %r" % train_size)


def _validate_shuffle_split(n_samples, test_size, train_size):
    """
    Validation helper to check if the train/test sizes are meaningful with
    respect to the size of the data (n_samples)
    """
    if (test_size is not None and np.asarray(test_size).dtype.kind == 'i'
            and test_size >= n_samples):
        raise ValueError('test_size=%d should be smaller than the number of '
                         'samples %d' % (test_size, n_samples))

    if (train_size is not None and np.asarray(train_size).dtype.kind == 'i'
            and train_size >= n_samples):
        raise ValueError("train_size=%d should be smaller than the number of"
                         " samples %d" % (train_size, n_samples))

    if np.asarray(test_size).dtype.kind == 'f':
        n_test = ceil(test_size * n_samples)
    elif np.asarray(test_size).dtype.kind == 'i':
        n_test = float(test_size)

    if train_size is None:
        n_train = n_samples - n_test
    elif np.asarray(train_size).dtype.kind == 'f':
        n_train = floor(train_size * n_samples)
    else:
        n_train = float(train_size)

    if test_size is None:
        n_test = n_samples - n_train

    if n_train + n_test > n_samples:
        raise ValueError('The sum of train_size and test_size = %d, '
                         'should be smaller than the number of '
                         'samples %d. Reduce test_size and/or '
                         'train_size.'
% (n_train + n_test, n_samples)) return int(n_train), int(n_test) class PredefinedSplit(BaseCrossValidator): """Predefined split cross-validator Splits the data into training/test set folds according to a predefined scheme. Each sample can be assigned to at most one test set fold, as specified by the user through the ``test_fold`` parameter. Read more in the :ref:`User Guide <cross_validation>`. Examples -------- >>> from sklearn.model_selection import PredefinedSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> test_fold = [0, 1, -1, 1] >>> ps = PredefinedSplit(test_fold) >>> ps.get_n_splits() 2 >>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS PredefinedSplit(test_fold=array([ 0, 1, -1, 1])) >>> for train_index, test_index in ps.split(): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 2 3] TEST: [0] TRAIN: [0 2] TEST: [1 3] """ def __init__(self, test_fold): self.test_fold = np.array(test_fold, dtype=np.int) self.test_fold = column_or_1d(self.test_fold) self.unique_folds = np.unique(self.test_fold) self.unique_folds = self.unique_folds[self.unique_folds != -1] def split(self, X=None, y=None, labels=None): """Generate indices to split data into training and test set. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. labels : object Always ignored, exists for compatibility. Returns ------- train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ ind = np.arange(len(self.test_fold)) for test_index in self._iter_test_masks(): train_index = ind[np.logical_not(test_index)] test_index = ind[test_index] yield train_index, test_index def _iter_test_masks(self): """Generates boolean masks corresponding to test sets.""" for f in self.unique_folds: test_index = np.where(self.test_fold == f)[0] test_mask = np.zeros(len(self.test_fold), dtype=np.bool) test_mask[test_index] = True yield test_mask def get_n_splits(self, X=None, y=None, labels=None): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. labels : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator. """ return len(self.unique_folds) class _CVIterableWrapper(BaseCrossValidator): """Wrapper class for old style cv objects and iterables.""" def __init__(self, cv): self.cv = cv def get_n_splits(self, X=None, y=None, labels=None): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. labels : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator. """ return len(self.cv) # Both iterables and old-cv objects support len def split(self, X=None, y=None, labels=None): """Generate indices to split data into training and test set. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. labels : object Always ignored, exists for compatibility. Returns ------- train : ndarray The training set indices for that split. 
        test : ndarray
            The testing set indices for that split.
        """
        for train, test in self.cv:
            yield train, test


def check_cv(cv=3, y=None, classifier=False):
    """Input checker utility for building a cross-validator

    Parameters
    ----------
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.

        For integer/None inputs, if classifier is True and ``y`` is either
        binary or multiclass, :class:`StratifiedKFold` is used. In all other
        cases, :class:`KFold` is used.

        Refer to the :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    y : array-like, optional
        The target variable for supervised learning problems.

    classifier : boolean, optional, default False
        Whether the task is a classification task, in which case
        stratified KFold will be used.

    Returns
    -------
    checked_cv : a cross-validator instance.
        The return value is a cross-validator which generates the train/test
        splits via the ``split`` method.
    """
    if cv is None:
        cv = 3

    if isinstance(cv, numbers.Integral):
        if (classifier and (y is not None) and
                (type_of_target(y) in ('binary', 'multiclass'))):
            return StratifiedKFold(cv)
        else:
            return KFold(cv)

    if not hasattr(cv, 'split') or isinstance(cv, str):
        if not isinstance(cv, Iterable) or isinstance(cv, str):
            raise ValueError("Expected cv as an integer, cross-validation "
                             "object (from sklearn.model_selection) "
                             "or an iterable. Got %s." % cv)
        return _CVIterableWrapper(cv)

    return cv  # New style cv objects are passed without any modification


def train_test_split(*arrays, **options):
    """Split arrays or matrices into random train and test subsets

    Quick utility that wraps input validation and
    ``next(ShuffleSplit().split(X, y))`` and application to input data
    into a single call for splitting (and optionally subsampling) data in a
    one-liner.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    *arrays : sequence of indexables with same length / shape[0]
        allowed inputs are lists, numpy arrays, scipy-sparse
        matrices or pandas dataframes.

        .. versionadded:: 0.16
            preserves input type instead of always casting to numpy array.

    test_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
        If train size is also None, test size is set to 0.25.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    stratify : array-like or None (default is None)
        If not None, data is split in a stratified fashion, using this as
        the labels array.

    Returns
    -------
    splitting : list, length=2 * len(arrays)
        List containing train-test split of inputs.

        .. versionadded:: 0.16
            Output type is the same as the input type.
Examples -------- >>> import numpy as np >>> from sklearn.model_selection import train_test_split >>> X, y = np.arange(10).reshape((5, 2)), range(5) >>> X array([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]) >>> list(y) [0, 1, 2, 3, 4] >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, test_size=0.33, random_state=42) ... >>> X_train array([[4, 5], [0, 1], [6, 7]]) >>> y_train [2, 0, 3] >>> X_test array([[2, 3], [8, 9]]) >>> y_test [1, 4] """ n_arrays = len(arrays) if n_arrays == 0: raise ValueError("At least one array required as input") test_size = options.pop('test_size', None) train_size = options.pop('train_size', None) random_state = options.pop('random_state', None) stratify = options.pop('stratify', None) if options: raise TypeError("Invalid parameters passed: %s" % str(options)) if test_size is None and train_size is None: test_size = 0.25 arrays = indexable(*arrays) if stratify is not None: CVClass = StratifiedShuffleSplit else: CVClass = ShuffleSplit cv = CVClass(test_size=test_size, train_size=train_size, random_state=random_state) train, test = next(cv.split(X=arrays[0], y=stratify)) return list(chain.from_iterable((safe_indexing(a, train), safe_indexing(a, test)) for a in arrays)) train_test_split.__test__ = False # to avoid a pb with nosetests def _safe_split(estimator, X, y, indices, train_indices=None): """Create subset of dataset and properly handle kernels.""" if (hasattr(estimator, 'kernel') and callable(estimator.kernel) and not isinstance(estimator.kernel, GPKernel)): # cannot compute the kernel values with custom function raise ValueError("Cannot use a custom kernel function. " "Precompute the kernel matrix instead.") if not hasattr(X, "shape"): if getattr(estimator, "_pairwise", False): raise ValueError("Precomputed kernels or affinity matrices have " "to be passed as arrays or sparse matrices.") X_subset = [X[index] for index in indices] else: if getattr(estimator, "_pairwise", False): # X is a precomputed square kernel matrix if X.shape[0] != X.shape[1]: raise ValueError("X should be a square kernel matrix") if train_indices is None: X_subset = X[np.ix_(indices, indices)] else: X_subset = X[np.ix_(indices, train_indices)] else: X_subset = safe_indexing(X, indices) if y is not None: y_subset = safe_indexing(y, indices) else: y_subset = None return X_subset, y_subset def _build_repr(self): # XXX This is copied from BaseEstimator's get_params cls = self.__class__ init = getattr(cls.__init__, 'deprecated_original', cls.__init__) # Ignore varargs, kw and default values and pop self init_signature = signature(init) # Consider the constructor parameters excluding 'self' if init is object.__init__: args = [] else: args = sorted([p.name for p in init_signature.parameters.values() if p.name != 'self' and p.kind != p.VAR_KEYWORD]) class_name = self.__class__.__name__ params = dict() for key in args: # We need deprecation warnings to always be on in order to # catch deprecated param values. # This is set in utils/__init__.py but it gets overwritten # when running under python3 somehow. warnings.simplefilter("always", DeprecationWarning) try: with warnings.catch_warnings(record=True) as w: value = getattr(self, key, None) if len(w) and w[0].category == DeprecationWarning: # if the parameter is deprecated, don't show it continue finally: warnings.filters.pop(0) params[key] = value return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name))) ```
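Since the block above is essentially an API reference in code form, a short usage sketch may help. It assumes the vintage shown here, where the shuffle splitters take ``n_iter`` (later scikit-learn releases renamed this parameter ``n_splits``):

```python
import numpy as np
from sklearn.model_selection import ShuffleSplit, train_test_split

X = np.arange(20).reshape(10, 2)   # 10 samples, 2 features
y = np.array([0, 1] * 5)           # two balanced classes

# train_test_split is the one-liner wrapper defined above; passing
# stratify=y routes it through StratifiedShuffleSplit instead of ShuffleSplit.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0, stratify=y)

# The underlying splitters yield index arrays, one (train, test) pair
# per iteration.
ss = ShuffleSplit(n_iter=3, test_size=0.2, random_state=0)
for train_idx, test_idx in ss.split(X):
    print(train_idx, test_idx)
```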
Recreate the original code text:
```python
#!/usr/bin/env python

# NOTE: this script targets Python 2 (print statements, old-style except).

import sys
import os
import traceback
import time
import gc
import hashlib

#gc.disable()

sys.path.insert(0, '../../PyPDF2/')

import PyPDF2
import find_pdfrw
import pdfrw

from PyPDF2 import PdfFileReader, PdfFileWriter
from pdfrw import PdfReader, PdfWriter, PdfParseError

allfiles = (x.split('#', 1)[0] for x in open('data/allpdfs.txt').read().splitlines())
allfiles = [x for x in allfiles if x]

badfiles = []
goodfiles = []
times = []

sys.setrecursionlimit(20000)

outdir = 'testout'
if not os.path.exists(outdir):
    os.mkdir(outdir)

# Select the implementation under test: PyPDF2 or pdfrw.
if 0:
    reader, writer = PyPDF2.PdfFileReader, PyPDF2.PdfFileWriter
else:
    reader, writer = pdfrw.PdfReader, pdfrw.PdfWriter
    pdferr = pdfrw.PdfParseError


def test_pdf(pdfname):
    # Copy every page of pdfname into a new PDF under outdir.
    outfn = os.path.join(outdir, hashlib.md5(pdfname).hexdigest() + '.pdf')
    pdf_in = reader(open(pdfname))
    pdf_out = writer()
    for pg_num in range(pdf_in.numPages):
        pdf_out.addPage(pdf_in.getPage(pg_num))
    out_stream = open(outfn, "wb")
    pdf_out.write(out_stream)
    out_stream.close()


try:
    for fname in allfiles:
        #print >> sys.stderr, "File name", fname
        print "File name", fname
        sys.stdout.flush()
        start = time.time()
        try:
            test_pdf(fname)
        except Exception, s:
            sys.stderr.flush()
            ok = False
            if isinstance(s, PdfParseError):
                print '[ERROR]', s
            else:
                print traceback.format_exc()[-2000:]
                #raise
        else:
            sys.stderr.flush()
            ok = True
        elapsed = time.time() - start
        print ok and "[OK]" or "[FAIL]"
        print
        (badfiles, goodfiles)[ok].append(fname)
        times.append((elapsed, fname))
except KeyboardInterrupt:
    raise

print "Total = %s, good = %s, bad = %s" % (len(times), len(goodfiles), len(badfiles))
times.sort()
times.reverse()
f = open('log.txt', 'a')
print >> f, '\n\n\n\n\n\n***************************************************************************\n\n\n'
for fname in goodfiles:
    print >> f, 'good', fname
print >> f
for fname in badfiles:
    print >> f, 'bad', fname
print >> f
for stuff in times:
    print >> f, '%0.2f %s' % stuff
f.close()
```
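The stress script above targets Python 2 and the old PyPDF2/pdfrw APIs. For orientation only, here is the same copy-every-page round trip sketched against the current ``pypdf`` API (Python 3, ``pypdf`` >= 3 assumed; these are not the calls the script itself exercises):

```python
from pypdf import PdfReader, PdfWriter


def copy_pdf(src_path, dst_path):
    # Copy every page of src_path into a fresh PDF, as test_pdf() does above.
    reader = PdfReader(src_path)
    writer = PdfWriter()
    for page in reader.pages:
        writer.add_page(page)
    with open(dst_path, "wb") as out:
        writer.write(out)


copy_pdf("input.pdf", "output.pdf")  # hypothetical file names
```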
Provide a verbatim copy of the code:
```python # encoding: utf-8 from collections import Counter import re import os.path #from https://mathiasbynens.be/demo/url-regex diego's one VALID_URL= re.compile(r"^(?:(?:https?|ftp):\/\/)(?:\S+(?::\S*)?@)?(?:(?!10(?:\.\d{1,3}){3})(?!127(?:\.\d{1,3}){3})(?!169\.254(?:\.\d{1,3}){2})(?!192\.168(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z\\x{00a1}\-\\x{ffff}0-9]+-?)*[a-z\\x{00a1}\-\\x{ffff}0-9]+)(?:\.(?:[a-z\\x{00a1}\-\\x{ffff}0-9]+-?)*[a-z\\x{00a1}\-\\x{ffff}0-9]+)*(?:\.(?:[a-z\\x{00a1}\-\\x{ffff}]{2,})))(?::\d{2,5})?(?:\/[^\s]*)?$") VALID_LOCAL_URL = re.compile(r"^/?[a-z0-9\/_\.\-=\?]+$") VALID_FILENAME = re.compile(r'^[a-z\/][a-z0-9_\-/\.]+\.[a-z]{1,5}$') def lint(post, test_info, config): "Check the frontmatter of a given post for potential errors" results = [] # Testing is meta exists otherwise bailout if post.meta == None: results.append(['E100', test_info['E100']]) return results # Run metas tests tests = [ e101_mandatory_fields, e102_mandatory_fields_for_specific_templates, e103_field_value, e104_duplicate_value, e105_category_in_tags, e106_duplicate_spaces, e107_e108_e109_authors_formating, e110_lowercase_fields, e111_e112_local_files_exists, e113_e114_e115_banner_properly_formated, e116_value_not_null, e117_e118_e119_permanent_url_is_properly_formated, e120_valid_permanent_url_prefix, e121_file_properly_named, ] for test in tests: results += test(post, test_info, config) return results def e101_mandatory_fields(post, test_info, config): "Check for the presence of mandatory fields in the meta" results = [] for field in config.frontmatter_mandatory_fields: if field not in post.meta: results.append(['E101', test_info['E101'] % field]) return results def e102_mandatory_fields_for_specific_templates(post, test_info, config): "Check for the presense of mandatory field for specific template" results = [] if "template" not in post.meta: return results if post.meta.template in config.frontmatter_mandatory_fields_by_templates: for field in config.frontmatter_mandatory_fields_by_templates[post.meta.template]: if field not in post.meta: info = test_info['E102'] % (field, post.meta.template) results.append(['E102', info]) return results def e103_field_value(post, test_info, config): "Check if the value for specific fields match the list" results = [] for field in config.frontmatter_fields_value: if field in post.meta: if post.meta[field] not in config.frontmatter_fields_value[field]: info = test_info['E103'] % (field, post.meta[field], config.frontmatter_fields_value[field]) results.append(['E103', info]) return results def e104_duplicate_value(post, test_info, config): "Check if a value appears twice in a field list" results = [] for field in post.meta: value_field = post.meta[field] if isinstance(value_field, list): count = Counter() for elt in value_field: try: count[elt] += 1 except: continue duplicates = [] for elt in count.most_common(): if elt[1] > 1: duplicates.append(elt[0]) if len(duplicates): info = test_info['E104'] % (field, " ,".join(duplicates)) results.append(['E104', info]) return results def e105_category_in_tags(post, test_info, config): "Check if the category appears in the tag list" results = [] if "category" in post.meta and "tags" in post.meta: if post.meta.tags and post.meta.category in post.meta.tags: info = test_info['E105'] % (post.meta.category, " ,".join(post.meta.tags)) results.append(['E105', info]) return results def 
e106_duplicate_spaces(post, test_info, config):
    "Check if there are extra spaces"
    results = []
    for field in post.meta:
        value = post.meta[field]
        if not isinstance(value, list):
            value = [value]

        for elt in value:
            if isinstance(elt, str):
                extra_space = re.search(" {2,}", elt)
                if extra_space:
                    info = test_info['E106'] % (field, elt)
                    results.append(['E106', info])
    return results


def e107_e108_e109_authors_formating(post, test_info, config):
    "Check if the authors list is properly formatted"
    results = []
    if not "authors" in post.meta:
        return results

    authors = post.meta.authors
    if not isinstance(authors, list):
        info = test_info['E107'] % authors
        results.append(['E107', info])
        return results

    for author in authors:
        if ',' not in author:
            info = test_info['E108'] % authors
            results.append(['E108', info])
        else:
            firstname, lastname = author.replace(' ', '').split(',')
            if not firstname[0].isupper() or not lastname[0].isupper():
                info = test_info['E109'] % firstname
                results.append(['E109', info])
    return results


def e110_lowercase_fields(post, test_info, config):
    "Check that field values are indeed lowercase"
    results = []
    for field in config.frontmatter_field_values_must_be_lowercase:
        if field in post.meta:
            value = post.meta[field]
            if not isinstance(value, list):
                value = [value]

            for elt in value:
                if isinstance(elt, str):
                    if not elt.islower():
                        info = test_info['E110'] % (field, elt)
                        results.append(['E110', info])
    return results


def e111_e112_local_files_exists(post, test_info, config):
    "Check if local files exist"
    results = []
    site_dir = config.site_output_dir
    if "files" in post.meta:
        if not isinstance(post.meta.files, dict):
            info = test_info['E112'] % (type(post.meta.files))
            results.append(['E112', info])
            return results

        for fname, fpath in post.meta.files.items():
            if fpath[0] == '/':
                full_path = os.path.join(site_dir, fpath[1:])
                if not os.path.isfile(full_path):
                    info = test_info['E111'] % (fname, full_path)
                    results.append(['E111', info])
    return results


def e113_e114_e115_banner_properly_formated(post, test_info, config):
    "Ensure the banner is properly formatted"
    results = []
    if not "banner" in post.meta:
        return results

    banner = post.meta.banner
    if not isinstance(banner, str) and not isinstance(banner, unicode):
        info = test_info['E113'] % (type(banner))
        results.append(['E113', info])
        return results

    if "http" in banner[:6]:
        if not VALID_URL.match(banner):
            info = test_info['E114'] % (banner)
            results.append(['E114', info])
    else:
        if not VALID_FILENAME.match(banner):
            info = test_info['E115'] % (banner)
            results.append(['E115', info])
    return results


def e116_value_not_null(post, test_info, config):
    "Ensure the field values are not null"
    results = []
    for field in post.meta:
        if post.meta[field] == None:
            info = test_info['E116'] % (field)
            results.append(['E116', info])
    return results


def e117_e118_e119_permanent_url_is_properly_formated(post, test_info, config):
    "Ensure the permanent url is properly formatted"
    results = []
    if not "permanent_url" in post.meta:
        return results

    url = post.meta.permanent_url
    if not isinstance(url, str) and not isinstance(url, unicode):
        info = test_info['E117'] % (type(url))
        results.append(['E117', info])
        return results

    if url != "" and not VALID_URL.match(url) and not VALID_LOCAL_URL.match(url):
        info = test_info['E118'] % (url)
        results.append(['E118', info])

    if len(url) and url[0] != '/':
        info = test_info['E119'] % (url)
        results.append(['E119', info])
    return results


def e120_valid_permanent_url_prefix(post, test_info, config):
    "Check if the permanent url has a valid template based on its prefix"
    results = []
    if "template" not in post.meta or not "permanent_url" in post.meta:
        return results

    tlp = post.meta.template
    if tlp not in config.permanent_url_valid_prefixes_by_template:
        return results

    prefix = config.permanent_url_valid_prefixes_by_template[tlp]
    permanent_url = str(post.meta.permanent_url)
    if not permanent_url.startswith(prefix):
        info = test_info['E120'] % (permanent_url, prefix)
        results.append(['E120', info])
    return results


def e121_file_properly_named(post, test_info, config):
    "Check if the files are properly named"
    results = []
    # test if it contains -slides.pdf or -paper.pdf
    # test it contains the name of the short url (see rename tools)
    if "files" not in post.meta or not isinstance(post.meta.files, dict):
        return results

    for t, f in post.meta.files.items():
        # valid type
        if t not in config.files.valid_types:
            info = test_info['E121'] % (t)
            results.append(['E121', info])

        # valid characters
        if not VALID_URL.match(f) and not VALID_LOCAL_URL.match(f):
            info = test_info['E122'] % (f)
            results.append(['E122', info])

        # valid prefix
        valid = False
        for prefix in config.files.valid_prefixes:
            if f.startswith(prefix):
                valid = True
        if not valid:
            info = test_info['E123'] % (f, " ,".join(config.files.valid_prefixes))
            results.append(['E123', info])

        # valid suffix
        valid = False
        for suffix in config.files.valid_suffixes:
            if f.endswith(suffix):
                valid = True
        if not valid and not f.startswith("http"):
            info = test_info['E124'] % (f, " ,".join(config.files.valid_suffixes))
            results.append(['E124', info])
    return results
```
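A hypothetical harness for the checkers above (the real ``post`` comes from the site generator; ``Meta`` below is a stand-in supporting both the ``post.meta[field]`` and ``post.meta.field`` access patterns the checkers rely on):

```python
class Meta(dict):
    """Dict that also allows attribute access, mimicking the frontmatter object."""
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)


class FakePost(object):
    def __init__(self, **fields):
        self.meta = Meta(fields)


post = FakePost(category="python", tags=["python", "linting"])
test_info = {'E105': "category %s is duplicated in the tag list: %s"}

# e105 never touches config, so None is enough for this sketch.
print(e105_category_in_tags(post, test_info, None))
# -> [['E105', 'category python is duplicated in the tag list: python ,linting']]
```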
Repeat the full code snippet:
```python # -*- coding: utf-8 -*- """ Scrapy settings for tegenaria project. For simplicity, this file contains only settings considered important or commonly used. You can find more settings consulting the documentation: http://doc.scrapy.org/en/latest/topics/settings.html http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html """ import os from typing import List from prettyconf import config class Config(object): """App configuration.""" SECRET_KEY = os.environ.get("TEGENARIA_SECRET", "secret-key") # TODO: Change me APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir)) ASSETS_DEBUG = False DEBUG_TB_ENABLED = True DEBUG_TB_INTERCEPT_REDIRECTS = False SQLALCHEMY_TRACK_MODIFICATIONS = False class ProdConfig(Config): """Production configuration.""" ENV = "prod" DEBUG = False SQLALCHEMY_DATABASE_URI = "postgresql://tegenaria:tegenaria@localhost:5440/tegenaria" DEBUG_TB_ENABLED = False # Disable Debug toolbar class DevConfig(Config): """Development configuration.""" ENV = "dev" DEBUG = True DB_NAME = "dev.db" # Put the db file in project root SQLALCHEMY_DATABASE_URI = "postgresql://tegenaria_dev:tegenaria_dev@localhost:5440/tegenaria_dev" ASSETS_DEBUG = True # Don't bundle/minify static assets class TestConfig(Config): """Testing configuration.""" TESTING = True DEBUG = True SQLALCHEMY_DATABASE_URI = "sqlite://" WTF_CSRF_ENABLED = False # Allows form testing GOOGLE_MATRIX_API_KEYS = config("GOOGLE_MATRIX_API_KEYS", cast=config.list, default=[]) # type: List[str] BOT_NAME = "tegenaria" SPIDER_MODULES = ["tegenaria.spiders"] NEWSPIDER_MODULE = "tegenaria.spiders" # FEED_URI = 'file:///tmp/tegenaria/%(name)s.json' # FEED_FORMAT = 'jsonlines' # Crawl responsibly by identifying yourself (and your website) on the user-agent # USER_AGENT = 'tegenaria (+http://www.yourdomain.com)' # Configure maximum concurrent requests performed by Scrapy (default: 16) CONCURRENT_REQUESTS = 1 # Configure a delay for requests for the same website (default: 0) # See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay # See also autothrottle settings and docs DOWNLOAD_DELAY = config("DOWNLOAD_DELAY", cast=float, default=0.5) # The download delay setting will honor only one of: # CONCURRENT_REQUESTS_PER_DOMAIN=16 # CONCURRENT_REQUESTS_PER_IP=16 # Disable cookies (enabled by default) # COOKIES_ENABLED=False # Disable Telnet Console (enabled by default) # TELNETCONSOLE_ENABLED=False # Override the default request headers: # DEFAULT_REQUEST_HEADERS = { # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', # 'Accept-Language': 'en', # } # Enable or disable spider middlewares # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html # SPIDER_MIDDLEWARES = { # 'tegenaria.middlewares.MyCustomSpiderMiddleware': 543, # } # Enable or disable downloader middlewares # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html # DOWNLOADER_MIDDLEWARES = { # 'tegenaria.middlewares.MyCustomDownloaderMiddleware': 543, # } # Enable or disable extensions # See http://scrapy.readthedocs.org/en/latest/topics/extensions.html # EXTENSIONS = { # 'scrapy.telnet.TelnetConsole': None, # } # Configure item pipelines # See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html ITEM_PIPELINES = { "tegenaria.pipelines.ApartmentPipeline": 300, } # Enable and configure the AutoThrottle 
extension (disabled by default) # See http://doc.scrapy.org/en/latest/topics/autothrottle.html # NOTE: AutoThrottle will honour the standard settings for concurrency and delay # AUTOTHROTTLE_ENABLED=True # The initial download delay # AUTOTHROTTLE_START_DELAY=5 # The maximum download delay to be set in case of high latencies # AUTOTHROTTLE_MAX_DELAY=60 # Enable showing throttling stats for every response received: # AUTOTHROTTLE_DEBUG=False # Enable and configure HTTP caching (disabled by default) # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings HTTPCACHE_ENABLED = True # HTTPCACHE_EXPIRATION_SECS = 0 # HTTPCACHE_DIR = 'httpcache' # HTTPCACHE_IGNORE_HTTP_CODES = [] # HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' HTTPCACHE_POLICY = "scrapy.extensions.httpcache.RFC2616Policy" # https://doc.scrapy.org/en/latest/topics/extensions.html#closespider-errorcount CLOSESPIDER_ERRORCOUNT = 1 ```
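The file doubles as Flask configuration (the ``Config`` classes) and Scrapy settings (the module-level constants). A typical, assumed way to consume the Flask side is an app factory; the import path below presumes the module lives at ``tegenaria.settings``:

```python
from flask import Flask

from tegenaria.settings import DevConfig, ProdConfig  # assumed module path


def create_app(config_object=DevConfig):
    app = Flask(__name__)
    app.config.from_object(config_object)  # reads the UPPERCASE attributes
    return app


app = create_app(ProdConfig)  # or DevConfig / TestConfig
```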
```python #!/usr/bin/env python """ parallel_tree.py - Version 1.0 2013-09-22 Run two tasks in parallel using the pi_trees library. Created for the Pi Robot Project: http://www.pirobot.org Copyright (c) 2014 Patrick Goebel. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details at: http://www.gnu.org/licenses/gpl.html """ from pi_trees_lib.pi_trees_lib import * import time class ParallelExample(): def __init__(self): # The root node BEHAVE = Sequence("behave") # The message to print message = "Take me to your leader!" # How high the counting task should count n_count = 10 # Create a PrintMessage() task as defined later in the script PRINT_MESSAGE = PrintMessage("PRINT_MESSAGE", message) # Create a Count() task, also defined later in the script COUNT_TO_10 = Count("COUNT_TO_10", n_count) # Initialize the ParallelAll task PARALLEL_DEMO = ParallelAll("PRINT_AND_COUNT") # Add the two subtasks to the Parallel task PARALLEL_DEMO.add_child(PRINT_MESSAGE) PARALLEL_DEMO.add_child(COUNT_TO_10) # Add the Parallel task to the root task BEHAVE.add_child(PARALLEL_DEMO) # Display the behavior tree print "Behavior Tree Structure" print_tree(BEHAVE) # Initialize the overall status status = None # Run the tree while not status == TaskStatus.SUCCESS: status = BEHAVE.run() time.sleep(0.1) class PrintMessage(Task): def __init__(self, name, message, *args, **kwargs): super(PrintMessage, self).__init__(name, *args, **kwargs) self.name = name self.message = message self.words = message.split() print "Creating Print Message task for", self.message def run(self): try: word = self.words.pop(0) print word time.sleep(0.1) if self.words == []: return TaskStatus.SUCCESS return TaskStatus.RUNNING except: return TaskStatus.SUCCESS def reset(self): self.words = self.message.split() class Count(Task): def __init__(self, name, number, *args, **kwargs): super(Count, self).__init__(name, *args, **kwargs) self.name = name self.number = number self.count = 0 print "Creating counting task to", self.number def run(self): if self.count == self.number: return TaskStatus.SUCCESS else: time.sleep(0.1) self.count += 1 print self.count if self.count == self.number: return TaskStatus.SUCCESS return TaskStatus.RUNNING def reset(self): self.count = 0 if __name__ == '__main__': try: tree = ParallelExample() except KeyboardInterrupt: pass ```
Provide an exact copy of the source code:
```python import pytest def test_default(): from vishnu.backend import Redis from vishnu.backend.config.redis import DEFAULT_HOST, DEFAULT_PORT, DEFAULT_DB config = Redis() assert config.host == DEFAULT_HOST assert config.port == DEFAULT_PORT assert config.db == DEFAULT_DB def test_custom_host(): from vishnu.backend import Redis from vishnu.backend.config.redis import DEFAULT_PORT, DEFAULT_DB custom_host = "memcache.cloud" config = Redis(host=custom_host) assert config.host == custom_host assert config.port == DEFAULT_PORT assert config.db == DEFAULT_DB def test_invalid_host(): from vishnu.backend import Redis with pytest.raises(TypeError) as exp: Redis(host=23) def test_custom_port(): from vishnu.backend import Redis from vishnu.backend.config.redis import DEFAULT_HOST, DEFAULT_DB custom_port = 6380 config = Redis(port=custom_port) assert config.host == DEFAULT_HOST assert config.port == custom_port assert config.db == DEFAULT_DB def test_invalid_port(): from vishnu.backend import Redis with pytest.raises(TypeError) as exp: Redis(port="string") with pytest.raises(TypeError) as exp: Redis(port=-100) def test_invalid_db(): from vishnu.backend import Redis with pytest.raises(TypeError) as exp: Redis(db=-1) with pytest.raises(TypeError) as exp: Redis(db="db") def test_custom_db(): from vishnu.backend import Redis custom_db = 1 config = Redis(db=custom_db) assert config.db == custom_db ```
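The tests above pin down a small validated-config object. One assumed way such a config might feed an actual client (``redis-py``; vishnu's own backend wiring is not shown here):

```python
# Hypothetical glue, not part of vishnu's tests: hand the validated
# settings to a redis-py client.
import redis

from vishnu.backend import Redis as RedisConfig

cfg = RedisConfig()  # falls back to DEFAULT_HOST / DEFAULT_PORT / DEFAULT_DB
client = redis.StrictRedis(host=cfg.host, port=cfg.port, db=cfg.db)
```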
Repeat the code precisely:
```python
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

athlete_teams = db.Table('athlete_teams',
    db.Column('athlete_id', db.Integer, db.ForeignKey('athletes.id')),
    db.Column('team_id', db.Integer, db.ForeignKey('teams.id'))
)


class Athlete(db.Model):
    __tablename__ = 'athletes'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
    age = db.Column(db.Integer)
    height = db.Column(db.String(4))
    weight = db.Column(db.Integer)
    clean_and_jerk = db.Column(db.String(32))
    snatch = db.Column(db.String(32))
    deadlift = db.Column(db.String(32))
    back_squat = db.Column(db.String(32))
    max_pullups = db.Column(db.Integer)
    run_5k = db.Column(db.String(32))

    def __repr__(self):
        return "{'name' : '%s'}" % self.name


class Team(db.Model):
    __tablename__ = 'teams'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
    captain = db.Column(db.String(64))
    athletes = db.relationship('Athlete', secondary=athlete_teams,
                               backref=db.backref('teams', lazy='dynamic'))

    def __repr__(self):
        return "<%s, %s>" % (self.name, self.athletes)
```
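A minimal usage sketch for the models, assuming an in-memory SQLite database; the app wiring is not part of the snippet above:

```python
from flask import Flask

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://"
db.init_app(app)

with app.app_context():
    db.create_all()
    alex = Athlete(name="Alex", age=30, height="5'9", weight=170)
    team = Team(name="Barbell Club", captain="Alex")
    team.athletes.append(alex)    # writes a row into athlete_teams
    db.session.add(team)          # cascades to the appended athlete
    db.session.commit()
    print(alex.teams.all())       # lazy='dynamic' backref returns a query
```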
Recreate the original code text:
```python import scrapy from ..items import Article, Comment class GuardianSpider(scrapy.Spider): name = "guardian" allowed_domains = ["theguardian.com"] start_urls = [ "http://www.theguardian.com/world/2016/feb/09/draft-snoopers-charter-fails-on-spying-powers-and-privacy-protections" ] def __init__(self, job_id=-1, url="http://www.theguardian.com/world/2016/feb/09/draft-snoopers-charter-fails-on-spying-powers-and-privacy-protections" ): self.start_urls = [url] self.job_id = job_id def parse(self, response): ''' Parse the article, and yield a request to parse the comments if there is a comment section. ''' article = Article() article['job_id'] = self.job_id article['title'] = response.xpath('//h1[@itemprop="headline"]/text()').extract()[0] article['desc'] = response.xpath('//meta[@itemprop="description"]/@content').extract()[0] author_data = response.xpath('//span[@itemprop="author"]//span[@itemprop="name"]/text()').extract() article['author'] = author_data[0] if len(author_data) > 0 else "" article['date_published'] = response.xpath('//time[@itemprop="datePublished"]/@datetime').extract()[0] article['body'] = response.xpath('//div[@itemprop="articleBody"]//p/text()').extract() article['keywords'] = response.xpath('//a[@itemprop="keywords"]/text()').extract() article['comments_url'] = response.xpath('//a[@data-link-name="View all comments"]/@href').extract()[0] article['comments'] = [] url = article['comments_url'] request = scrapy.Request(url, callback=self.parse_comments) request.meta['article'] = article yield request def parse_comments(self, response): article = response.meta['article'] page_comments = response.xpath('//li[@itemtype="http://schema.org/Comment"]') for c in page_comments: comment = Comment() comment['comment_id'] = c.xpath('@data-comment-id').extract() comment['author'] = c.xpath('@data-comment-author').extract() comment['author_id'] = c.xpath('@data-comment-author-id').extract() comment['reply_count'] = c.xpath('@data-comment-replies').extract() comment['timestamp'] = c.xpath('@data-comment-timestamp').extract() comment['reply_to_author'] = c.xpath('.//span[@class="d-comment__reply-to-author"]').extract() comment['reply_to_comment'] = c.xpath('.//a[contains(@href, "#comment-")]/@href').extract() comment['content'] = c.xpath('.//div[@itemprop="text"]/descendant-or-self::text()').extract() article['comments'].append(comment) current_page = response.xpath('//span[@tabindex="0"]/text()').extract()[0] next_page = int(current_page) + 1 xpath_to_next_page_url = "//a[@data-page='{0}']/@href".format(next_page) next_page_url = response.xpath(xpath_to_next_page_url).extract() if next_page_url: url = next_page_url[0] request = scrapy.Request(url, callback=self.parse_comments) request.meta['article'] = article yield request else: yield article ```
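To actually run the spider, the usual entry points are the Scrapy CLI or ``CrawlerProcess``. The harness below is assumed, not part of the project; note that spider keyword arguments are forwarded to ``__init__``:

```python
# Hypothetical runner: crawl a single article and export the items as JSON.
from scrapy.crawler import CrawlerProcess

process = CrawlerProcess(settings={
    # FEEDS is the Scrapy >= 2.1 spelling; older versions used FEED_URI/FEED_FORMAT.
    "FEEDS": {"articles.json": {"format": "json"}},
})
# Keyword arguments are forwarded to GuardianSpider.__init__.
process.crawl(GuardianSpider, job_id=1,
              url="http://www.theguardian.com/world/2016/feb/09/draft-snoopers-charter-fails-on-spying-powers-and-privacy-protections")
process.start()
```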
```python # -*- coding: utf-8 -*- from kivy.uix.widget import Widget from kivy.clock import Clock from kivy.graphics import Color, Callback, Rotate, PushMatrix, PopMatrix, Translate, Quad from kivy.graphics.opengl import glBlendFunc, GL_SRC_ALPHA, GL_ONE, GL_ZERO, GL_SRC_COLOR, GL_ONE_MINUS_SRC_COLOR, GL_ONE_MINUS_SRC_ALPHA, GL_DST_ALPHA, GL_ONE_MINUS_DST_ALPHA, GL_DST_COLOR, GL_ONE_MINUS_DST_COLOR from kivy.core.image import Image from kivy.logger import Logger from xml.dom.minidom import parse as parse_xml from .utils import random_variance, random_color_variance from kivy.properties import NumericProperty, BooleanProperty, ListProperty, StringProperty, ObjectProperty import sys import os import math __all__ = ['EMITTER_TYPE_GRAVITY', 'EMITTER_TYPE_RADIAL', 'Particle', 'ParticleSystem'] EMITTER_TYPE_GRAVITY = 0 EMITTER_TYPE_RADIAL = 1 BLEND_FUNC = {0: GL_ZERO, 1: GL_ONE, 0x300: GL_SRC_COLOR, 0x301: GL_ONE_MINUS_SRC_COLOR, 0x302: GL_SRC_ALPHA, 0x303: GL_ONE_MINUS_SRC_ALPHA, 0x304: GL_DST_ALPHA, 0x305: GL_ONE_MINUS_DST_ALPHA, 0x306: GL_DST_COLOR, 0x307: GL_ONE_MINUS_DST_COLOR } class Particle(object): x, y, rotation, current_time = -256, -256, 0, 0 scale, total_time = 1.0, 0. color = [1.0, 1.0, 1.0, 1.0] color_delta = [0.0, 0.0, 0.0, 0.0] start_x, start_y, velocity_x, velocity_y = 0, 0, 0, 0 radial_acceleration, tangent_acceleration = 0, 0 emit_radius, emit_radius_delta = 0, 0 emit_rotation, emit_rotation_delta = 0, 0 rotation_delta, scale_delta = 0, 0 class ParticleSystem(Widget): max_num_particles = NumericProperty(200) life_span = NumericProperty(2) texture = ObjectProperty(None) texture_path = StringProperty(None) life_span_variance = NumericProperty(0) start_size = NumericProperty(16) start_size_variance = NumericProperty(0) end_size = NumericProperty(16) end_size_variance = NumericProperty(0) emit_angle = NumericProperty(0) emit_angle_variance = NumericProperty(0) start_rotation = NumericProperty(0) start_rotation_variance = NumericProperty(0) end_rotation = NumericProperty(0) end_rotation_variance = NumericProperty(0) emitter_x_variance = NumericProperty(100) emitter_y_variance = NumericProperty(100) gravity_x = NumericProperty(0) gravity_y = NumericProperty(0) speed = NumericProperty(0) speed_variance = NumericProperty(0) radial_acceleration = NumericProperty(100) radial_acceleration_variance = NumericProperty(0) tangential_acceleration = NumericProperty(0) tangential_acceleration_variance = NumericProperty(0) max_radius = NumericProperty(100) max_radius_variance = NumericProperty(0) min_radius = NumericProperty(50) rotate_per_second = NumericProperty(0) rotate_per_second_variance = NumericProperty(0) start_color = ListProperty([1., 1., 1., 1.]) start_color_variance = ListProperty([1., 1., 1., 1.]) end_color = ListProperty([1., 1., 1., 1.]) end_color_variance = ListProperty([1., 1., 1., 1.]) blend_factor_source = NumericProperty(770) blend_factor_dest = NumericProperty(1) emitter_type = NumericProperty(0) update_interval = NumericProperty(1. / 30.) 
_is_paused = BooleanProperty(False) def __init__(self, config, **kwargs): super(ParticleSystem, self).__init__(**kwargs) self.capacity = 0 self.particles = list() self.particles_dict = dict() self.emission_time = 0.0 self.frame_time = 0.0 self.num_particles = 0 if config is not None: self._parse_config(config) self.emission_rate = self.max_num_particles / self.life_span self.initial_capacity = self.max_num_particles self.max_capacity = self.max_num_particles self._raise_capacity(self.initial_capacity) with self.canvas.before: Callback(self._set_blend_func) with self.canvas.after: Callback(self._reset_blend_func) Clock.schedule_once(self._update, self.update_interval) def start(self, duration=sys.maxint): if self.emission_rate != 0: self.emission_time = duration def stop(self, clear=False): self.emission_time = 0.0 if clear: self.num_particles = 0 self.particles_dict = dict() self.canvas.clear() def on_max_num_particles(self, instance, value): self.max_capacity = value if self.capacity < value: self._raise_capacity(self.max_capacity - self.capacity) elif self.capacity > value: self._lower_capacity(self.capacity - self.max_capacity) self.emission_rate = self.max_num_particles / self.life_span def on_texture(self, instance, value): for p in self.particles: try: self.particles_dict[p]['rect'].texture = self.texture except KeyError: # if particle isn't initialized yet, you can't change its texture. pass def on_life_span(self, instance, value): self.emission_rate = self.max_num_particles / value def _set_blend_func(self, instruction): #glBlendFunc(self.blend_factor_source, self.blend_factor_dest) #glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) glBlendFunc(GL_SRC_ALPHA, GL_ONE) def _reset_blend_func(self, instruction): glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) def _parse_config(self, config): self._config = parse_xml(config) texture_path = self._parse_data('texture', 'name') config_dir_path = os.path.dirname(os.path.abspath(config)) path = os.path.join(config_dir_path, texture_path) if os.path.exists(path): self.texture_path = path else: self.texture_path = texture_path self.texture = Image(self.texture_path).texture self.emitter_x = float(self._parse_data('sourcePosition', 'x')) self.emitter_y = float(self._parse_data('sourcePosition', 'y')) self.emitter_x_variance = float(self._parse_data('sourcePositionVariance', 'x')) self.emitter_y_variance = float(self._parse_data('sourcePositionVariance', 'y')) self.gravity_x = float(self._parse_data('gravity', 'x')) self.gravity_y = float(self._parse_data('gravity', 'y')) self.emitter_type = int(self._parse_data('emitterType')) self.max_num_particles = int(self._parse_data('maxParticles')) self.life_span = max(0.01, float(self._parse_data('particleLifeSpan'))) self.life_span_variance = float(self._parse_data('particleLifespanVariance')) self.start_size = float(self._parse_data('startParticleSize')) self.start_size_variance = float(self._parse_data('startParticleSizeVariance')) self.end_size = float(self._parse_data('finishParticleSize')) self.end_size_variance = float(self._parse_data('FinishParticleSizeVariance')) self.emit_angle = math.radians(float(self._parse_data('angle'))) self.emit_angle_variance = math.radians(float(self._parse_data('angleVariance'))) self.start_rotation = math.radians(float(self._parse_data('rotationStart'))) self.start_rotation_variance = math.radians(float(self._parse_data('rotationStartVariance'))) self.end_rotation = math.radians(float(self._parse_data('rotationEnd'))) self.end_rotation_variance = 
math.radians(float(self._parse_data('rotationEndVariance'))) self.speed = float(self._parse_data('speed')) self.speed_variance = float(self._parse_data('speedVariance')) self.radial_acceleration = float(self._parse_data('radialAcceleration')) self.radial_acceleration_variance = float(self._parse_data('radialAccelVariance')) self.tangential_acceleration = float(self._parse_data('tangentialAcceleration')) self.tangential_acceleration_variance = float(self._parse_data('tangentialAccelVariance')) self.max_radius = float(self._parse_data('maxRadius')) self.max_radius_variance = float(self._parse_data('maxRadiusVariance')) self.min_radius = float(self._parse_data('minRadius')) self.rotate_per_second = math.radians(float(self._parse_data('rotatePerSecond'))) self.rotate_per_second_variance = math.radians(float(self._parse_data('rotatePerSecondVariance'))) self.start_color = self._parse_color('startColor') self.start_color_variance = self._parse_color('startColorVariance') self.end_color = self._parse_color('finishColor') self.end_color_variance = self._parse_color('finishColorVariance') self.blend_factor_source = self._parse_blend('blendFuncSource') self.blend_factor_dest = self._parse_blend('blendFuncDestination') def _parse_data(self, name, attribute='value'): return self._config.getElementsByTagName(name)[0].getAttribute(attribute) def _parse_color(self, name): return [float(self._parse_data(name, 'red')), float(self._parse_data(name, 'green')), float(self._parse_data(name, 'blue')), float(self._parse_data(name, 'alpha'))] def _parse_blend(self, name): value = int(self._parse_data(name)) return BLEND_FUNC[value] def pause(self): self._is_paused = True def resume(self): self._is_paused = False Clock.schedule_once(self._update, self.update_interval) def _update(self, dt): self._advance_time(dt) self._render() if not self._is_paused: Clock.schedule_once(self._update, self.update_interval) def _create_particle(self): return Particle() def _init_particle(self, particle): life_span = random_variance(self.life_span, self.life_span_variance) if life_span <= 0.0: return particle.current_time = 0.0 particle.total_time = life_span particle.x = random_variance(self.emitter_x, self.emitter_x_variance) particle.y = random_variance(self.emitter_y, self.emitter_y_variance) particle.start_x = self.emitter_x particle.start_y = self.emitter_y angle = random_variance(self.emit_angle, self.emit_angle_variance) speed = random_variance(self.speed, self.speed_variance) particle.velocity_x = speed * math.cos(angle) particle.velocity_y = speed * math.sin(angle) particle.emit_radius = random_variance(self.max_radius, self.max_radius_variance) particle.emit_radius_delta = (self.max_radius - self.min_radius) / life_span particle.emit_rotation = random_variance(self.emit_angle, self.emit_angle_variance) particle.emit_rotation_delta = random_variance(self.rotate_per_second, self.rotate_per_second_variance) particle.radial_acceleration = random_variance(self.radial_acceleration, self.radial_acceleration_variance) particle.tangent_acceleration = random_variance(self.tangential_acceleration, self.tangential_acceleration_variance) start_size = random_variance(self.start_size, self.start_size_variance) end_size = random_variance(self.end_size, self.end_size_variance) start_size = max(0.1, start_size) end_size = max(0.1, end_size) particle.scale = start_size / self.texture.width particle.scale_delta = ((end_size - start_size) / life_span) / self.texture.width # colors start_color = random_color_variance(self.start_color, 

    def _advance_particle(self, particle, passed_time):
        passed_time = min(passed_time, particle.total_time - particle.current_time)
        particle.current_time += passed_time

        if self.emitter_type == EMITTER_TYPE_RADIAL:
            particle.emit_rotation += particle.emit_rotation_delta * passed_time
            particle.emit_radius -= particle.emit_radius_delta * passed_time
            particle.x = self.emitter_x - math.cos(particle.emit_rotation) * particle.emit_radius
            particle.y = self.emitter_y - math.sin(particle.emit_rotation) * particle.emit_radius

            if particle.emit_radius < self.min_radius:
                particle.current_time = particle.total_time
        else:
            distance_x = particle.x - particle.start_x
            distance_y = particle.y - particle.start_y
            distance_scalar = math.sqrt(distance_x * distance_x + distance_y * distance_y)
            if distance_scalar < 0.01:
                distance_scalar = 0.01

            radial_x = distance_x / distance_scalar
            radial_y = distance_y / distance_scalar
            tangential_x = radial_x
            tangential_y = radial_y

            radial_x *= particle.radial_acceleration
            radial_y *= particle.radial_acceleration

            # rotate the radial direction by 90 degrees to get the tangent
            new_y = tangential_x
            tangential_x = -tangential_y * particle.tangent_acceleration
            tangential_y = new_y * particle.tangent_acceleration

            particle.velocity_x += passed_time * (self.gravity_x + radial_x + tangential_x)
            particle.velocity_y += passed_time * (self.gravity_y + radial_y + tangential_y)
            particle.x += particle.velocity_x * passed_time
            particle.y += particle.velocity_y * passed_time

        particle.scale += particle.scale_delta * passed_time
        particle.rotation += particle.rotation_delta * passed_time
        particle.color = [particle.color[i] + particle.color_delta[i] * passed_time for i in range(4)]

    def _raise_capacity(self, by_amount):
        old_capacity = self.capacity
        new_capacity = min(self.max_capacity, self.capacity + by_amount)

        for i in range(int(new_capacity - old_capacity)):
            self.particles.append(self._create_particle())

        self.num_particles = int(new_capacity)
        self.capacity = new_capacity

    def _lower_capacity(self, by_amount):
        old_capacity = self.capacity
        new_capacity = max(0, self.capacity - by_amount)

        for i in range(int(old_capacity - new_capacity)):
            try:
                self.canvas.remove(self.particles_dict[self.particles.pop()]['rect'])
            except (KeyError, IndexError):
                # the particle was never rendered, so it has no canvas
                # instructions to remove
                pass

        self.num_particles = int(new_capacity)
        self.capacity = new_capacity

    def _advance_time(self, passed_time):
        particle_index = 0

        # advance existing particles
        while particle_index < self.num_particles:
            particle = self.particles[particle_index]
            if particle.current_time < particle.total_time:
                self._advance_particle(particle, passed_time)
                particle_index += 1
            else:
                # swap the expired particle out of the active range
                if particle_index != self.num_particles - 1:
                    next_particle = self.particles[self.num_particles - 1]
                    self.particles[self.num_particles - 1] = particle
                    self.particles[particle_index] = next_particle
                self.num_particles -= 1
                if self.num_particles == 0:
                    Logger.debug('Particle: COMPLETE')

        # create and advance new particles
        if self.emission_time > 0:
            time_between_particles = 1.0 / self.emission_rate
            self.frame_time += passed_time

            while self.frame_time > 0:
                if self.num_particles < self.max_capacity:
                    if self.num_particles == self.capacity:
                        self._raise_capacity(self.capacity)
                    particle = self.particles[self.num_particles]
                    self.num_particles += 1
                    self._init_particle(particle)
                    self._advance_particle(particle, self.frame_time)
                self.frame_time -= time_between_particles

            if self.emission_time != sys.maxsize:
                self.emission_time = max(0.0, self.emission_time - passed_time)
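
    # each live particle gets one cached Color/Translate/Rotate/Quad group in
    # particles_dict, so later frames only mutate instruction properties
    # instead of rebuilding the canvas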

    def _render(self):
        if self.num_particles == 0:
            return
        for i in range(self.num_particles):
            particle = self.particles[i]
            size = (self.texture.size[0] * particle.scale,
                    self.texture.size[1] * particle.scale)
            if particle not in self.particles_dict:
                # first frame for this particle: build its canvas instructions
                self.particles_dict[particle] = dict()
                color = particle.color[:]
                with self.canvas:
                    self.particles_dict[particle]['color'] = Color(color[0], color[1], color[2], color[3])
                    PushMatrix()
                    self.particles_dict[particle]['translate'] = Translate()
                    self.particles_dict[particle]['rotate'] = Rotate()
                    self.particles_dict[particle]['rotate'].set(particle.rotation, 0, 0, 1)
                    self.particles_dict[particle]['rect'] = Quad(
                        texture=self.texture,
                        points=(-size[0] * 0.5, -size[1] * 0.5,
                                size[0] * 0.5, -size[1] * 0.5,
                                size[0] * 0.5, size[1] * 0.5,
                                -size[0] * 0.5, size[1] * 0.5))
                    self.particles_dict[particle]['translate'].xy = (particle.x, particle.y)
                    PopMatrix()
            else:
                # later frames: update the cached instructions in place
                self.particles_dict[particle]['rotate'].angle = particle.rotation
                self.particles_dict[particle]['translate'].xy = (particle.x, particle.y)
                self.particles_dict[particle]['color'].rgba = particle.color
                self.particles_dict[particle]['rect'].points = (
                    -size[0] * 0.5, -size[1] * 0.5,
                    size[0] * 0.5, -size[1] * 0.5,
                    size[0] * 0.5, size[1] * 0.5,
                    -size[0] * 0.5, size[1] * 0.5)
```
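
For reference, a minimal sketch of how the class above might be driven from a Kivy app. It assumes the module is importable as `kivyparticle` and that an `explosion.pex` Particle Designer config sits next to the script; both names are hypothetical.

```python
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout

from kivyparticle import ParticleSystem  # hypothetical module name


class DemoApp(App):
    def build(self):
        root = FloatLayout()
        # parse the .pex config, position the emitter, and start emitting
        ps = ParticleSystem('explosion.pex')  # hypothetical config file
        ps.emitter_x = 400.0
        ps.emitter_y = 300.0
        ps.start()
        root.add_widget(ps)
        return root


if __name__ == '__main__':
    DemoApp().run()
```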