code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
import json import logging import sys import traceback from collections import defaultdict from greent.concept import Concept from greent.concept import ConceptModel from greent.service import Service from greent.util import LoggingUtil from neo4j.v1 import GraphDatabase logger = logging.getLogger('type_graph') class TypeGraph(Service): """ A graph of * nomenclature systems * conceptual domains in which they participate and * executable transitions translating from one nomenclature system to another Transitions specify the semantics by which operations convert between nomanclature systems. Each nomenclature system is referred to as a Type and recieves a label in the graph. Each concept is created as a node. Each type node with an associated concept * Receives a label for the connected concept * Is the source of an is_a link connecting to the concept node. This enables queries between concept spaces to return alternative paths of operations """ def __init__(self, service_context, concept_model_name="biolink-model", debug=False): """ Construct a type graph, registering labels for concepts and types. """ super(TypeGraph, self).__init__("rosetta-graph", service_context) if debug: logger.setLevel (logging.DEBUG) self.initialize_connection() self.type_to_concept = {} self.concept_model_name = concept_model_name self.set_concept_model () self.TYPE = "Type" self.CONCEPT = "Concept" def initialize_connection(self): """ Connect to the database. """ config = self.get_config () username = config.get ("username") password = config.get ("password") logger.debug(f" -+ Connecting to graph database: {self.url}") self.driver = GraphDatabase.driver(self.url) self.db = GraphDB (self.driver.session ()) def delete_all(self): """ Delete the type-graph only. Leave result graphs alone. 
""" try: self.db.exec ("MATCH (n:Concept) DETACH DELETE n") self.db.exec ("MATCH (n:Type) DETACH DELETE n") self.initialize_connection() except Exception as e: traceback.print_exc() def set_concept_model(self): """ Build the concept model. """ logger.debug("-- Initializing graph semantic concepts.") self.concept_model = ConceptModel (self.concept_model_name) for concept_name, concept in self.concept_model.items(): if len(concept.id_prefixes) > 0: logger.debug(" -+ concept {} <= {}".format( concept_name, concept.id_prefixes)) for identifier in concept.id_prefixes: self.type_to_concept[identifier] = concept def build_concept (self, concept): """ Build a concept and its semantic backstory including is_a hierarcy. """ if concept: self._find_or_create_concept (concept) if concept.is_a: """ If it has an ancestor, create a node for the ancestor and link the nodes. """ base_class = self._find_or_create_concept (concept.is_a) self.db.create_relationship ( name_a=concept.name, type_a=self.CONCEPT, properties={ "name" : "is_a" }, name_b=concept.is_a.name, type_b=self.CONCEPT) """ Recurse. """ self.build_concept (concept.is_a) def find_or_create(self, name, iri=None): """ Find a type node, creating it if necessary. Link it to a concept. """ properties = { "name" : name, "iri" : iri } result = self.db.get_node (properties, self.TYPE) n = result.peek () if not n: n = self.db.create_type (properties) concept = self.type_to_concept.get(name) if concept: logger.debug(f" adding node {name} to concept {concept.name}") concept_node = self._find_or_create_concept (concept) self.build_concept (concept) self.db.add_label (properties={ "name" : name }, node_type=self.TYPE, label=concept.name) self.db.create_relationship ( name_a=concept.name, type_a=self.CONCEPT, properties={ "name" : "is_a" }, name_b=name, type_b=self.TYPE) return n def add_concepts_edge (self, a, b, predicate, op): """ Add an edge between two concpepts. Include the operation to call to effect the transition. 
""" a_concept = self.concept_model.get (a) b_concept = self.concept_model.get (b) assert a_concept, f"Unable to find concept {a}" assert b_concept, f"Unable to find concept {b}" a_concept_node = self._find_or_create_concept(a_concept) b_concept_node = self._find_or_create_concept(b_concept) self.db.create_relationship (name_a=a_concept.name, type_a=self.CONCEPT, properties={ "name" : predicate, "predicate" : predicate, "op" : op, "enabled" : True }, name_b=b_concept.name, type_b=self.CONCEPT) def _find_or_create_concept(self, concept): """ Find or create a concept object which will be linked to member type object. """ concept_node = None try: properties = { "name" : concept.name } result = self.db.get_node (properties, node_type=self.CONCEPT) concept_node = result.peek () if not concept_node: concept_node = self.db.create_node(properties, node_type=self.CONCEPT) self.db.add_label(properties, node_type=self.CONCEPT, label=concept.name) except: print ("concept-> {}".format (concept.name)) traceback.print_exc () traceback.print_stack () return concept_node def run_cypher_query(self,query): """ Execute a cypher query and return the result set. """ result = None try: result = self.db.query(query, data_contents=True) except TransactionException: print("Error Generated by:") print (query) result = None return result def get_transitions(self, query): """ Execute a cypher query and walk the results to build a set of transitions to execute. The query should be such that it returns a path (node0-relation0-node1-relation1-node2), and an array of the relation start nodes. For the path above, start nodes like (node0,node1) would indicate a unidirectional path, while (node0,node2) would indicate an end-based path meeting in the middle. Each node in the path can be described with an arbitrary node index. Note that this index does not have to correspond to the order of calling or any structural property of the graph. It simply points to a particular node in the call map. 
Returns: nodes: A map from a node index to the concept. transitions: a map from a node index to an (operation, output index) pair """ graphs=[] result = self.db.query(query) for row in result: nodes = {} transitions = {} path = row[0] result_rows = [] nodes = { i : n for i, n in enumerate(path.nodes) } for i, element in enumerate(path): result_rows.append (nodes[i]) result_rows.append (element) if i == len(path) - 1: result_rows.append (nodes[i+1]) for i, element in enumerate(result_rows): if i % 2 == 0: #node nodenum = int(i / 2) if logger.isEnabledFor (logging.DEBUG): logger.debug (f"|| Node ||> i:{i} id:{element.id} props:{element.properties}") nodes[nodenum] = element.properties['name'] else: #relationship relnum = int((i-1)/2) if logger.isEnabledFor (logging.DEBUG): logger.debug (f" <>Rel<> > i:{i} relnum:{relnum} strt:{element.start} end:{element.end} props:{element.properties}") if element.start == result_rows[i-1].id: from_node=relnum to_node = relnum+1 elif element.start == result_rows[i+1].id: from_node = relnum+1 to_node = relnum transitions[from_node] = { 'link' : element.properties['predicate'], 'op' : element.properties['op'], 'to' : to_node } graphs.append( (nodes, transitions) ) if logger.isEnabledFor (logging.DEBUG): logger.debug (f"{json.dumps(graphs, indent=2)}") return graphs def get_knowledge_map_programs(self, query): """ Execute a cypher query and walk the results to build a set of transitions to execute. The query should be such that it returns a path (node0-relation0-node1-relation1-node2), and an array of the relation start nodes. This algorithm focuses on linear paths. Returns: a list of list of Frame. """ """ A list of possible executable pathways enacting the input query. """ programs = [] """ Query the database for paths. """ result = self.db.query(query) for row in result: logger.debug (f"row> {row} {type(row)}") path = row[0] """ One path corresponds to one program, or stack of frames. 
""" program = defaultdict(Frame) node_map = { node.id : node.properties for i, node in enumerate(path.nodes) } for i, relationship in enumerate(path): start_node_name = node_map[relationship.start]['name'] logger.debug (f" -+ adding frame {start_node_name}") frame = program[start_node_name] frame.name = start_node_name print (f" props: {relationship.properties}") if 'op' in relationship.properties: frame.add_operator (op = relationship.properties['op'], predicate = relationship.properties['op']) programs.append (list(program.values ())) for p in programs: print (f" list {p}") return programs class Operator: """ Abstraction of a method to call to effect a transition between two graph nodes. """ def __init__(self, op=None, predicate=None): self.op = op self.predicate = predicate def __repr__(self): return f"Operator(op={self.op},pred={self.predicate})" def __str__(self): return self.__repr__() class Frame: """ A frame represents a set of operations to transition from one concept type to another. Frames may be stacked in a program. """ def __init__(self, name=None, ops=[], collector=[]): self.name = name self.ops = defaultdict(Operator) self.collector = collector def add_operator (self, op, predicate): operator = self.ops[op] operator.op = op operator.predicate = predicate def __repr__(self): ops = [] for k, op in self.ops.items (): ops.append (str(op)) ops = ",".join (ops) return f"Frame(name={self.name},ops=[{ops}])" class GraphDB: """ Encapsulate graph database operations to some extent. """ def __init__(self, session): self.session = session def exec(self, command): """ Execute a cypher command returning the result. """ return self.session.run (command) def query(self, query): """ Synonym for exec for read only contexts. """ return self.exec(query) def get_node(self, properties, node_type=None): """ Get a ndoe given a set of properties and a node type to match on. 
""" ntype = f":{node_type}" if node_type else "" properties = ",".join ([ f""" {k} : "{v}" """ for k, v in properties.items () ]) return self.exec (f"""MATCH (n{ntype} {{ {properties} }}) RETURN n""") def create_concept (self, properties): """ Shortcut to create a concept node. """ self.create_node (properties, "Concept") def create_type (self, properties): """ Shortcut to create a type node. """ self.create_node (properties, "Type") def create_node(self, properties, node_type=None): """ Create a generic node given a set of properties and a node type. """ ntype = f":{node_type}" if node_type else "" properties = ",".join ([ f""" {k} : "{v}" """ for k, v in properties.items () ]) return self.exec (f"""CREATE (n{ntype} {{ {properties} }}) RETURN n""") def add_label(self, properties, node_type, label): """ Add a label to a node, given properties, a type, and the label to add. """ ntype = f":{node_type}" if node_type else "" properties = ",".join ([ f""" {k} : "{v}" """ for k, v in properties.items () ]) return self.exec ( f""" MATCH (n{ntype} {{ {properties} }}) SET n:{label} RETURN n, labels(n) AS labels""") def create_relationship(self, name_a, type_a, properties, name_b, type_b): """ Create a relationship between two nodes given name and type for each end of the relationship and properties for the relationship itself. """ relname = properties['name'] rprops = ",".join ([ f""" {k} : "{v}" """ for k, v in properties.items () if not k == "name"]) result = self.exec (f"""MATCH (a:{type_a} {{ name: "{name_a}" }})-[:{relname} {{ {rprops} }}]->(b:{type_b} {{ name : "{name_b}" }}) RETURN *""") return result if result.peek () else self.exec ( f""" MATCH (a:{type_a} {{ name: "{name_a}" }}) MATCH (b:{type_b} {{ name: "{name_b}" }}) CREATE (a)-[:{relname} {{ {rprops} }}]->(b)""") class Rel: def __init__(self,start,end): self.start = start self.end = end def __repr__(self): return f"start: {self.start} end: {self.end}"
/robokop-interfaces-0.91.tar.gz/robokop-interfaces-0.91/greent/graph.py
0.441432
0.252039
graph.py
pypi
import argparse import glob import json import os import re import requests import yaml import shutil from greent import node_types from greent.graph_components import KNode,KEdge,elements_to_json from greent.services.ontology import GenericOntology from greent.servicecontext import ServiceContext from flask import Flask, jsonify, g, Response from flasgger import Swagger app = Flask(__name__) template = { "swagger": "2.0", "info": { "title": "Generic Ontology API", "description": "Generic facts about ontologies.", "contact": { "responsibleOrganization": "renci.org", "responsibleDeveloper": "scox@renci.org", "email": "x@renci.org", "url": "www.renci.org", }, "termsOfService": "http://renci.org/terms", "version": "0.0.1" }, # "basePath": "/onto/api", "schemes": [ "http", "https" ] } app.config['SWAGGER'] = { 'title': 'Ontology Service' } swagger = Swagger(app, template=template) class Core: """ Core ontology services. """ def __init__(self): self.onts = {} self.context = service_context = ServiceContext ( config=app.config['SWAGGER']['greent_conf']) data_dir = app.config['onto']['data'] data_pattern = os.path.join (data_dir, "*.obo") ontology_files = glob.glob (data_pattern) for f in ontology_files: print (f"loading {f}") file_name = os.path.basename (f) name = file_name.replace (".obo", "") self.onts[name] = GenericOntology(self.context, f) def ont (self, curie): return self.onts[curie.lower()] if curie and curie.lower() in self.onts else None core = None def get_core (curie=None): global core if not core: print (f"initializing core") core = Core () result = core if curie: if ":" in curie: curie = curie.split(":")[0] result = core.ont (curie) return result @app.route('/is_a/<curie>/<ancestors>/') def is_a (curie, ancestors): """ Determine ancestry. --- parameters: - name: curie in: path type: string required: true default: GO:2001317 description: "An identifier from an ontology. 
eg, GO:2001317" x-valueType: - http://schema.org/string x-requestTemplate: - valueType: http://schema.org/string template: /is_a/{{ input }}/{{ input2 }} - name: ancestors in: path type: array required: true default: GO:1901362 items: type: string description: "A comma separated list of identifiers. eg, GO:1901362" x-valueType: - http://schema.org/string x-requestTemplate: - valueType: http://schema.org/string template: /is_a/{{ input }}/{{ input2 }} responses: 200: description: ... """ assert curie, "An identifier must be supplied." assert isinstance(ancestors, str), "Ancestors must be one or more identifiers" ont = get_core (curie) return jsonify ({ "is_a" : ont.is_a(curie, ancestors), "id" : curie, "ancestors" : ancestors }) @app.route('/label/<curie>/') def label (curie): """ Get ontology term label by id. --- parameters: - name: curie in: path type: string required: true default: GO:2001317 description: "An identifier from an ontology. eg, GO:2001317" x-valueType: - http://schema.org/string x-requestTemplate: - valueType: http://schema.org/string template: /label/{{ input }}/ responses: 200: description: ... """ core = get_core () label = None for k, v in core.onts.items (): label = v.label (curie) if label: break return jsonify ({ "label" : label, "id" : curie }) @app.route('/search/<pat>/<regex>') def search (pat, regex): """ Search for ids in an ontology based on a pattern, optionally a regular expression. --- parameters: - name: pat in: path type: string required: true default: "kidney" description: "Pattern to search for. .*kojic.*" x-valueType: - http://schema.org/string x-requestTemplate: - valueType: http://schema.org/string template: /search/{{ curie }}/{{ pat }}/{{ regex }}/ - name: regex in: path type: boolean required: true default: false description: Is the pattern a regular expession? 
x-valueType: - http://schema.org/boolean x-requestTemplate: - valueType: http://schema.org/boolean template: /search/{{ curie }}/{{ pat }}/{{ regex }}/ responses: 200: description: ... """ core = get_core () regex = regex=='true' vals = [ ont.search(pat, regex) for name, ont in core.onts.items () ] vals = [ term for term_list in vals for term in term_list ] return jsonify ({ "values" : vals }) @app.route('/xrefs/<curie>') def xrefs (curie): """ Get external references to other ontologies from this id. --- parameters: - name: curie in: path type: string required: true default: "MONDO:0001106" description: "Curie designating an ontology. eg, GO:2001317" x-valueType: - http://schema.org/string x-requestTemplate: - valueType: http://schema.org/string template: /xrefs/{{ curie }}/ responses: 200: description: ... """ ont = get_core (curie) return jsonify ({ "xrefs" : [ x.split(' ')[0] if ' ' in x else x for x in ont.xrefs (curie) ] } if ont else {}) @app.route('/lookup/<curie>') def lookup (curie): """ Get ids for which this curie is an external reference. --- parameters: - name: curie in: path type: string required: true default: "OMIM:143100" description: "Curie designating an external reference." x-valueType: - http://schema.org/string x-requestTemplate: - valueType: http://schema.org/string template: /lookup/{{ curie }}/ responses: 200: description: ... """ core = get_core () return jsonify ({ "refs" : [ ref for name, ont in core.onts.items() for ref in ont.lookup (curie) ] }) @app.route('/synonyms/<curie>/') def synonyms (curie): """ Get synonym terms for the given curie. --- parameters: - name: curie in: path type: string required: true default: "GO:0000009" description: "Curie designating an ontology. eg, GO:0000009" x-valueType: - http://schema.org/string x-requestTemplate: - valueType: http://schema.org/string template: /synonyms/{{ curie }}/ responses: 200: description: ... 
""" result = [] ont = get_core (curie) if ont: syns = ont.synonyms (curie) if syns: for syn in syns: result.append ({ "desc" : syn.desc, "scope" : syn.scope, "syn_type" : syn.syn_type.name if syn.syn_type else None, "xref" : syn.xref }) return jsonify (result) if __name__ == "__main__": parser = argparse.ArgumentParser(description='Rosetta Server') parser.add_argument('-p', '--port', type=int, help='Port to run service on.', default=5000) parser.add_argument('-d', '--debug', help="Debug.", default=False) parser.add_argument('-t', '--data', help="Ontology data source.", default=".") parser.add_argument('-c', '--conf', help='GreenT config file to use.', default="greent.conf") args = parser.parse_args () app.config['SWAGGER']['greent_conf'] = args.greent_conf = args.conf app.config['onto'] = { 'config' : args.conf, 'data' : args.data, 'debug' : args.debug } app.run(host='0.0.0.0', port=args.port, debug=True, threaded=True)
/robokop-interfaces-0.91.tar.gz/robokop-interfaces-0.91/greent/api/onto.py
0.513425
0.165222
onto.py
pypi
import argparse import json import os import requests import yaml import shutil try: from smartBag.grok import SemanticCrunch except: print ("smartbag not in path. skipping import.") from greent.rosetta import Rosetta from greent import node_types from greent.graph_components import KNode,KEdge,elements_to_json from flask import Flask, jsonify, g, Response from flasgger import Swagger app = Flask(__name__) template = { "swagger": "2.0", "info": { "title": "X-API", "description": "API for X data", "contact": { "responsibleOrganization": "x-org", "responsibleDeveloper": "x-dev", "email": "x@x.org", "url": "www.x.org", }, "termsOfService": "http://x.org/terms", "version": "0.0.1" }, # "host": "host.x", # overrides localhost:500 # "basePath": "/api", # base bash for blueprint registration "schemes": [ "http", "https" ] } app.config['SWAGGER'] = { 'title': 'Rosetta Service', 'bag_source' : '/.' } swagger = Swagger(app, template=template) rosetta = None def get_rosetta (): global rosetta if not rosetta: config = app.config['SWAGGER']['greent_conf'] rosetta = Rosetta (debug=True, greentConf=config) return rosetta @app.route('/cop/') def cop (drug="imatinib", disease="asthma"): """ Get service metadata --- parameters: - name: drug in: path type: string required: false default: imatinib x-valueType: - http://schema.org/string x-requestTemplate: - valueType: http://schema.org/string template: /query?drug={{ input }} - name: disease in: path type: string required: false default: asthma x-valueType: - http://schema.org/string x-requestTemplate: - valueType: http://schema.org/string template: /query?disease={{ input }} responses: 200: description: ... 
""" return jsonify ( get_rosetta().construct_knowledge_graph(**{ "inputs" : { "disease" : [ disease ] }, "query" : """MATCH (a:disease),(b:gene), p = allShortestPaths((a)-[*]->(b)) WHERE NONE (r IN relationships(p) WHERE type(r) = 'UNKNOWN' OR r.op is null) RETURN p""" }) + get_rosetta().construct_knowledge_graph(**{ "inputs" : { "drug" : [ drug ] }, "query" : """MATCH (a:drug),(b:gene), p = allShortestPaths((a)-[*]->(b)) WHERE NONE (r IN relationships(p) WHERE type(r) = 'UNKNOWN' OR r.op is null) RETURN p""" }) ) @app.route('/query/<inputs>/<query>') def query (inputs, query): """ Get service metadata --- parameters: - name: inputs in: path type: string required: true default: drug=MESH:D000068877,DRUGBANK:DB00619 description: A key value pair where the key is a biolink-model concept and the value is a comma separated list of curies. eg, concept=curie:id\[,curie:id\] x-valueType: - http://schema.org/string x-requestTemplate: - valueType: http://schema.org/string template: /query?inputs={{ input }} - name: query in: path type: string required: true description: A cypher query over the biolink-model concept space returning a shortest path. default: > MATCH (a:drug),(b:pathway), p = allShortestPaths((a)-[*]->(b)) WHERE NONE (r IN relationships(p) WHERE type(r)=UNKNOWN OR r.op is null) RETURN p' x-valueType: - http://schema.org/string x-requestTemplate: - valueType: http://schema.org/string template: /query?inputs={{ input }}&query={{ query }} responses: 200: description: ... 
""" """ Validate input ids structure is <concept>=<id>[,<id>]* """ if '=' not in inputs: raise ValueError ("Inputs must be key value of concept=<comma separated ids>") concept, items =inputs.split ("=") query = query.replace ("UNKNOWN", "'UNKNOWN'") args = { "inputs" : { concept : items.split (",") }, "query" : query } print (f" args => {json.dumps (args, indent=2)}") blackboard = get_rosetta().construct_knowledge_graph(**args) nodes = set([ e.target_node for e in blackboard ] + [ e.source_node for e in blackboard ]) ''' Do we really need different ids here? node_ids = {} for i, n in enumerate(nodes): node_ids[n.identifier] = i ''' # propagate this back to an edge standard. for e in blackboard: if not 'stdprop' in e.properties: e.properties['stdprop'] = {} e.properties['stdprop']['src'] = e.source_node.identifier e.properties['stdprop']['dst'] = e.target_node.identifier return jsonify ({ "edges" : [ elements_to_json(e) for e in blackboard ], "nodes" : [ elements_to_json(e) for e in nodes ] }) @app.route('/smartbag/compile/<bag_url>/') def smartbag_compile (bag_url): """ Given a smartBag URL, fetch the bag and compile it to a smartAPI. 
--- parameters: - name: bag_url in: path type: string required: true x-valueType: - http://schema.org/url x-requestTemplate: - valueType: http://schema.org/url template: /url={{ bag_url }} responses: 200: x-responseValueType: - path: x.y.z valueType: http://x.y/z x-JSONLDContext: /x.jsonld """ bag_source = app.config['SWAGGER']['bag_source'] bag_url = f"{bag_source}/{bag_url}" print (bag_url) bag_archive_file = bag_url.split ("/")[-1] print (f"bag archive: {bag_archive_file}") bag_base_name = bag_archive_file.replace (".tgz", "").replace (".zip", "") out_dir = os.path.join ("smartbag", "work", bag_base_name) if os.path.exists (out_dir): shutil.rmtree (out_dir) os.makedirs (out_dir) bag_archive_file_fq = os.path.join (out_dir, bag_archive_file) if bag_url.startswith ("http"): r = requests.get (bag_url, stream=True) with open(bag_archive_file_fq, 'wb') as outf: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks outf.write(chunk) else: shutil.copyfile (bag_url, bag_archive_file_fq) manifest = SemanticCrunch.generate_smartapi( bag=bag_archive_file_fq, output_dir=out_dir, title="TODO-Title") return jsonify(manifest) if __name__ == "__main__": print (""" ____ __ __ / __ \ ____ _____ ___ / /_ / /_ ____ _ / /_/ / / __ \ / ___/ / _ \ / __/ / __/ / __ `/ / _, _/ / /_/ / (__ ) / __// /_ / /_ / /_/ / version 0.0.1 /_/ |_| \____/ /____/ \___/ \__/ \__/ \__,_/ """) parser = argparse.ArgumentParser(description='Rosetta Server') parser.add_argument('-s', '--bag-source', help='Filesystem path or URL serving bags.', default='.') parser.add_argument('-p', '--port', type=int, help='Port to run service on.', default=None) parser.add_argument('-c', '--conf', help='GreenT config file to use.', default=None) args = parser.parse_args () app.config['SWAGGER']['bag_source'] = args.bag_source app.config['SWAGGER']['greent_conf'] = args.greent_conf = args.conf app.run(host='0.0.0.0', port=args.port, debug=True, threaded=True)
/robokop-interfaces-0.91.tar.gz/robokop-interfaces-0.91/greent/api/server.py
0.452778
0.176707
server.py
pypi
from greent.service import Service from greent.triplestore import TripleStore from greent.util import LoggingUtil from greent.util import Text from greent.graph_components import KEdge, KNode from greent import node_types import datetime logger = LoggingUtil.init_logging (__file__) class ChemBioKS(Service): """ Generic service endpoints for medical and bio-chemical data. This set comprises portions of chem2bio2rdf (CTD, KEGG, PubChem, DRUGBANK) """ def __init__(self, context): #triplestore): super(ChemBioKS, self).__init__("chembio", context) self.triplestore = TripleStore (self.url) def query_chembio (self, query): """ Execute and return the result of a SPARQL query. """ return self.triplestore.execute_query (query) def get_exposure_conditions (self, chemicals): """ Identify conditions (MeSH IDs) triggered by the specified stressor agent ids (also MeSH IDs). :param chemicals: List of IDs for substances of interest. :type chemicals: list of MeSH IDs, eg. D052638 """ id_list = ' '.join (list(map (lambda d : "( mesh:{0} )".format (d), chemicals))) text = self.triplestore.get_template ("ctd_gene_expo_disease").\ safe_substitute (chemicals=id_list) results = self.triplestore.execute_query (text) return list(map (lambda b : { "chemical" : b['chemical'].value, "gene" : b['gene'].value, "pathway" : b['kegg_pathway'].value, "pathName" : b['pathway_name'].value, "pathID" : b['pathway_id'].value, "human" : '(human)' in b['pathway_name'].value }, results.bindings)) def get_drugs_by_condition (self, conditions): """ Get drugs associated with a set of conditions. :param conditions: Conditions to find associated drugs for. 
:type conditions: List of MeSH IDs for conditions, eg.: D001249 """ if not isinstance (conditions,list): conditions = [ conditions ] conditions = list(map(lambda v : v.replace ("MESH:", "mesh:"), conditions)) prefix = "mesh:" if any(map(lambda v : v.startswith(prefix), conditions)): prefix = "" condition_list = ', '.join (list(map (lambda d : " {0}{1} ".format (prefix, d) , conditions))) result = self.triplestore.query_template ( inputs = { "diseaseIds" : condition_list.lower () }, outputs = [ 'drugID', 'drugGenericName', 'pubChemCID', 'diseasePMIDs' ], template_text=""" prefix mesh: <http://bio2rdf.org/mesh:> prefix ctd: <http://chem2bio2rdf.org/ctd/resource/> prefix db_resource: <http://chem2bio2rdf.org/drugbank/resource/> select ?drugID ?drugGenericName ?diseasePMIDs ?ctdChemDis ?pubChemCID where { values ( ?diseaseId ) { ( $diseaseIds ) } ?ctdChemDis ctd:cid ?pubChemCID; ctd:diseaseid ?diseaseId; ctd:pubmedids ?diseasePMIDs. ?dbInter db_resource:Name ?name ; db_resource:DBID ?drugID . ?drugID db_resource:CID ?pubChemCID ; db_resource:Generic_Name ?drugGenericName . }""") return result def get_drugs_by_condition_graph (self, conditions): drugs = self.get_drugs_by_condition (conditions.identifier) results = [] for r in drugs: edge = KEdge ('c2b2r', 'conditionToDrug', { 'cid' : r['pubChemCID'], 'pmids' : r['diseasePMIDs'] }) node = KNode (r['drugID'].split('/')[-1:][0], #"http://chem2bio2rdf.org/drugbank/resource/drugbank_drug", node_types.DRUG, r['drugGenericName']) results.append ( (edge, node) ) #logger.debug ("chembio drugs by condition: {}".format (results)) return results def get_genes_pathways_by_disease (self, diseases): """ Get genes and pathways associated with specified conditions. :param diseases: List of conditions designated by MeSH ID. :return: Returns a list of dicts containing gene and path information. 
""" diseaseMeshIDList = ' '.join (list(map (lambda d : "( mesh:{0} )".format (d), diseases))) text = self.triplestore.get_template ("genes_pathways_by_disease").safe_substitute (diseaseMeshIDList=diseaseMeshIDList) results = self.triplestore.execute_query (text) return list(map (lambda b : { "uniprotGene" : b['uniprotGeneID'].value, "keggPath" : b['keggPath'].value, "pathName" : b['pathwayName'].value, "human" : '(human)' in b['pathwayName'].value }, results.bindings)) def get_drug_gene_disease (self, disease_name, drug_name): """ Identify targets and diseases assocaited with a drug name. :param disease_name: MeSH name of a disease condition. :type str: String :param drug_name: Name of a drug. :type str: String """ text = self.triplestore.get_template ("drug_gene_disease").safe_substitute ( diseaseName=disease_name, drugName=drug_name) results = self.triplestore.execute_query (text) return list(map (lambda b : { "uniprotSymbol" : b['uniprotSym'].value, "diseaseId" : b['diseaseID'].value }, results.bindings)) def pubchem_to_ncbigene (self, pubchemID): result = self.triplestore.query_template ( inputs = { "pubchemID" : "pubchem:{}".format(pubchemID) }, outputs = [ 'NCBIGene', 'meshID', 'interaction', 'interactionTypes', 'pubmedids' ], template_text=""" prefix pubchem: <http://chem2bio2rdf.org/pubchem/resource/pubchem_compound/> prefix ctd: <http://chem2bio2rdf.org/ctd/resource/> select distinct ?NCBIGene ?meshID ?interaction ?interactionTypes ?pubmedids where { ?ctdChemGene ctd:cid $pubchemID; ctd:chemicalid ?meshID ; ctd:geneid ?NCBIGene; ctd:interaction ?interaction; ctd:interactiontypes ?interactionTypes; ctd:pubmedids ?pubmedids. 
}""") return list(map(lambda r : { 'NCBIGene' : r['NCBIGene'], 'meshID' : r['meshID'], 'interaction': r['interaction'], 'interactionTypes': r['interactionTypes'], 'pubmedids' : r['pubmedids'] }, result)) def drug_name_to_gene_symbol (self, drug_name): result = self.triplestore.query_template ( inputs = { "drugName" : drug_name }, outputs = [ 'uniprotSym', 'pmids', 'drugID' ], template_text=""" prefix ctd: <http://chem2bio2rdf.org/ctd/resource/> prefix db_resource: <http://chem2bio2rdf.org/drugbank/resource/> select ?drugGenericName ?pmids ?drugID ?uniprotSym where { values ( ?drugName ) { ( "$drugName" ) } ?ctdChemGene ctd:cid ?pubChemCID; ctd:pubmedids ?pmids; ctd:gene ?uniprotSym . ?drugID db_resource:CID ?pubChemCID ; db_resource:Generic_Name ?drugGenericName . filter regex(lcase(str(?drugGenericName)), lcase(?drugName)) }""") return list(map(lambda r : { 'uniprotSym' : r['uniprotSym'], 'pmids' : r.get('pmids', None), 'drugID' : r['drugID'] }, result)) def drugname_to_pubchem(self, drug_name): result = self.triplestore.query_template ( inputs = { "drugName" : drug_name }, outputs = [ 'pubChemID', 'drugGenericName' ], template_text=""" prefix db_resource: <http://chem2bio2rdf.org/drugbank/resource/> select distinct ?pubChemID ?drugGenericName where { values ( ?drugName ) { ( "$drugName" ) } ?drugID db_resource:CID ?pubChemID ; db_resource:Generic_Name ?drugGenericName . 
filter regex(lcase(str(?drugGenericName)), lcase(?drugName)) }""") return list(map(lambda r : { 'drugID' : r['pubChemID'], 'drugName' : r['drugGenericName'] }, result)) def gene_symbol_to_pathway (self, uniprot_symbol): return self.triplestore.query_template ( inputs = { "uniprotSymbol" : uniprot_symbol }, outputs = [ "keggPath" ], template_text=""" prefix kegg: <http://chem2bio2rdf.org/kegg/resource/> prefix pharmgkb: <http://chem2bio2rdf.org/pharmgkb/resource/> prefix ctd: <http://chem2bio2rdf.org/ctd/resource/> select ?ctdGene ?uniprotID ?pathwayName ?keggPath where { values ( ?ctdGene ) { ( <$uniprotSymbol> ) } ?keggPath kegg:protein ?uniprotID ; kegg:Pathway_name ?pathwayName . ?pharmGene pharmgkb:Symbol ?ctdGene ; pharmgkb:UniProt_Id ?uniprotID. ?ctdChemGene ctd:gene ?ctdGene. } LIMIT 500 """) def uniprot_to_hgnc (self, uniprot_symbol): return self.triplestore.query_template ( inputs = { "uniprotID" : Text.un_curie (uniprot_symbol.identifier) }, outputs = [ "hgncID" ], template_text=""" prefix uniprot: <http://chem2bio2rdf.org/uniprot/resource/gene/> prefix owl: <http://www.w3.org/2002/07/owl#> prefix hgnc: <http://chem2bio2rdf.org/rdf/resource/hgnc/> select distinct ?hgncID where { values ( ?uniprotID ) { ( uniprot:${uniprotID} ) } ?uniprotID <http://www.w3.org/2002/07/owl#sameAs> ?hgncID. 
filter ( strstarts (str(?hgncID), "http://bio2rdf.org/gene:")) } """) def graph_uniprot_to_hgnc (self, uniprot_symbol): result = self.uniprot_to_hgnc (uniprot_symbol) return [ ( self.get_edge (r, predicate='synonym'), KNode('HGNC:{0}'.format (r['hgncID'].split(':')[-1]), node_types.GENE)) for r in result ] def graph_get_genes_by_disease (self, disease): #reasoner disease = disease.identifier.split (':')[1].lower () response = self.get_genes_pathways_by_disease ([ disease ]) results = [] for r in response: edge = KEdge ('c2b2r', 'diseaseToGene', { 'keggPath' : r['keggPath'] }) node = KNode ("UNIPROT:{0}".format (r['uniprotGene'].split('/')[-1:][0]), node_types.GENE) results.append ( (edge, node) ) return results def graph_get_pathways_by_gene (self, gene): #reasoner response = self.triplestore.query_template ( inputs = { "gene" : gene.identifier.split(':')[1].upper () }, outputs = [ 'keggPath' ], template_text=""" prefix kegg: <http://chem2bio2rdf.org/kegg/resource/> prefix drugbank: <http://chem2bio2rdf.org/drugbank/resource/> prefix uniprot: <http://chem2bio2rdf.org/uniprot/resource/gene/> prefix ctd: <http://chem2bio2rdf.org/ctd/resource/> prefix mesh: <http://bio2rdf.org/mesh:> select ?drugGenericName ?uniprotGeneID ?pathwayName ?keggPath where { ?keggPath kegg:protein ?swissProtID ; kegg:Pathway_name ?pathwayName . ?keggInter kegg:cid ?pubchemCID . ?dbInter drugbank:GeneBank_ID ?geneBankID ; drugbank:SwissProt_ID ?swissProtID ; drugbank:gene ?uniprotGeneID . ?drugID drugbank:CID ?pubchemCID ; drugbank:Generic_Name ?drugGenericName . ?ctd_disease ctd:diseaseid ?diseaseID ; ctd:cid ?pubchemCID . 
values ( ?uniprotGeneID ) { ( uniprot:$gene ) } } LIMIT 2000""") results = [] for r in response: edge = KEdge ('c2b2r', 'geneToPathway', {}) node = KNode ("KEGG:{0}".format (r['keggPath'].split('/')[-1:][0]), node_types.PATHWAY) results.append ( (edge, node) ) return results def graph_drugname_to_gene_symbol (self, drug_name_node): drug_name = Text.un_curie (drug_name_node.identifier) response = self.drug_name_to_gene_symbol (drug_name) results = [] for r in response: edge = self.get_edge (r, predicate="targets") node = KNode ("UNIPROT:{0}".format (Text.path_last (r['uniprotSym'])), node_types.GENE) results.append ( (edge, node) ) return results def graph_name_to_drugbank (self, drug_name_node): drug_name = Text.un_curie (drug_name_node.identifier) response = self.drug_name_to_gene_symbol (drug_name) results = [] for r in response: edge = self.get_edge (r, predicate="drugname") node = KNode ("DRUGBANK:{0}".format (Text.path_last (r['drugID'])), \ node_types.DRUG, \ label=r['drugName']) results.append ( (edge, node) ) return results def graph_get_pathways_by_gene (self, gene): #reasoner response = self.triplestore.query_template ( inputs = { "gene" : gene.identifier.split(':')[1].upper () }, outputs = [ 'keggPath' ], template_text=""" prefix kegg: <http://chem2bio2rdf.org/kegg/resource/> prefix drugbank: <http://chem2bio2rdf.org/drugbank/resource/> prefix uniprot: <http://chem2bio2rdf.org/uniprot/resource/gene/> prefix ctd: <http://chem2bio2rdf.org/ctd/resource/> prefix mesh: <http://bio2rdf.org/mesh:> select ?drugGenericName ?uniprotGeneID ?pathwayName ?keggPath where { ?keggPath kegg:protein ?swissProtID ; kegg:Pathway_name ?pathwayName . ?keggInter kegg:cid ?pubchemCID . ?dbInter drugbank:GeneBank_ID ?geneBankID ; drugbank:SwissProt_ID ?swissProtID ; drugbank:gene ?uniprotGeneID . ?drugID drugbank:CID ?pubchemCID ; drugbank:Generic_Name ?drugGenericName . ?ctd_disease ctd:diseaseid ?diseaseID ; ctd:cid ?pubchemCID . 
values ( ?uniprotGeneID ) { ( uniprot:$gene ) } } LIMIT 2000""") results = [] for r in response: edge = KEdge ('c2b2r', 'geneToPathway', {}) node = KNode ("KEGG:{0}".format (r['keggPath'].split('/')[-1:][0]), node_types.PATHWAY) results.append ( (edge, node) ) return results def graph_drugbank_to_uniprot (self, drugbank): response = self.triplestore.query_template ( inputs = { "drugID" : "DB{0}".format (Text.un_curie (drugbank.identifier)) }, outputs = [ "uniprotGeneID" ], template_text = """ prefix drugbank: <http://chem2bio2rdf.org/drugbank/resource/> prefix drugbank_drug: <http://chem2bio2rdf.org/drugbank/resource/drugbank_drug/> prefix ctd: <http://chem2bio2rdf.org/ctd/resource/> select distinct ?uniprotGeneID where { values ( ?drugID ) { ( drugbank_drug:${drugID} ) } ?dbInter drugbank:GeneBank_ID ?geneBankID ; drugbank:gene ?uniprotGeneID . ?drugID drugbank:CID ?pubchemCID ; drugbank:Generic_Name ?drugGenericName . ?ctd_disease ctd:diseaseid ?diseaseID ; ctd:cid ?pubchemCID . }""") return [ ( self.get_edge (r, predicate='targets'), KNode ("UNIPROT:{0}".format (r['uniprotGeneID'].split('/')[-1:][0]), node_types.GENE) ) for r in response ] def graph_diseasename_to_uniprot (self, disease): results = [] response = self.triplestore.query_template ( inputs = { "diseaseName" : Text.un_curie (disease.identifier) }, outputs = [ "pubChemCID" ], template_text = """ prefix ctd: <http://chem2bio2rdf.org/ctd/resource/> select distinct ?pubChemCID where { values ( ?diseaseName ) { ( "$diseaseName" ) } ?ctdChemDis ctd:cid ?pubChemCID; ctd:diseasename ?diseaseNameRec. filter regex(lcase(str(?diseaseNameRec)), lcase(?diseaseName)) } LIMIT 1""") if len(response) > 0: # This is a disease. 
response = self.triplestore.query_template ( inputs = { "diseaseName" : Text.un_curie(disease.identifier) }, outputs = [ "disPmids", "chemPmids", "uniprotSym" ], template_text = """ prefix ctd: <http://chem2bio2rdf.org/ctd/resource/> select ?disPmids ?ctdChemDis ?chemPmids ?uniprotSym ?diseaseId where { values ( ?diseaseName ) { ( "$diseaseName" ) } ?ctdChemGene ctd:cid ?pubChemCID; ctd:pubmedids ?chemPmids; ctd:gene ?uniprotSym. ?ctdChemDis ctd:cid ?pubChemCID; ctd:diseaseid ?diseaseId; ctd:diseasename ?diseaseNameRec; ctd:pubmedids ?disPmids. filter regex(lcase(str(?diseaseNameRec)), lcase(?diseaseName)) } LIMIT 500""") for r in response: chemPmids = r['chemPmids'] disPmids = r['disPmids'] pmids = chemPmids + "|" + disPmids edge = self.get_edge (r, predicate='caused_by', pmids=pmids), node = KNode ("UNIPROT:{0}".format (r['uniprotSym'].split('/')[-1:][0]), node_types.GENE) results.append ( (edge, node) ) return results def graph_diseaseid_to_uniprot (self, drugbank): print( drugbank.identifier.lower() ) response = self.triplestore.query_template ( inputs = { "diseaseID" : drugbank.identifier.lower () }, outputs = [ "uniprotGeneID" ], template_text = """ prefix drugbank: <http://chem2bio2rdf.org/drugbank/resource/> prefix drugbank_drug: <http://chem2bio2rdf.org/drugbank/resource/drugbank_drug/> prefix ctd: <http://chem2bio2rdf.org/ctd/resource/> prefix mesh.disease: <http://bio2rdf.org/mesh:> select distinct ?uniprotGeneID where { values ( ?diseaseID ) { ( $diseaseID ) } ?dbInter drugbank:gene ?uniprotGeneID . ?drugID drugbank:CID ?pubchemCID. ?ctd_disease ctd:diseaseid ?diseaseID ; ctd:cid ?pubchemCID . 
}""") return [ ( self.get_edge (r, predicate='targets'), KNode ("UNIPROT:{0}".format (r['uniprotGeneID'].split('/')[-1:][0]), node_types.GENE) ) for r in response ] def graph_drugname_to_pubchem( self, drugname_node): drug_name = Text.un_curie (drugname_node.identifier) response = self.drugname_to_pubchem(drug_name) return [ (self.get_edge( r, predicate='drugname_to_pubchem'), \ KNode( "PUBCHEM:{}".format( r['drugID'].split('/')[-1]), node_types.DRUG, label=r['drugName'])) for r in response ] # 'NCBIGene' : r['NCBIGene'], # 'meshID' : r['meshID'], # 'interaction': r['interaction'], # 'interactionTypes': r['interactionTypes'] # 'pubmedids' : r['pubmedids'] def graph_pubchem_to_ncbigene( self, pubchem_node): #The compound mesh coming back from here is very out of date. Ignore. pubchemid = Text.un_curie (pubchem_node.identifier) response = self.pubchem_to_ncbigene(pubchemid) retvals = [] for r in response: props = {} props['interaction'] = r['interaction'] props['interactionTypes'] = r['interactionTypes'] props['publications'] = r['pubmedids'].split('|') retvals.append( (self.get_edge( props, predicate='pubchem_to_ncbigene'), KNode( "NCBIGene:{}".format( r['NCBIGene']), node_types.GENE) ) ) return retvals
/robokop-interfaces-0.91.tar.gz/robokop-interfaces-0.91/greent/services/chembio.py
0.633183
0.251096
chembio.py
pypi
import logging import requests from greent.service import Service import greent.util from greent.graph_components import KNode, KEdge from greent.util import Text from greent import node_types logger = greent.util.LoggingUtil.init_logging(__file__, level=logging.DEBUG) class CTD(Service): """ Interface to the Comparative Toxicogenomic Database data set.""" def __init__(self, context): super(CTD, self).__init__("ctd", context) def drugname_string_to_drug_identifier(self,drugname): #First, check to see if the name is already an exact name of something chemnamerows = requests.get (f"{self.url}CTD_chemicals_ChemicalName/{drugname}/").json () keepers = [ x for x in chemnamerows if x['ChemicalName'].upper() == drugname.upper()] if len(keepers) == 0: #Didn't find exact name match, so now see if there is an exact synonym synonamerows = requests.get (f"{self.url}CTD_chemicals_Synonyms/{drugname}/").json () for row in synonamerows: synonyms = [syn.upper() for syn in row['Synonyms'].split('|')] if drugname.upper() in synonyms: keepers.append(row) return [ f"{r['ChemicalID']}" for r in keepers ] def drugname_string_to_drug(self, drugname): identifiers = self.drugname_string_to_drug_identifier(drugname) return [ KNode(identifier, node_types.DRUG) for identifier in identifiers ] def drug_to_gene(self, subject): output = [] for identifier in subject.synonyms: if Text.get_curie(identifier).upper() == 'MESH': obj = requests.get (f"{self.url}/CTD_chem_gene_ixns_ChemicalID/{Text.un_curie(identifier)}/").json () output += [ ( self.get_edge(props=r, pmids=r['PubMedIDs']), KNode(f"NCBIGENE:{r['GeneID']}", node_types.GENE) ) for r in obj ] return output def gene_to_drug(self, subject): output = [] for identifier in subject.synonyms: if Text.get_curie(identifier).upper() == 'NCBIGENE': obj = requests.get (f"{self.url}/CTD_chem_gene_ixns_GeneID/{Text.un_curie(identifier)}/").json () output += [( self.get_edge(props=r, pmids=r['PubMedIDs']), KNode(f"MESH:{r['ChemicalID']}", node_types.DRUG) 
) for r in obj ] return output
/robokop-interfaces-0.91.tar.gz/robokop-interfaces-0.91/greent/services/ctd.py
0.41052
0.206094
ctd.py
pypi
import pronto import re import logging from greent.util import LoggingUtil from greent.service import Service from pronto.relationship import Relationship logger = LoggingUtil.init_logging (__file__, level=logging.DEBUG) class GenericOntology(Service): """ Sure, don't just dig around in obo files they say. But when the SPARQL is dry, we will drink straight from the obo if need be. """ def __init__(self, context, obo): """ Load an obo file. """ super(GenericOntology, self).__init__("go", context) self.ont = pronto.Ontology (obo) def label(self,identifier): """Return the label for an identifier""" return self.ont[identifier].name if identifier in self.ont else None def is_a(self,identifier, term): """Determine whether a term has a particular ancestor""" print (f"is {identifier} a {term}?") is_a = False is_a_rel = Relationship('is_a') if identifier in self.ont: #parents = self.ont[identifier].parents the_term = self.ont[identifier] parents = the_term.relations[is_a_rel] if is_a_rel in the_term.relations else [] print (f"{identifier} parents {parents}") for ancestor in parents: ancestor_id = ancestor.id if ' ' in ancestor.id: ancestor_id = ancestor.id.split(' ')[0] print (f" ancestor: {ancestor_id}") is_a = ancestor_id == term if is_a: break if 'xref' in ancestor.other: for xancestor in ancestor.other['xref']: print (f" ancestor-xref: {xancestor} ?=~ {term}") is_a = xancestor.startswith (term) if is_a: break if not is_a: is_a = self.is_a (ancestor_id, term) if is_a: break print (f"{identifier} is_a {term} => {is_a}") return is_a def xrefs(self, identifier): """ Get external references. """ result = [] if identifier in self.ont: result = self.ont[identifier].other['xref'] if 'xref' in self.ont[identifier].other else [] result = [ x.split(' ') if ' ' in x else [x, ''] for x in result ] result = [ { 'id' : x[0], 'desc' : x[1] } for x in result if len(x) == 2 and ':' in x[1] ] return result def synonyms(self, identifier, curie_pattern=None): """ Get synonyms. 
""" return \ [ x for x in self.ont[identifier].synonyms if curie_pattern and x.startswith(curie_pattern) ] + \ [ syn for syn in self.ont[identifier].synonyms ] \ if identifier in self.ont else [] def search (self, text, is_regex=False, ignore_case=True): """ Search for the text, treating it as a regular expression if indicated. """ print (f"text: {text} is_regex: {is_regex}, ignore_case: {ignore_case}") pat = None if is_regex: pat = re.compile(text, re.IGNORECASE) if ignore_case else re.compile(text) result = {} for term in self.ont: if is_regex: if pat.match (term.name): logger.debug (f" matched {text} pattern in term name: {term.name}") result[term.id] = term else: for syn in term.synonyms: if pat.match (syn.desc): logger.debug (f" matched {text} pattern in synonym: {syn.desc}") result[term.id] = term else: if text.lower() == term.name.lower(): logger.debug (f" text {text} == term name {term.name}") result[term.id] = term else: for syn in term.synonyms: if text.lower() == syn.desc.lower(): logger.debug (f" text {text.lower()} == synonym: {syn.desc.lower()}") result[term.id] = term result = [ { "id" : term.id, "label" : term.name } for key, term in result.items () ] return result def lookup(self, identifier): """ Given an identifier, find ids in the ontology for which it is an xref. """ assert identifier and ':' in identifier, "Must provide a valid identifier." result = [] for term in self.ont: xrefs = [] if 'xref' in term.other: for xref in term.other['xref']: if xref.startswith (identifier): if ' ' in xref: xref_pair = xref.split(' ') xref_pair = [ xref_pair[0], ' '.join (xref_pair[1:]) ] else: xref_pair = [xref, ''] print (f"xref_pair: {xref_pair}") xrefs.append ({ 'id' : xref_pair[0], 'desc' : xref_pair[1] }) if len(xrefs) > 0: result.append ({ "id" : term.id, "xrefs" : xrefs }) return result
/robokop-interfaces-0.91.tar.gz/robokop-interfaces-0.91/greent/services/ontology.py
0.451568
0.289797
ontology.py
pypi
import requests from greent import node_types from greent.graph_components import KNode, KEdge from greent.service import Service #A map from identifiers.org namespaces (curie prefixes) to how HGNC describes these things prefixes_to_hgnc = { 'HGNC': 'hgnc_id', 'NCBIGENE': 'entrez_id', 'NCBIGene': 'entrez_id', 'HGNC.SYMBOL': 'symbol', 'OMIM': 'omim_id', #UNIPROTKB is not a identifiers.org prefix. Uniprot is, and uniprot.isoform is. 'UNIPROTKB': 'uniprot_ids', 'ENSEMBL': 'ensembl_gene_id' } hgnc_to_prefixes = { v: k for k,v in prefixes_to_hgnc.items()} class HGNC(Service): """ Generic GENE id translation service. Essentially a highly generic synonym finder. """ def __init__(self, context): super(HGNC, self).__init__("hgnc", context) def get_name(self, node): """Given a node for an hgnc, return the name for that id""" if node.node_type != node_types.GENE: raise ValueError('Node must be a gene') identifier_parts = node.identifier.split(':') if identifier_parts[0] == 'HGNC': query_string='hgnc_id' elif identifier_parts[0].upper() == 'NCBIGENE': query_string = 'entrez_id' else: raise ValueError('Node must represent an HGNC or NCBIGene id.') hgnc_id = identifier_parts[1] headers = {'Accept':'application/json'} r = requests.get('%s/%s/%s' % (self.url, query_string, hgnc_id), headers= headers).json() try: symbol = r['response']['docs'][0]['symbol'] except: import json json.dumps(r,indent=2) symbol = hgnc_id return symbol def get_synonyms(self, identifier): identifier_parts = identifier.split(':') prefix = identifier_parts[0] id = identifier_parts[1] query_type = prefixes_to_hgnc[prefix] headers = {'Accept':'application/json'} r = requests.get('%s/%s/%s' % (self.url, query_type, id), headers= headers).json() docs = r['response']['docs'] synonyms = set() for doc in docs: for key in doc: if key in hgnc_to_prefixes: values = doc[key] prefix = hgnc_to_prefixes[key] if not isinstance(values, list): values = [values] for value in values: if ':' in value: value = 
value.split(':')[-1] synonym = f'{prefix}:{value}' synonyms.add(synonym) return synonyms #todo, it would probably be straightforward to autogenerate these and have common logic for them def ncbigene_to_uniprotkb(self, node): """Given a node representing an NCBIGene (or Entrez) identifier, retrieve the UniProtKB identifier""" if node.node_type != node_types.GENE: raise ValueError('Node must be a gene') identifier_parts = node.identifier.split(':') if not identifier_parts[0].upper() == 'NCBIGENE': raise ValueError('Node must represent an NCBIGENE identifier.') hgnc_id = identifier_parts[1] headers = {'Accept':'application/json'} r = requests.get('{0}/entrez_id/{1}'.format(self.url, hgnc_id), headers= headers).json() try: uniprots = r['response']['docs'][0]['uniprot_ids'] return [ ( KEdge( 'hgnc', 'ncbigene_to_uniprotkb', is_synonym=True ),\ KNode( identifier='UNIPROTKB:{}'.format(uniprot), node_type = node_types.GENE )) \ for uniprot in uniprots ] except (IndexError, KeyError): #No results back return [] def hgnc_to_uniprotkb(self, node): """Given a node representing an HGNC retrieve the UniProtKB identifier""" if node.node_type != node_types.GENE: raise ValueError('Node must be a gene') identifier_parts = node.identifier.split(':') if not identifier_parts[0].upper() == 'HGNC': raise ValueError('Node must represent an HGNC identifier.') hgnc_id = identifier_parts[1] headers = {'Accept':'application/json'} r = requests.get('{0}/hgnc_id/{1}'.format(self.url, hgnc_id), headers= headers).json() try: uniprots = r['response']['docs'][0]['uniprot_ids'] return [ ( KEdge( 'hgnc', 'ncbigene_to_uniprotkb', is_synonym=True ),\ KNode( identifier='UNIPROTKB:{}'.format(uniprot), node_type = node_types.GENE )) \ for uniprot in uniprots ] except (IndexError,KeyError): #No results back return []
/robokop-interfaces-0.91.tar.gz/robokop-interfaces-0.91/greent/services/hgnc.py
0.452294
0.267956
hgnc.py
pypi
"""Helpers for uploading and updating robologs dataset/topic metadata in DynamoDB."""
import json
import os
from decimal import Decimal

import boto3
from boto3.dynamodb.conditions import Key


def upload_json_to_table(json_path: str, table_name: str, region_name: str = "us-west-2") -> None:
    """ This function uploads a dictionary to a DynamoDB table (currently only used for anomaly detection)
    Args: json_path: path to json file table_name: name of table (currently only used for anomaly detection)
    region_name: AWS region name of DynamoDB database Returns: None """
    print(f"upload_json_to_table -> json_path: {json_path}")
    with open(json_path, "r") as f:
        # parse_float=Decimal: DynamoDB rejects Python floats, so all JSON
        # floats are loaded as Decimal up front.
        data = json.load(f, parse_float=Decimal)
    dynamodb = boto3.resource("dynamodb", region_name=region_name)
    table = dynamodb.Table(table_name)
    print(f"upload_json_to_table -> length DynamoDB: {str(len(data))}")
    # batch_writer buffers put_item calls and flushes them in batches.
    with table.batch_writer() as writer:
        for entry in data:
            print(entry)
            if entry:
                writer.put_item(Item=entry)
            else:
                # Falsy entries (e.g. empty dicts) are skipped, not uploaded.
                print(f"upload_json_to_table -> no entry found")
    print("upload_json_to_table -> done uploading")
    return


def update_field_in_table(
    table_name: str, key_name: str, key_value: str, update_dict: dict, region_name: str = "us-west-2"
) -> None:
    """ This function updates a field in a table Args: table_name: AWS Table name
    key_name: name of column to be updated in table key_value: value of column to be updated
    update_dict: the dictionary which should be entered (with key, value pairs)
    region_name: AWS region name of DynamoDB database Returns: None """
    update_expression = ["set "]
    update_values = dict()
    dynamodb = boto3.resource("dynamodb", region_name=region_name)
    table = dynamodb.Table(table_name)
    # Build "set k1 = :k1, k2 = :k2, ..." plus the matching :placeholder map.
    for key, val in update_dict.items():
        update_expression.append(f" {key} = :{key},")
        update_values[f":{key}"] = val
    # [:-1] trims the trailing comma from the joined expression.
    # NOTE(review): if update_dict is empty this yields "set" with no clauses,
    # which DynamoDB would reject — presumably callers always pass fields.
    a, v = "".join(update_expression)[:-1], update_values
    # NOTE(review): the response `r` is unused; kept as-is for parity.
    r = table.update_item(
        Key={key_name: key_value},
        UpdateExpression=a,
        ExpressionAttributeValues=dict(v),
    )
    return


def update_tags_field_in_table(
    table_name: str, key_name: str, key_value: str, update_dict: dict, region_name: str = "us-west-2"
) -> None:
    """ This function updates the tags field in the dataset table. It first gets all existing tags,
    then appends them to the new tags. And then updates the table.
    Args: table_name: AWS Table name key_name: column name of dataset_id, usually 'id'
    key_value: UUID of the dataset update_dict: dictionary with tag dictionary
    region_name: AWS region name of DynamoDB database Returns: None """
    update_expression = ["set "]
    update_values = dict()
    dynamodb = boto3.resource("dynamodb", region_name=region_name)
    table = dynamodb.Table(table_name)
    # Look up the existing item so old tags can be merged into the new list.
    resp = table.query(KeyConditionExpression=Key(key_name).eq(key_value))
    print(resp)
    if len(resp["Items"]) != 1:
        # Exactly one dataset row is expected for this key; bail out otherwise.
        print("update_tags_field_in_table -> Couldn't update tags field. Mismatch number of dataset table items.")
        return
    if ("tags" in resp["Items"][0]) and ("tags" in update_dict.keys()):
        # Merge: keep every existing tag that is not already in the new list.
        # NOTE(review): this mutates the caller's update_dict["tags"] in place.
        for old_tag in resp["Items"][0]["tags"]:
            if old_tag not in update_dict["tags"]:
                update_dict["tags"].append(old_tag)
    for key, val in update_dict.items():
        update_expression.append(f" {key} = :{key},")
        update_values[f":{key}"] = val
    # Trim trailing comma, same pattern as update_field_in_table.
    a, v = "".join(update_expression)[:-1], update_values
    r = table.update_item(
        Key={key_name: key_value},
        UpdateExpression=a,
        ExpressionAttributeValues=dict(v),
    )
    return


def update_dataset_table(json_path: str, table_name: str, region_name: str = "us-west-2") -> None:
    """ This function updates the AWS Dataset table with the dataset.json values
    Args: json_path: path to dataset.json table_name: name of AWS dataset table
    region_name: AWS region name of DynamoDB database Returns: None """
    print(f"update_dataset_table -> json path: {json_path}")
    with open(json_path, "r") as f:
        data = json.load(f, parse_float=Decimal)
    dynamodb = boto3.resource("dynamodb", region_name=region_name)
    table = dynamodb.Table(table_name)
    print(f"update_dataset_table -> length DynamoDB: {str(len(data))}")
    # Only a single-entry dataset.json is handled; anything else is a no-op.
    if len(data) == 1:
        dataset_entry = data[0]
        update_expression = ["set "]
        update_values = dict()
        for key, val in dataset_entry.items():
            # we are not updating the dataset table with the ID, Tags, Timestamp and Description, as these fields are
            # populated by the user upload.
            if key == "id":
                continue
            if key == "tags":
                continue
            if key == "description":
                continue
            if key == "timestamp":
                # '#t' aliases the attribute name because 'timestamp' is a
                # DynamoDB reserved word and cannot appear literally.
                update_expression.append(f"#t = :{key},")
            else:
                update_expression.append(f" {key} = :{key},")
            update_values[f":{key}"] = val
        a, v = "".join(update_expression)[:-1], update_values
        # DynamoDB cannot store floats; coerce any that survived json.load.
        for kk in v.keys():
            if type(v[kk]).__name__ == "float":
                v[kk] = Decimal(v[kk])
        if "id" in dataset_entry.keys():
            # NOTE(review): ExpressionAttributeNames={"#t": ...} is always sent,
            # but DynamoDB rejects unused attribute names — this assumes
            # dataset.json always contains a "timestamp" field; TODO confirm.
            response = table.update_item(
                Key={"id": dataset_entry["id"]},
                UpdateExpression=a,
                ExpressionAttributeValues=dict(v),
                ExpressionAttributeNames={"#t": "timestamp"},
            )
    return


def upload_tables(
    base_folder: str,
    args: dict,
    name_topic_table: str,
    name_dataset_table: str,
    update_instead_of_replace: bool = False,
    region_name: str = "us-west-2",
) -> str:
    """ This function uploads the dataset.json and the topics.json to the dataset and topic table
    Args: base_folder: base ingestion path args: configuration dict
    name_topic_table: name of AWS topic table name_dataset_table: name of AWS dataset table
    update_instead_of_replace: if True, then the table gets updated, as opposed to creating a new entry
    region_name: region name of dynamo DB table Returns: dataset id """
    tables_folder = os.path.join(base_folder, "tables")
    if args["upload_topics"]:
        topic_json = os.path.join(tables_folder, "topic.json")
        print("Uploading Topics table...")
        upload_json_to_table(topic_json, name_topic_table, region_name)
    if args["upload_datasets"]:
        dataset_json = os.path.join(tables_folder, "dataset.json")
        print("Uploading Dataset table...")
        if update_instead_of_replace:
            update_dataset_table(dataset_json, name_dataset_table, region_name)
        else:
            upload_json_to_table(dataset_json, name_dataset_table, region_name)
    # The dataset id is always read back from dataset.json, even when nothing
    # was uploaded above.
    # NOTE(review): data[0] raises IndexError on an empty dataset.json —
    # presumably the ingestion pipeline guarantees one entry; verify upstream.
    dataset_json = os.path.join(tables_folder, "dataset.json")
    with open(dataset_json, "r") as f:
        data = json.load(f, parse_float=Decimal)
    return data[0]["id"]
/robologs_ros_utils-0.1.1a26-py3-none-any.whl/robologs_ros_utils/destinations/aws/dynamodb/dynamodb_utils.py
0.63624
0.339048
dynamodb_utils.py
pypi
import glob
import io
import os
import subprocess

import cv2
import numpy as np
from PIL import Image


def convert_compressed_depth_to_cv2(compressed_depth):
    """
    Convert a compressedDepth topic image into a cv2 image.

    compressed_depth must be from a topic /bla/compressedDepth
    as it's encoded in PNG
    Code from: https://answers.ros.org/question/249775/display-compresseddepth-image-python-cv2/

    Returns:
        JET-colormapped depth image (BGR numpy array).

    Raises:
        Exception: if the message is not compressedDepth or cannot be decoded.
    """
    depth_fmt, compr_type = compressed_depth.format.split(";")
    # remove white space
    depth_fmt = depth_fmt.strip()
    compr_type = compr_type.strip().replace(" png", "")
    if compr_type != "compressedDepth":
        raise Exception("Compression type is not 'compressedDepth'. You probably subscribed to the wrong topic.")
    # The compressedDepth payload carries a fixed-size header before the PNG bytes.
    depth_header_size = 12
    raw_data = compressed_depth.data[depth_header_size:]
    depth_img_raw = cv2.imdecode(np.frombuffer(raw_data, np.uint8), 0)
    if depth_img_raw is None:
        # probably wrong header size
        raise Exception("Could not decode compressed depth image. You may need to change 'depth_header_size'!")
    # Normalize in place (dst is depth_img_raw) so the colormap spans 0-255.
    cv2.normalize(depth_img_raw, depth_img_raw, 0, 255, norm_type=cv2.NORM_MINMAX)
    im_color = cv2.applyColorMap(depth_img_raw, cv2.COLORMAP_JET)
    return im_color


def convert_image_to_cv2(msg):
    """
    Decode a ROS CompressedImage message into a cv2 (numpy) image.

    Args:
        msg: ROS message whose `data` attribute holds compressed image bytes.

    Returns:
        Decoded image as a numpy array.
    """
    # np.fromstring is deprecated/removed in modern NumPy; frombuffer is the
    # zero-copy replacement for binary data.
    np_arr = np.frombuffer(msg.data, np.uint8)
    image_np = cv2.imdecode(np_arr, cv2.IMREAD_UNCHANGED)
    return image_np


def create_video_from_images(input_path, output_path, output_name="video.mp4", frame_rate=12, resize=None):
    """
    Create an mp4 video from all .jpg images in a folder (sorted by filename).

    Args:
        input_path: folder containing the .jpg frames
        output_path: folder to write the video into
        output_name: output video filename
        frame_rate: frames per second
        resize: optional scale factor applied to each frame

    Returns:
        None

    Raises:
        Exception: if no .jpg images are found in input_path.
    """
    output_video_path = os.path.join(output_path, output_name)
    output_video_path_temp = os.path.join(output_path, "temp.mp4")

    filenames = sorted(glob.glob(os.path.join(input_path, "./*.jpg")))
    if not filenames:
        # Previously this fell through to an UnboundLocalError on `size`.
        raise Exception(f"No .jpg images found in {input_path}")

    out = None
    for filename in filenames:
        img = cv2.imread(filename)
        if resize:
            img = cv2.resize(img, (0, 0), fx=resize, fy=resize, interpolation=cv2.INTER_LANCZOS4)
        if out is None:
            # Open the writer lazily with the first frame's size instead of
            # buffering every frame in memory before writing.
            height, width = img.shape[:2]
            out = cv2.VideoWriter(
                output_video_path_temp, cv2.VideoWriter_fourcc(*"mp4v"), frame_rate, (width, height)
            )
        out.write(img)
    out.release()

    # Re-encode with libx264 for broad playback compatibility, then drop the temp file.
    subprocess.call(["ffmpeg", "-i", output_video_path_temp, "-vcodec", "libx264", "-y", output_video_path])
    os.remove(output_video_path_temp)
    return
/robologs_ros_utils-0.1.1a26-py3-none-any.whl/robologs_ros_utils/sources/ros1/ros_img_tools.py
0.591723
0.312796
ros_img_tools.py
pypi
import glob
import json
import os
import shutil
import tarfile
import uuid
from typing import Union
from zipfile import ZipFile


def split_folder_path_to_list(path: str) -> list:
    """
    This function splits a path into a list.
    Args:
        path (str): file path
    Returns:
        A list with path components
    """
    return os.path.normpath(path).split(os.sep)


def create_directory(path: str, delete_if_exists: bool = False) -> None:
    """
    This function creates a directory (including missing parents).
    Args:
        path (str): directory path
        delete_if_exists (bool): if True, an existing directory is deleted first
    Returns:
        None
    """
    if delete_if_exists and os.path.exists(path):
        shutil.rmtree(path)
    if not os.path.exists(path):
        os.makedirs(path)
    return


def check_file_exists(path: str) -> None:
    """
    This function checks if a file exists, and raises an exception if not.
    Args:
        path (str): input file path
    Raises:
        FileNotFoundError: if the path does not exist.  (FileNotFoundError is a
            subclass of Exception, so existing `except Exception` callers still work.)
    Returns:
        None
    """
    if not os.path.exists(path):
        raise FileNotFoundError(f"{path} does not exist.")
    return


def save_json(data: Union[dict, list], path: str) -> None:
    """
    This function saves a dict or list to a JSON file (pretty-printed, sorted keys).
    Args:
        data (dict or list): data to serialize
        path (str): output file path
    Returns:
        None
    """
    with open(path, "w") as f_json:
        json.dump(data, f_json, indent=4, sort_keys=True)
    return


def read_json(json_path: str):
    """
    This function reads a json file and returns a JSON object.
    Args:
        json_path (str): JSON file path
    Returns:
        JSON object (dict or list)
    """
    with open(json_path) as json_file:
        data = json.load(json_file)
    return data


def create_uuid() -> str:
    """
    This function returns a new random UUID string.
    Returns:
        UUID string
    """
    return str(uuid.uuid4())


def find_sub_folder(sub_folder_name: str, search_path: str) -> list:
    """
    This function finds all directories under search_path that directly
    contain a subfolder with the given name.
    Args:
        sub_folder_name (str): name of subfolder
        search_path (str): path of folder to be searched
    Returns:
        A list of parent directory paths
    """
    result = []
    # `dirs` (not `dir`) avoids shadowing the builtin; the old debug prints
    # of every walked folder are removed.
    for root, dirs, files in os.walk(search_path):
        if sub_folder_name in dirs:
            result.append(root)
    return result


def unzip_file_to_folder(path_zip_file: str, output_folder: str) -> None:
    """
    This function unzips a file to a specific folder location.
    Args:
        path_zip_file (str): absolute path of .zip file
        output_folder (str): absolute path of output folder
    Returns:
        None
    """
    with ZipFile(path_zip_file, "r") as zip_obj:
        zip_obj.extractall(output_folder)
    return


def untar_file_to_folder(path_tar_file: str, output_folder: str) -> None:
    """
    This function untars a file to a specific folder location.
    Args:
        path_tar_file (str): absolute path of .tar file
        output_folder (str): absolute path of output folder
    Returns:
        None
    """
    # Context manager guarantees the archive handle is closed on error too.
    with tarfile.open(path_tar_file) as tar_file:
        tar_file.extractall(output_folder)
    return


def get_all_files_of_type_in_directory(input_folder: str, file_format: str) -> list:
    """
    This function gets a list of all files of type "file_format" in a directory
    and its immediate subfolders (one level deep), sorted within each folder.
    Args:
        input_folder (str): input folder path
        file_format (str): file extension without the dot, e.g. "jpg"
    Returns:
        list of absolute file paths
    """
    subfolder_list = glob.glob(f"{input_folder}/*/")
    file_string = f"./*.{file_format}"
    files = list()
    # look for files in immediate subfolders first...
    for entry in subfolder_list:
        files = files + sorted(glob.glob(os.path.abspath(os.path.join(entry, file_string))))
    # ...then in the folder itself
    files = files + sorted(glob.glob(os.path.abspath(os.path.join(input_folder, file_string))))
    return files


def find_substring_path(input_folder: str, substring: str) -> list:
    """
    This function returns paths which contain a certain substring.
    (Return annotation fixed: glob.glob always returns a list, not a str.)
    Args:
        input_folder (str): input folder
        substring (str): substring
    Returns:
        list of matching paths
    """
    glob_str = f"{input_folder}*{substring}*"
    return glob.glob(glob_str)


def delete_files_of_type(input_folder: str, file_format_list: list = [".jpg", ".png"]) -> None:
    """
    This function deletes all files of the given types in input_folder.
    Args:
        input_folder (str): input folder
        file_format_list (list): list of file extensions to delete, e.g. [".jpg", ".png"]
            (the mutable default is safe here: it is never mutated)
    Returns:
        None
    """
    for file_format in file_format_list:
        for filename in sorted(glob.glob(os.path.join(input_folder, f"./*{file_format}"))):
            os.remove(filename)
    return
/robologs_ros_utils-0.1.1a26-py3-none-any.whl/robologs_ros_utils/utils/file_utils/file_utils.py
0.757705
0.246397
file_utils.py
pypi
import datetime

import deal
import pytz

from ..helpers.constants import DATE_FORMAT


@deal.has("import")
def convert_local_time_to_utc(original_time, time_zone: str):
    """Convert a naive local datetime to its naive-UTC equivalent.

    The input is first round-tripped through ``DATE_FORMAT`` so that its
    precision matches the format string, then localized to ``time_zone``
    and converted to UTC.  The returned datetime carries no tzinfo.

    Arguments:
        original_time (datetime): Naive datetime expressed in ``time_zone``.
        time_zone: IANA time-zone name, e.g. ``'US/Pacific'``.

    Returns:
        datetime: Naive datetime holding the UTC wall-clock time.
    """
    zone = pytz.timezone(time_zone)
    # Truncate precision to whatever DATE_FORMAT preserves before localizing.
    truncated = datetime.datetime.strptime(original_time.strftime(DATE_FORMAT), DATE_FORMAT)
    localized = zone.localize(truncated)
    return localized.astimezone(pytz.utc).replace(tzinfo=None)


@deal.pure
def convert_date_to_tick_tick_format(datetime_obj, tz: str):
    """Format a datetime as the date string TickTick expects.

    TickTick wants ISO 8601 with the colon stripped from the UTC offset:
    ``2020-12-23T01:56:07+00:00`` becomes ``2020-12-23T01:56:07+0000``.
    The datetime is first converted to UTC using ``tz``.

    Arguments:
        datetime_obj (datetime): Datetime to be formatted.
        tz: IANA time-zone name of ``datetime_obj``.

    Returns:
        str: TickTick-accepted date string.
    """
    utc_date = convert_local_time_to_utc(datetime_obj, tz).replace(tzinfo=datetime.timezone.utc)
    iso = utc_date.isoformat()
    # Remove the last ':' — the one inside the '+00:00' offset.
    head, _, tail = iso.rpartition(":")
    return head + tail
/robolson-0.3.83-py3-none-any.whl/rob/ticktick/helpers/time_methods.py
0.829457
0.838481
time_methods.py
pypi
from ..helpers.hex_color import check_hex_color, generate_hex_color from ..managers.check_logged_in import logged_in def _sort_string_value(sort_type: int) -> str: """ Returns the string corresponding to the sort type integer :param sort_type: :return: """ if sort_type not in {0, 1, 2, 3}: raise ValueError( f"Sort Number '{sort_type}' Is Invalid -> Must Be 0, 1, 2 or 3" ) else: sort_dict = {0: "project", 1: "dueDate", 2: "title", 3: "priority"} return sort_dict[sort_type] class TagsManager: """ Handles all interactions for tags. """ SORT_DICTIONARY = {0: "project", 1: "dueDate", 2: "title", 3: "priority"} def __init__(self, client_class): self._client = client_class self.access_token = self._client.access_token self.headers = self._client.HEADERS def _sort_string_value(self, sort_type: int) -> str: """ Returns the string corresponding to the sort type integer :param sort_type: :return: """ if sort_type not in {0, 1, 2, 3}: raise ValueError( f"Sort Number '{sort_type}' Is Invalid -> Must Be 0, 1, 2 or 3" ) return self.SORT_DICTIONARY[sort_type] def _check_fields( self, label: str = None, color: str = "random", parent_label: str = None, sort: int = None, ) -> dict: """ Checks the passed parameters and returns a dictionary of the objects :param label: Name of the tag :param color: Color of the tag :param parent_label: Parent tag name :param sort: Sort type of the tag :return: Dictionary of the valid objects """ if label is not None: # Make sure label is a string if not isinstance(label, str): raise TypeError(f"Label Must Be A String") # Tag names should not be repeated, so make sure passed name does not exist tag_list = self._client.get_by_fields( search="tags", name=label.lower() ) # Name is lowercase version of label if tag_list: raise ValueError(f"Invalid Tag Name '{label}' -> It Already Exists") # Check color_id if not isinstance(color, str): raise TypeError(f"Color Must Be A Hex Color String") if color.lower() == "random": color = generate_hex_color() # Random 
color will be generated elif color is not None: if not check_hex_color(color): raise ValueError("Invalid Hex Color String") # Check parent_name if parent_label is not None: if not isinstance(parent_label, str): raise TypeError(f"Parent Name Must Be A String") parent_label = parent_label.lower() parent = self._client.get_by_fields(search="tags", name=parent_label) if not parent: raise ValueError( f"Invalid Parent Name '{parent_label}' -> Does Not Exist" ) # Check sort_type if sort is None: sort = "project" else: sort = _sort_string_value(sort) # Return our dictionary of checked and changed values return { "label": label, "color": color, "parent": parent_label, "sortType": sort, "name": label.lower(), } def builder( self, label: str, color: str = "random", parent: str = None, sort: int = None ) -> dict: """ Creates and returns a local tag object. Helper method for [create][managers.tags.TagsManager.create] to make batch creating projects easier. !!! note The parent tag must already exist prior to calling this method. Arguments: label: Desired label of the tag - tag labels cannot be repeated. color: Hex color string. A random color will be generated if no color is specified. parent: The label of the parent tag if desired (include capitals in the label if it exists). sort: The desired sort type of the tag. Valid integer values are present in the [sort dictionary](tags.md#sort-dictionary). The default sort value will be by 'project' Returns: A dictionary containing all the fields necessary to create a tag remotely. Raises: TypeError: If any of the types of the arguments are wrong. ValueError: Tag label already exists. ValueError: Parent tag does not exist. ValueError: The hex string color inputted is invalid. !!! example ```python tag_name = 'Books' # The name for our tag parent_name = 'Productivity' # The desired parent tag -> this should already exist. 
color_code = '#1387c4' sort_type = 1 # Sort by `dueDate` tag_object = client.tag.builder(tag_name, parent=parent_name, color=color_code, sort=sort_type) ``` ??? success "Result" The required fields to create a tag object are created and returned in a dictionary. ```python {'label': 'Fiction', 'color': '#1387c4', 'parent': 'books', 'sortType': 'dueDate', 'name': 'fiction'} ``` """ # Perform checks return self._check_fields(label, color=color, parent_label=parent, sort=sort) def create( self, label, color: str = "random", parent: str = None, sort: int = None ): """ Creates a tag remotely. Supports single tag creation or batch tag creation. !!! tip Allows creation with a label that may normally not be allowed by `TickTick` for tags. Normal `TickTick` excluded characters are: \\ / " # : * ? < > | Space Arguments: label (str or list): **Single Tag (str)**: The desired label of the tag. Tag labels cannot be repeated. **Multiple Tags (list)**: A list of tag objects created using the [builder][managers.tags.TagsManager.builder] method. color: Hex color string. A random color will be generated if no color is specified. parent: The label of the parent tag if desired (include capitals in if it exists). sort: The desired sort type of the tag. Valid integer values are present in the [sort dictionary](tags.md#sort-dictionary). The default sort value will be by 'project' Returns: dict or list: **Single Tag (dict)**: The created tag object dictionary. **Multiple Tags (list)**: A list of the created tag object dictionaries. Raises: TypeError: If any of the types of the arguments are wrong. ValueError: Tag label already exists. ValueError: Parent tag does not exist. ValueError: The hex string color inputted is invalid. RuntimeError: The tag(s) could not be created. !!! example "Single Tag" === "Just A Label" ```python tag = client.tag.create('Fun') ``` ??? success "Result" The tag object dictionary is returned. 
```python {'name': 'fun', 'label': 'Fun', 'sortOrder': 0, 'sortType': 'project', 'color': '#9b69f3', 'etag': '7fc8zb58'} ``` Our tag is created. ![image](https://user-images.githubusercontent.com/56806733/104658773-5bbb5500-5678-11eb-9d44-27214203d70e.png) === "Specify a Color" A random color can be generated using [generate_hex_color][helpers.hex_color.generate_hex_color]. However, just not specifying a color will automatically generate a random color (as seen in the previous tab) You can always specify the color that you want. ```python tag = client.tag.create('Fun', color='#86bb6d') ``` ??? success "Result" The tag object dictionary is returned and our project is created with the color specified. ```python {'name': 'fun', 'label': 'Fun', 'sortOrder': 0, 'sortType': 'project', 'color': '#86bb6d', 'etag': '8bzzdws3'} ``` ![image](https://user-images.githubusercontent.com/56806733/104659184-0c295900-5679-11eb-9f3c-2cd154c0500c.png) === "Specifying a Parent Tag" Tags can be nested one level. To create a tag that is nested, include the label of the parent tag. The parent tag should already exist. ```python tag = client.tag.create('Fun', parent='Hobbies') ``` ??? success "Result" The tag object dictionary is returned and our tag is created nested under the parent tag. ```python {'name': 'fun', 'label': 'Fun', 'sortOrder': 0, 'sortType': 'project', 'color': '#d2a6e4', 'etag': 'nauticx1', 'parent': 'hobbies'} ``` **Before** ![image](https://user-images.githubusercontent.com/56806733/104659785-24e63e80-567a-11eb-9a62-01ebca55e649.png) **After** ![image](https://user-images.githubusercontent.com/56806733/104659814-33ccf100-567a-11eb-8dca-c91aea68b4c7.png) === "Sort Type" You can specify the sort type of the created tag using integer values from the [sort dictionary](#sort-dictionary). ```python tag = client.tag.create('Fun', sort=2) # Sort by `title` ``` ??? success "Result" The tag object dictionary is returned and our tag has the specified sort type. 
```python {'name': 'fun', 'label': 'Fun', 'sortOrder': 0, 'sortType': 'title', 'color': '#e7e7ba', 'etag': 'n4k3pezc'} ``` ![image](https://user-images.githubusercontent.com/56806733/104660156-e4d38b80-567a-11eb-8c61-8fb874a515a2.png) !!! example "Multiple Tag Creation (batch)" To create multiple tags, build the tag objects first using the [builder][managers.projects.ProjectManager.builder] method. Pass in a list of the project objects to create them remotely. ```python parent_tag = client.tag.create('Hobbies') # Create a parent tag. # We will create tag objects using builder that will be nested under the parent tag fun_tag = client.tag.builder('Fun', sort=2, parent='Hobbies') read_tag = client.tag.builder('Read', color='#d2a6e4', parent='Hobbies') movie_tag = client.tag.builder('Movies', parent='Hobbies') # Create the tags tag_list = [fun_tag, read_tag, movie_tag] created_tags = client.tag.create(tag_list) ``` ??? success "Result" The tag object dictionaries are returned in a list. ```python [{'name': 'fun', 'label': 'Fun', 'sortOrder': 0, 'sortType': 'title', 'color': '#172d1c', 'etag': '1tceclp4', 'parent': 'hobbies'}, {'name': 'read', 'label': 'Read', 'sortOrder': 0, 'sortType': 'project', 'color': '#d2a6e4', 'etag': 'ykdem8dg', 'parent': 'hobbies'}, {'name': 'movies', 'label': 'Movies', 'sortOrder': 0, 'sortType': 'project', 'color': '#94a5f8', 'etag': 'o0nifkbv', 'parent': 'hobbies'}] ``` ![image](https://user-images.githubusercontent.com/56806733/104660625-cb7f0f00-567b-11eb-8649-68646870ccfa.png) """ batch = False # Bool signifying batch create or not if isinstance(label, list): # Batch tag creation triggered obj = label # Assuming all correct objects batch = True else: if not isinstance(label, str): raise TypeError( "Required Positional Argument Must Be A String or List of Tag Objects" ) # Create a single object obj = self.builder(label=label, color=color, parent=parent, sort=sort) if not batch: obj = [obj] url = self._client.BASE_URL + "batch/tag" payload 
= {"add": obj} response = self._client.http_post( url, json=payload, cookies=self._client.cookies, headers=self.headers ) self._client.sync() if not batch: return self._client.get_by_etag( self._client.parse_etag(response), search="tags" ) else: etag = response["id2etag"] etag2 = list(etag.keys()) # Tag names are out of order labels = [x["name"] for x in obj] # Tag names are in order items = [""] * len(obj) # Create enough spots for the objects for tag in etag2: index = labels.index(tag) # Object of the index is here actual_etag = etag[tag] # Get the actual etag found = self._client.get_by_etag(actual_etag, search="tags") items[index] = found # Place at the correct index if len(items) == 1: return items[0] else: return items def rename(self, old: str, new: str) -> dict: """ Renames a tag. Arguments: old: Current label of the tag to be changed. new: Desired new label of the tag. Returns: The tag object with the updated label. Raises: TypeError: If `old` and `new` are not strings. ValueError: If the `old` tag label does not exist. ValueError: If the `new` tag label already exists. RuntimeError: If the renaming was unsuccessful. !!! example "Changing a Tag's Label" Pass in the current label of the tag, and the desired new label of the tag. ```python # Lets assume that we have a tag that already exists named "Movie" old_label = "Movie" new_label = "Movies" updated_tag = client.tag.rename(old_label, new_label) ``` ??? success "Result" The updated tag object dictionary is returned. 
```python {'name': 'movies', 'label': 'Movies', 'sortOrder': 0, 'sortType': 'project', 'color': '#134397', 'etag': 'qer1jygy'} ``` **Before** ![image](https://user-images.githubusercontent.com/56806733/104661255-fcac0f00-567c-11eb-9f10-69af8b50e0b4.png) **After** ![image](https://user-images.githubusercontent.com/56806733/104661299-19e0dd80-567d-11eb-825f-758d83178295.png) """ # Check that both old and new are strings if not isinstance(old, str) or not isinstance(new, str): raise TypeError("Old and New Must Be Strings") # Make sure the old tag exists old = old.lower() # Check if the tag object exists obj = self._client.get_by_fields(name=old, search="tags") if not obj: raise ValueError(f"Tag '{old}' Does Not Exist To Rename") # Make sure the new tag does not exist temp_new = new.lower() # Check if the tag object exists found = self._client.get_by_fields(name=temp_new, search="tags") if found: raise ValueError(f"Name '{new}' Already Exists -> Cannot Duplicate Name") url = self._client.BASE_URL + "tag/rename" payload = {"name": obj["name"], "newName": new} response = self._client.http_put( url, json=payload, cookies=self._client.cookies, headers=self.headers ) self._client.sync() # Response from TickTick does not return the new etag of the object, we must find it ourselves new_obj = self._client.get_by_fields(name=temp_new, search="tags") # Return the etag of the updated object return self._client.get_by_etag(new_obj["etag"], search="tags") def color(self, label: str, color: str) -> dict: """ Change the color of a tag. For batch changing colors, see [update][managers.tags.TagsManager.update]. Arguments: label: The label of the tag to be changed. color: The new desired hex color string. Returns: The updated tag dictionary object. Raises: TypeError: If `label` or `color` are not strings. ValueError: If the tag `label` does not exist. ValueError: If `color` is not a valid hex color string. RuntimeError: If changing the color was not successful. !!! 
example "Changing a Tag's Color" ```python # Lets assume that we have a tag named "Movies" that we want to change the color for. new_color = '#134397' movies_updated = client.tag.color('Movies', new_color) ``` ??? success "Result" The updated tag dictionary object is returned. ```python {'name': 'movies', 'label': 'Movies', 'sortOrder': 0, 'sortType': 'project', 'color': '#134397', 'etag': 'wwb49yfr'} ``` **Before** ![image](https://user-images.githubusercontent.com/56806733/104661749-0eda7d00-567e-11eb-836f-3a8851bcf9a5.png) **After** ![image](https://user-images.githubusercontent.com/56806733/104661860-55c87280-567e-11eb-93b5-054fa4f1104a.png) """ if not isinstance(label, str) or not isinstance(color, str): raise TypeError("Label and Color Must Be Strings") # Get the object label = label.lower() obj = self._client.get_by_fields(name=label, search="tags") if not obj: raise ValueError(f"Tag '{label}' Does Not Exist To Update") # Check the color if not check_hex_color(color): raise ValueError(f"Hex Color String '{color}' Is Not Valid") obj["color"] = color # Set the color url = self._client.BASE_URL + "batch/tag" payload = {"update": [obj]} response = self._client.http_post( url, json=payload, cookies=self._client.cookies, headers=self.headers ) self._client.sync() return self._client.get_by_etag(response["id2etag"][obj["name"]]) def sorting(self, label: str, sort: int) -> dict: """ Change the sort type of a tag. For batch changing sort types, see [update][managers.tags.TagsManager.update]. Arguments: label: The label of the tag to be changed. sort: The new sort type specified by an integer 0-3. See [sort dictionary](tags.md#sort-dictionary). Returns: The updated tag dictionary object. Raises: TypeError: If `label` is not a string or if `sort` is not an int. ValueError: If the tag `label` does not exist. RuntimeError: If the updating was unsuccessful. !!! 
example "Changing the Sort Type" ```python # Lets assume that we have a tag named "Movies" with the sort type "project" changed_sort_type = client.tag.sorting("Movies", 1) # Sort by 'dueDate' ``` ??? success "Result" The updated task dictionary object is returned. ```python {'name': 'movies', 'label': 'Movies', 'sortOrder': 0, 'sortType': 'dueDate', 'color': '#134397', 'etag': 'fflj8iy0'} ``` **Before** ![image](https://user-images.githubusercontent.com/56806733/104663625-3f241a80-5682-11eb-93a7-73d280c59b3e.png) **After** ![image](https://user-images.githubusercontent.com/56806733/104663663-5531db00-5682-11eb-9440-5673a70840b4.png) """ if not isinstance(label, str) or not isinstance(sort, int): raise TypeError("Label Must Be A String and Sort Must Be An Int") # Get the object label = label.lower() obj = self._client.get_by_fields(name=label, search="tags") if not obj: raise ValueError(f"Tag '{label}' Does Not Exist To Update") sort = self._sort_string_value(sort) # Get the sort string for the value obj["sortType"] = sort # set the object field url = self._client.BASE_URL + "batch/tag" payload = {"update": [obj]} response = self._client.http_post( url, json=payload, cookies=self._client.cookies, headers=self.headers ) self._client.sync() return self._client.get_by_etag(response["id2etag"][obj["name"]]) def nesting(self, child: str, parent: str) -> dict: """ Update tag nesting. Move an already created tag to be nested underneath a parent tag - or ungroup an already nested tag. !!! warning "Nesting Tags More Than One Level Does Not Work" !!! example === "Nesting Explanation" ```md Parent Tag -> Level Zero Child Tag 1 -> Level One: This is the most nesting that is allowed by TickTick for tags. Child Tag 2 -> Level Two: Not allowed ``` Arguments: child: Label of the tag to become the child parent: Label of the tag that will become the parent. Returns: The updated tag object dictionary. 
Raises: TypeError: If `child` and `parent` are not strings ValueError: If `child` does not exist to update. ValueError: If `parent` does not exist. RuntimeError: If setting the parent was unsuccessful. !!! example "Nesting" === "Nesting A Tag" To nest a tag underneath another tag, pass in the labels of the child and parent. ```python # Lets assume that we have a tag named "Movies" # We have another tag named "Hobbies" that we want to make the parent to "Movies" child = "Movies" parent = "Hobbies" nesting_update = client.tag.nesting(child, parent) ``` ??? success "Result" The updated child tag dictionary object is returned. ```python {'name': 'movies', 'label': 'Movies', 'sortOrder': 0, 'sortType': 'dueDate', 'color': '#134397', 'etag': 'ee34aft9', 'parent': 'hobbies'} ``` **Before** ![image](https://user-images.githubusercontent.com/56806733/104665300-da6abf00-5685-11eb-947f-889187cec008.png) **After** ![image](https://user-images.githubusercontent.com/56806733/104665366-f706f700-5685-11eb-93eb-9316befec5fc.png) === "Changing The Parent Of An Already Nested Tag" If the tag is already nested, changing the parent is still no different. ```python # We have a tag named "Movies" that is already nested underneath "Hobbies" # We want to nest "Movies" underneath the tag "Fun" instead. child = "Movies" parent = "Fun" nesting_update = client.tag.nesting(child, parent) ``` ??? success "Result" The updated child tag dictionary object is returned. ```python {'name': 'movies', 'label': 'Movies', 'sortOrder': 0, 'sortType': 'dueDate', 'color': '#134397', 'etag': '91qpuq71', 'parent': 'fun'} ``` **Before** ![image](https://user-images.githubusercontent.com/56806733/104665599-ab088200-5686-11eb-8b36-5ee873289db7.png) **After** ![image](https://user-images.githubusercontent.com/56806733/104665821-35e97c80-5687-11eb-8098-426816970f3e.png) === "Un-grouping A Child Tag" If the tag is nested and you want to ungroup it, pass in `None` for `parent`. 
```python # We have a tag named "Movies" that is nested underneath "Fun" # We don't want to have "Movies" nested anymore. child = "Movies" parent = None nesting_update = client.tag.nesting(child, parent) ``` ??? success "Result" The updated child tag dictionary object is returned. ```python {'name': 'movies', 'label': 'Movies', 'sortOrder': 0, 'sortType': 'dueDate', 'color': '#134397', 'etag': 'jcoc94p6'} ``` **Before** ![image](https://user-images.githubusercontent.com/56806733/104666038-be681d00-5687-11eb-8490-83c370977267.png) **After** ![image](https://user-images.githubusercontent.com/56806733/104666080-dcce1880-5687-11eb-9ca8-5abcdb4109ba.png) """ if not isinstance(child, str): raise TypeError("Inputs Must Be Strings") if parent is not None: if not isinstance(parent, str): raise TypeError("Inputs Must Be Strings") # Get the object child = child.lower() obj = self._client.get_by_fields(name=child, search="tags") if not obj: raise ValueError(f"Tag '{child}' Does Not Exist To Update") # Four Cases # Case 1: No Parent -> Want a Parent # Case 2: No Parent -> Doesn't Want a Parent # Case 3: Has Parent -> Wants a Different Parent # Case 4: Has Parent -> Doesn't Want a Parent # Case 1: Determine if the object has a parent try: if obj["parent"]: # It has a parent if parent is not None: # Case 3 # check if the parent is already the same, if it is just return if obj["parent"] == parent.lower(): return obj else: new_p = parent.lower() obj["parent"] = new_p else: new_p = obj["parent"] # Case 4 obj["parent"] = "" elif obj["parent"] is None: raise ValueError("Parent Does Not Exist") except KeyError: # It does not have a parent if parent is not None: # Wants a different parent new_p = parent.lower() # -> Case 1 obj["parent"] = new_p else: # Doesn't want a parent -> Case 2 return obj # We don't have to do anything if no parent and doesn't want a parent # Have to find the project pobj = self._client.get_by_fields(name=new_p, search="tags") if not pobj: raise ValueError(f"Tag 
'{parent}' Does Not Exist To Set As Parent") url = self._client.BASE_URL + "batch/tag" payload = {"update": [pobj, obj]} response = self._client.http_post( url, json=payload, cookies=self._client.cookies, headers=self.headers ) self._client.sync() return self._client.get_by_etag(response["id2etag"][obj["name"]], search="tags") def update(self, obj): """ Generic update method. Supports single and batch tag update. !!! important Updating tag properties like `parent` and renaming tags must be completed through their respective class methods to work: [nesting][managers.tags.TagsManager.nesting] and [renaming][managers.tags.TagsManager.rename]. These updates use different endpoints to the traditional updating. !!! important You are able to batch update sorting and color of tag objects through this method. If you only need to update single tags, it is recommended you use the class methods: [sorting][managers.tags.TagsManager.sorting] and [color][managers.tags.TagsManager.color] !!! info More information on Tag Object properties [here](tags.md#example-ticktick-tag-dictionary) Arguments: obj (dict or list): **Single Tag (dict)**: The tag dictionary object to update. **Multiple Tags (list)**: The tag dictionaries to update in a list. Returns: dict or list: **Single Tag (dict)**: The updated tag dictionary object. **Multiple Tags (list)**: The updated tag dictionaries in a list. Raises: TypeError: If `obj` is not a dict or list. RuntimeError: If the updating was unsuccessful. !!! example "Updating Tags" === "Single Tag Update" Change a field directly in the task object then pass it to the method. See above for more information about what can actually be successfully changed through this method. ```python # Lets say we have a tag named "Fun" that we want to change the color of. # We can change the color by updating the field directly. 
fun_tag = client.get_by_fields(label='Fun', search='tags') # Get the tag object new_color = '#d00000' fun_tag['color'] = new_color # Change the color updated_fun_tag = client.tag.update(fun_tag) # Pass the object to update. ``` ??? success "Result" The updated tag dictionary object is returned. ```python {'name': 'fun', 'label': 'Fun', 'sortOrder': 2199023255552, 'sortType': 'project', 'color': '#d00000', 'etag': 'i85c8ijo'} ``` **Before** ![image](https://user-images.githubusercontent.com/56806733/104669635-4aca0e00-568f-11eb-8bc6-9572a432b623.png) **After** ![image](https://user-images.githubusercontent.com/56806733/104669824-ac8a7800-568f-11eb-93d6-ac40235bcd3f.png) === "Multiple Tag Update" Changing the fields is the same as with updating a single tag, except you will need to pass the objects in a list to the method. ```python # Lets update the colors for three tags: "Fun", "Hobbies", and "Productivity" fun_tag = client.get_by_fields(label="Fun", search='tags') hobbies_tag = client.get_by_fields(label="Hobbies", search='tags') productivity_tag = client.get_by_fields(label="Productivity", search='tags') fun_color_new = "#951a63" hobbies_color_new = "#0f8a1f" productivity_color_new = "#493293" # Change the fields directly fun_tag['color'] = fun_color_new hobbies_tag['color'] = hobbies_color_new productivity_tag['color'] = productivity_color_new # The objects must be passed in a list update_tag_list = [fun_tag, hobbies_tag, productivity_tag] updated_tags = client.tag.update(update_tag_list) ``` ??? success "Result" The updated task dictionary objects are returned in a list. 
```python [{'name': 'fun', 'label': 'Fun', 'sortOrder': -1099511627776, 'sortType': 'project', 'color': '#951a63', 'etag': 'n543ajq2'}, {'name': 'hobbies', 'label': 'Hobbies', 'sortOrder': -549755813888, 'sortType': 'project', 'color': '#0f8a1f', 'etag': 'j4nspkg4'}, {'name': 'productivity', 'label': 'Productivity', 'sortOrder': 0, 'sortType': 'project', 'color': '#493293', 'etag': '34qz9bzq'}] ``` **Before** ![image](https://user-images.githubusercontent.com/56806733/104670498-cd070200-5690-11eb-9fdd-0287fa6c7e7b.png) **After** ![image](https://user-images.githubusercontent.com/56806733/104670531-dc864b00-5690-11eb-844a-899031335922.png) """ batch = False # Bool signifying batch create or not if isinstance(obj, list): # Batch tag creation triggered obj_list = obj # Assuming all correct objects batch = True else: if not isinstance(obj, dict): raise TypeError( "Required Positional Argument Must Be A Dict or List of Tag Objects" ) if not batch: obj_list = [obj] url = self._client.BASE_URL + "batch/tag" payload = {"update": obj_list} response = self._client.http_post( url, json=payload, cookies=self._client.cookies, headers=self.headers ) self._client.sync() if not batch: return self._client.get_by_etag( self._client.parse_etag(response), search="tags" ) else: etag = response["id2etag"] etag2 = list(etag.keys()) # Tag names are out of order labels = [x["name"] for x in obj_list] # Tag names are in order items = [""] * len(obj_list) # Create enough spots for the objects for tag in etag2: index = labels.index(tag) # Object of the index is here actual_etag = etag[tag] # Get the actual etag found = self._client.get_by_etag(actual_etag, search="tags") items[index] = found # Place at the correct index return items def merge(self, label, merged: str): """ Merges the tasks of the passed tags into the argument `merged` and deletes all the tags except `merged` Args can be individual label strings, or a list of strings Arguments: label (str or list): **Single Tag (str)**: The 
label string of the tag to merge. **Multiple Tags (list)**: The label strings of the tags to merge in a list. merged: The label of the tag that will remain after the merge. Returns: dict: The tag dictionary object that remains after the merge. Raises: TypeError: If `merged` is not a str or if `label` is not a str or list. ValueError: If any of the labels do not exist. RuntimeError: If the merge could not be successfully completed. !!! example "Merging Tags" === "Merging Two Tags" Merging two tags requires the label of the tag that you want kept after the merge, and the label of the tag that will be merged. Lets assume that we have two tags: "Work" and "School". I want to merge the tag "School" into "Work". What should happen is that any tasks that are tagged "School", will be updated to have the tag "Work", and the "School" tag will be deleted. ```python merged_tags = client.tag.merge("School", "Work") ``` ??? success "Result" The tag that remains after the merge is returned. ```python {'name': 'work', 'label': 'Work', 'sortOrder': 2199023255552, 'sortType': 'project', 'color': '#3876E4', 'etag': 'eeh8zrup'} ``` **Before** "School" has two tasks that have it's tag. ![image](https://user-images.githubusercontent.com/56806733/104680244-45c38980-56a4-11eb-968d-884160c77247.png) "Work" has no tasks. ![image](https://user-images.githubusercontent.com/56806733/104680366-90dd9c80-56a4-11eb-975f-5e769e9ea491.png) **After** "School" has been deleted. The tasks that used to be tagged with "School" are now tagged with "Work". ![image](https://user-images.githubusercontent.com/56806733/104680576-0c3f4e00-56a5-11eb-9536-ef3a7fcf20ec.png) === "Merging Three Or More Tags" Merging multiple tags into a single tag requires passing the labels of the tags to merge in a list. Lets assume that we have three tags: "Work", "School", and "Hobbies" . I want to merge the tag "School" and the tag "Hobbies" into "Work". 
What should happen is that any tasks that are tagged with "School" or "Hobbies", will be updated to have the tag "Work", and the "School" and "Hobbies" tags will be deleted. ```python merge_tags = ["School", "Hobbies"] result = client.tag.merge(merge_tags, "Work") ``` ??? success "Result" The tag that remains after the merge is returned. ```python {'name': 'work', 'label': 'Work', 'sortOrder': 2199023255552, 'sortType': 'project', 'color': '#3876E4', 'etag': 'ke23lp06'} ``` **Before** "School" has two tasks. ![image](https://user-images.githubusercontent.com/56806733/104681135-7ad0db80-56a6-11eb-81dd-03e4a151cfd9.png) "Hobbies" has two tasks. ![image](https://user-images.githubusercontent.com/56806733/104681104-67257500-56a6-11eb-99b0-57bbb876a59e.png) "Work" has one task. ![image](https://user-images.githubusercontent.com/56806733/104681164-89b78e00-56a6-11eb-99a8-c85ef418d2a0.png) **After** "Work" has five tasks now, and the tags "School" and "Hobbies" have been deleted. ![image](https://user-images.githubusercontent.com/56806733/104681239-b7043c00-56a6-11eb-9b45-5522b9c69cb0.png) """ # Make sure merged is a string if not isinstance(merged, str): raise ValueError("Merged Must Be A String") # Make sure label is a string or list if not isinstance(label, str) and not isinstance(label, list): raise ValueError(f"Label must be a string or a list.") # Lowercase merged merged = merged.lower() # Make sure merged exists kept_obj = self._client.get_by_fields(name=merged, search="tags") if not kept_obj: raise ValueError(f"Kept Tag '{merged}' Does Not Exist To Merge") merge_queue = [] # Verify all args are valid, and add them to a list if isinstance(label, str): string = label.lower() # Make sure it exists retrieved = self._client.get_by_fields(name=string, search="tags") if not retrieved: raise ValueError(f"Tag '{label}' Does Not Exist To Merge") merge_queue.append(retrieved) else: for ( item ) in ( label ): # Loop through the items in the list and check items are a string 
and exist # Make sure the item is a string if not isinstance(item, str): raise ValueError(f"Item '{item}' Must Be A String") string = item.lower() # Make sure it exists found = self._client.get_by_fields(name=string, search="tags") if not found: raise ValueError(f"Tag '{item}' Does Not Exist To Merge") merge_queue.append(found) for labels in merge_queue: # Merge url = self._client.BASE_URL + "tag/merge" payload = {"name": labels["name"], "newName": kept_obj["name"]} self._client.http_put( url, json=payload, cookies=self._client.cookies, headers=self.headers ) self._client.sync() return kept_obj def delete(self, label): """ Delete tag(s). Supports single tag deletion and "mock" batch tag deletion. !!! info Batch deleting for tags is not supported by TickTick. However, passing in a list of labels to delete will "mock" batch deleting - but individual requests will have to be made for each deletion. Arguments: label (str or list): **Single Tag (str)**: The label of the tag. **Multiple Tags (list)**: A list of tag label strings. Returns: dict or list: **Single Tag (dict)**: The dictionary object of the deleted tag. **Multiple Tags (list)**: The dictionary objects of the deleted tags in a list. Raises: TypeError: If `label` is not a string or list. ValueError: If a label does not exist. RuntimeError: If the tag could not be deleted successfully. !!! example "Tag Deletion" === "Single Tag Deletion" Deleting a single tag requires passing in the label string of the tag. ```python # Lets delete a tag named "Fun" delete_tag = client.tag.delete("Fun") ``` ??? success "Result" The dictionary object of the deleted tag returned. 
```python {'name': 'fun', 'label': 'Fun', 'sortOrder': -3298534883328, 'sortType': 'project', 'color': '#A9949E', 'etag': '32balm5l'} ``` **Before** "Fun" Tag Exists ![image](https://user-images.githubusercontent.com/56806733/104668024-2c164800-568c-11eb-853e-5b7eba1f4528.png) **After** "Fun" Tag Does Not Exist ![image](https://user-images.githubusercontent.com/56806733/104667768-ac887900-568b-11eb-9bfb-597c752e4c3b.png) === "Multiple Tag Deletion" Deleting multiple tags requires passing the label strings of the tags in a list. ```python # Lets delete tags named "Fun", "Movies", and "Hobbies" delete_labels = ["Fun", "Movies", "Hobbies"] deleted_tags = client.tag.delete(delete_labels) ``` ??? success "Result" The dictionary object of the deleted tags returned in a list. ```python [{'name': 'fun', 'label': 'Fun', 'sortOrder': -3848290697216, 'sortType': 'project', 'color': '#FFD966', 'etag': '56aa6dva'}, {'name': 'movies', 'label': 'Movies', 'sortOrder': -2748779069440, 'sortType': 'dueDate', 'color': '#134397', 'etag': 's0czro3e'}, {'name': 'hobbies', 'label': 'Hobbies', 'sortOrder': -2199023255552, 'sortType': 'project', 'color': '#ABA6B5', 'etag': 'shu2xbvq'}] ``` **Before** All three tags exist. ![image](https://user-images.githubusercontent.com/56806733/104668135-61bb3100-568c-11eb-8707-314deb42cd1d.png) **After** All three tags don't exist. 
![image](https://user-images.githubusercontent.com/56806733/104668185-7b5c7880-568c-11eb-8da0-aaee68d53500.png) """ # Determine if the tag exists if not isinstance(label, str) and not isinstance(label, list): raise TypeError("Label Must Be A String or List Of Strings") url = self._client.BASE_URL + "tag" if isinstance(label, str): label = [label] # If a singular string we are going to add it to a list objects = [] for lbl in label: if not isinstance(lbl, str): raise TypeError(f"'{lbl}' Must Be A String") lbl = lbl.lower() tag_obj = self._client.get_by_fields( name=lbl, search="tags" ) # Get the tag object if not tag_obj: raise ValueError(f"Tag '{lbl}' Does Not Exist To Delete") # We can assume that only one tag has the name params = {"name": tag_obj["name"]} response = self._client.http_delete( url, params=params, cookies=self._client.cookies, headers=self.headers ) # Find the tag in the tags list and delete it, then return the deleted object objects.append( self._client.delete_from_local_state( search="tags", etag=tag_obj["etag"] ) ) self._client.sync() if len(objects) == 1: return objects[0] else: return objects
/robolson-0.3.83-py3-none-any.whl/rob/ticktick/managers/tags.py
0.895831
0.606906
tags.py
pypi
from ..helpers.hex_color import check_hex_color, generate_hex_color from ..managers.check_logged_in import logged_in class ProjectManager: """ Handles all interactions for projects. """ def __init__(self, client_class): self._client = client_class self.access_token = self._client.access_token self.headers = self._client.HEADERS def builder( self, name: str, color: str = "random", project_type: str = "TASK", folder_id: str = None, ) -> dict: """ Creates and returns a local project object. Helper method for [create][managers.projects.ProjectManager.create] to make batch creating projects easier. !!! note The project [folder][managers.projects.ProjectManager.create_folder] must already exist prior to calling this method. Arguments: name: Desired name of the project - project names cannot be repeated color: Hex color string. A random color will be generated if no color is specified. project_type: 'TASK' or 'NOTE' folder_id: The project folder id that the project should be placed under (if desired) Returns: A dictionary containing all the fields necessary to create a remote project. Raises: TypeError: If any of the types of the arguments are wrong. ValueError: Project name already exists ValueError: Project Folder corresponding to the ID does not exist. ValueError: The hex string color inputted is invalid. Argument rules are shared with [create][managers.projects.ProjectManager.create], so for more examples on how to use the arguments see that method. !!! example ```python project_name = 'Work' # The name of our project # Lets assume that we have a project group folder that already exists named 'Productivity' productivity_folder = client.get_by_fields(name='Productivity', search='project_folders') productivity_id = productivity_folder['id'] # Build the object project_object = client.project.builder(project_name, folder_id=productivity_id) ``` ??? success "Result" ```python # The fields needed for a successful project creation are set. 
{'name': 'Work', 'color': '#665122', 'kind': 'TASK', 'groupId': '5ffe11b7b04b356ce74d49da'} ``` """ if not isinstance(name, str): raise TypeError("Name must be a string") if not isinstance(color, str) and color is not None: raise TypeError("Color must be a string") if not isinstance(project_type, str): raise TypeError("Project type must be a string") if not isinstance(folder_id, str) and folder_id is not None: raise TypeError("Folder id must be a string") # Go through self.state['lists'] and determine if the name already exists id_list = self._client.get_by_fields(search="projects", name=name) if id_list: raise ValueError(f"Invalid Project Name '{name}' -> It Already Exists") # Determine if parent list exists if folder_id is not None: parent = self._client.get_by_id(folder_id, search="project_folders") if not parent: raise ValueError(f"Parent Id {folder_id} Does Not Exist") # Make sure project type is valid if project_type != "TASK" and project_type != "NOTE": raise ValueError( f"Invalid Project Type '{project_type}' -> Should be 'TASK' or 'NOTE'" ) # Check color_id if color == "random": color = generate_hex_color() # Random color will be generated elif color is not None: if not check_hex_color(color): raise ValueError("Invalid Hex Color String") return { "name": name, "color": color, "kind": project_type, "groupId": folder_id, } def create( self, name, color: str = "random", project_type: str = "TASK", folder_id: str = None, ): """ Creates a project remotely. Supports single project creation or batch project creation. Arguments: name (str or list): **Single Project** (str) : The desired name of the project. Project names cannot be repeated. **Multiple Projects** (list) : A list of project objects created using the [builder][managers.projects.ProjectManager.builder] method. color: Hex color string. A random color will be generated if no color is specified. 
project_type: 'TASK' or 'NOTE' folder_id: The project folder id that the project should be placed under (if desired) Returns: dict or list: **Single Project**: Return the dictionary of the object. **Multiple Projects**: Return a list of dictionaries containing all the created objects in the same order as created. Raises: TypeError: If any of the types of the arguments are wrong. ValueError: Project name already exists ValueError: Project Folder corresponding to the ID does not exist. ValueError: The hex string color inputted is invalid. RuntimeError: The project(s) could not be created. !!! example "Single Project" === "Just A Name" ```python project = client.project.create('School') ``` ??? success "Result" ```python # The dictionary object of the created project is returned. {'id': '5ffe1673e4b062d60dd29dc0', 'name': 'School', 'isOwner': True, 'color': '#51b9e3', 'inAll': True, 'sortOrder': 0, 'sortType': None, 'userCount': 1, 'etag': 'uerkdkcd', 'modifiedTime': '2021-01-12T21:36:51.890+0000', 'closed': None, 'muted': False, 'transferred': None, 'groupId': None, 'viewMode': None, 'notificationOptions': None, 'teamId': None, 'permission': None, 'kind': 'TASK'} ``` Our project is created. [![project-create.png](https://i.postimg.cc/d1NNqN7F/project-create.png)](https://postimg.cc/PpZQy4zV) === "Specify a Color" A random color can be generated using [generate_hex_color][helpers.hex_color.generate_hex_color]. However, just not specifying a color will automatically generate a random color (as seen in the previous tab). You can always specify the color that you want. ```python project = client.project.create('Work', color='#86bb6d') ``` ??? success "Result" Our project is created with the color specified. [![project-color.png](https://i.postimg.cc/K8ppnvrb/project-color.png)](https://postimg.cc/5XvmJJRK) === "Changing the Project Type" The default project type is for Tasks. 
To change the type to handle Notes, pass in the string 'NOTE' ```python project = client.project.create('Hobbies', project_type='NOTE') ``` ??? success "Result" The project type is now for notes. [![project-note.png](https://i.postimg.cc/fy0Mhrzt/project-note.png)](https://postimg.cc/rRcB1gtM) === "Creating Inside of A Folder" !!! warning "Note For `folder_id`" The project [folder][managers.projects.ProjectManager.create_folder] must already exist prior to calling this method. ```python project_name = 'Day Job' # The name of our project # Lets assume that we have a project group folder that already exists named 'Productivity' productivity_folder = client.get_by_fields(name='Productivity', search='project_folders') productivity_id = productivity_folder['id'] # Create the object project_object = client.project.create(project_name, folder_id=productivity_id) ``` ??? success "Result" The project was created in the group folder. [![project-create-with-folder.png](https://i.postimg.cc/mr53rmfN/project-create-with-folder.png)](https://postimg.cc/rd5RnCpK) !!! example "Multiple Projects (batch)" To create multiple projects, you will need to build the projects locally prior to calling the `create` method. This can be accomplished using the [builder][managers.projects.ProjectManager.builder] method. Pass in a list of the locally created project objects to create them remotely. !!! warning "(Again About Folders)" The project folders should already be created prior to calling the create method. 
```python # Lets assume that we have a project group folder that already exists named 'Productivity' productivity_folder = client.get_by_fields(name='Productivity', search='project_folders') productivity_id = productivity_folder['id'] # Names of our projects name_1 = 'Reading' name_2 = 'Writing' # Build the local projects project1 = client.project.builder(name_1, folder_id=productivity_id) project2 = client.project.builder(name_2, folder_id=productivity_id) project_list = [project1, project2] # Create the projects project_object = client.project.create(project_list) ``` ??? success "Result" When multiple projects are created, the dictionaries will be returned in a list in the same order as the input. ```python [{'id': '5ffe24a18f081003f3294c44', 'name': 'Reading', 'isOwner': True, 'color': '#6fcbdf', 'inAll': True, 'sortOrder': 0, 'sortType': None, 'userCount': 1, 'etag': 'qbj4z0gl', 'modifiedTime': '2021-01-12T22:37:21.823+0000', 'closed': None, 'muted': False, 'transferred': None, 'groupId': '5ffe11b7b04b356ce74d49da', 'viewMode': None, 'notificationOptions': None, 'teamId': None, 'permission': None, 'kind': 'TASK'}, {'id': '5ffe24a18f081003f3294c46', 'name': 'Writing', 'isOwner': True, 'color': '#9730ce', 'inAll': True, 'sortOrder': 0, 'sortType': None, 'userCount': 1, 'etag': 'u0loxz2v', 'modifiedTime': '2021-01-12T22:37:21.827+0000', 'closed': None, 'muted': False, 'transferred': None, 'groupId': '5ffe11b7b04b356ce74d49da', 'viewMode': None, 'notificationOptions': None, 'teamId': None, 'permission': None, 'kind': 'TASK'}] ``` [![project-batch-create.png](https://i.postimg.cc/8CHH8xSZ/project-batch-create.png)](https://postimg.cc/d7hdrHDC) """ if isinstance(name, list): # If task name is a list, we will batch create objects obj = name batch = True # Create the single project object elif isinstance(name, str): batch = False obj = self.builder( name=name, color=color, project_type=project_type, folder_id=folder_id ) obj = [obj] else: raise TypeError( f"Required 
Positional Argument Must Be A String or List of Project Objects" ) url = self._client.BASE_URL + "batch/project" payload = {"add": obj} response = self._client.http_post( url, json=payload, cookies=self._client.cookies, headers=self.headers ) self._client.sync() if len(obj) == 1: return self._client.get_by_id( self._client.parse_id(response), search="projects" ) else: etag = response["id2etag"] etag2 = list(etag.keys()) # Get the ids items = [""] * len(obj) # Create enough spots for the objects for proj_id in etag2: found = self._client.get_by_id(proj_id, search="projects") for original in obj: if found["name"] == original["name"]: # Get the index of original index = obj.index(original) # Place found at the index in return list items[index] = found return items def update(self, obj): """ Updates the passed project(s). Supports single project update and multiple project update (batch) Make local changes to the project objects that you want to change first, then pass the actual objects to the method. !!! info Every potential update to a project's attributes have not been tested. See [Example `TickTick` Project Dictionary](projects.md#example-ticktick-project-dictionary) for a listing of the fields present in a project. Arguments: obj (dict or list): **Single Project (dict)**: The project dictionary. **Multiple Projects (list)**: A list of project dictionaries. Returns: dict or list: **Single Project (dict)**: The updated project dictionary **Multiple Projects (list)**: A list containing the updated project dictionaries. Raises: TypeError: If the input is not a dict or a list. RuntimeError: If the projects could not be updated successfully. Updates are done by changing the fields in the objects locally first. !!! 
example "Single Project Update" === "Changing The Name" ```python # Lets assume that we have a project named "Reading" that we want to change to "Summer Reading" project = client.get_by_fields(name='Reading', search='projects') # Get the project # Now lets change the name project['name'] = 'Summer Reading' # Updating a single project requires just passing in the entire dictionary. updated = client.project.update(project) ``` ??? success "Result" The dictionary is returned and the name changed remotely. ```python {'id': '5ffe24a18f081003f3294c44', 'name': 'Summer Reading', 'isOwner': True, 'color': '#6fcbdf', 'inAll': True, 'sortOrder': -6236426731520, 'sortType': 'sortOrder', 'userCount': 1, 'etag': '0vbsvn8e', 'modifiedTime': '2021-01-12T23:38:16.456+0000', 'closed': None, 'muted': False, 'transferred': None, 'groupId': '5ffe2d37b04b35082bbcdf74', 'viewMode': 'list', 'notificationOptions': None, 'teamId': None, 'permission': None, 'kind': 'TASK'} ``` **Before** [![project-update-before.png](https://i.postimg.cc/K8hcpzvP/project-update-before.png)](https://postimg.cc/crTNrd3C) **After** [![project-update-after.png](https://i.postimg.cc/DwcWqsdJ/project-update-after.png)](https://postimg.cc/FY7svY6N) !!! example "Multiple Project Update" === "Changing Multiple Names" ```python # Lets assume that we have a project named "Writing" that we want to change to "Summer Reading" project = client.get_by_fields(name='Writing', search='projects') # Get the project project['name'] = 'Summer Writing' # Lets assume that we have a project named "Movies" that we want to change to "Summer Movies" movie_project = client.get_by_fields(name='Movies', search='projects') movie_project['name'] = 'Summer Movies' # Updating multiple projects requires passing the projects in a list. update_list = [project, movie_project] # Lets update remotely now updated_projects = client.project.update(update_list) ``` ??? success "Result" A list containing the updated projects is returned. 
```python [{'id': '5ffe24a18f081003f3294c46', 'name': 'Summer Reading', 'isOwner': True, 'color': '#9730ce', 'inAll': True, 'sortOrder': 0, 'sortType': None, 'userCount': 1, 'etag': 'bgl0pkm8', 'modifiedTime': '2021-01-13T00:13:29.796+0000', 'closed': None, 'muted': False, 'transferred': None, 'groupId': '5ffe11b7b04b356ce74d49da', 'viewMode': None, 'notificationOptions': None, 'teamId': None, 'permission': None, 'kind': 'TASK'}, {'id': '5ffe399c8f08237f3d144ece', 'name': 'Summer Movies', 'isOwner': True, 'color': '#F18181', 'inAll': True, 'sortOrder': -2843335458816, 'sortType': 'sortOrder', 'userCount': 1, 'etag': 'jmjy1xtc', 'modifiedTime': '2021-01-13T00:13:29.800+0000', 'closed': None, 'muted': False, 'transferred': None, 'groupId': '5ffe11b7b04b356ce74d49da', 'viewMode': None, 'notificationOptions': None, 'teamId': None, 'permission': None, 'kind': 'TASK'}] ``` **Before** [![project-update-multiople.png](https://i.postimg.cc/9QbcJH81/project-update-multiople.png)](https://postimg.cc/zyLmG61R) **After** [![project-update-multiple-after.png](https://i.postimg.cc/3RVGNv2y/project-update-multiple-after.png)](https://postimg.cc/0MGjHrWx) """ # Check the types if not isinstance(obj, dict) and not isinstance(obj, list): raise TypeError("Project objects must be a dict or list of dicts.") if isinstance(obj, dict): tasks = [obj] else: tasks = obj url = self._client.BASE_URL + "batch/project" payload = {"update": tasks} response = self._client.http_post( url, json=payload, cookies=self._client.cookies, headers=self.headers ) self._client.sync() if len(tasks) == 1: return self._client.get_by_id( self._client.parse_id(response), search="projects" ) else: etag = response["id2etag"] etag2 = list(etag.keys()) # Get the ids items = [""] * len(obj) # Create enough spots for the objects for proj_id in etag2: found = self._client.get_by_id(proj_id, search="projects") for original in obj: if found["name"] == original["name"]: # Get the index of original index = 
obj.index(original) # Place found at the index in return list items[index] = found return items def delete(self, ids): """ Deletes the project(s) with the passed ID string. !!! warning [Tasks](tasks.md) will be deleted from the project. If you want to preserve the tasks before deletion, use [move_all][managers.tasks.TaskManager.move_all] Arguments: ids (str or list): **Single Deletion (str)**: ID string of the project **Multiple Deletions (list)**: List of ID strings of projects to be deleted. Returns: dict or list: **Single Deletion (dict)**: Dictionary of the deleted project. **Multiple Deletions (list)**: A list of dictionaries of the deleted projects. Raises: TypeError: If `ids` is not a string or list of strings ValueError: If `ids` does not exist. RuntimeError: If the deletion was not successful. !!! example === "Single Project Deletion" ```python # Lets assume that we have a project that exists named 'School' school = client.get_by_fields(name='School', search='projects') # Get the project object project_id = school['id'] # Get the project id delete = client.project.delete(project_id) ``` A dictionary of the deleted project object will be returned. === "Multiple Project Deletion" ```python # Lets assume that we have two projects that we want to delete: 'School' and 'Work' school = client.get_by_fields(name='School', search='projects') # Get the project object work = client.get_by_fields(name='Work', search='projects') delete_ids = [school['id'], work['id']] # A list of the ID strings of the projects to be deleted delete = client.project.delete(delete_ids) ``` A list of the deleted dictionary objects will be returned. 
""" if not isinstance(ids, str) and not isinstance(ids, list): raise TypeError("Ids Must Be A String or List Of Strings") if isinstance(ids, str): proj = self._client.get_by_fields(id=ids, search="projects") if not proj: raise ValueError(f"Project '{ids}' Does Not Exist To Delete") ids = [ids] else: for i in ids: proj = self._client.get_by_fields(id=i, search="projects") if not proj: raise ValueError(f"Project '{i}' Does Not Exist To Delete") # Delete the task url = self._client.BASE_URL + "batch/project" payload = {"delete": ids} self._client.http_post( url, json=payload, cookies=self._client.cookies, headers=self.headers ) # Delete the list deleted_list = [] for current_id in ids: tasks = self._client.task.get_from_project(current_id) for task in tasks: self._client.delete_from_local_state(id=task["id"], search="tasks") deleted_list.append( self._client.delete_from_local_state(id=current_id, search="projects") ) if len(deleted_list) == 1: return deleted_list[0] else: return deleted_list def archive(self, ids): """ Moves the project(s) to a project folder created by `TickTick` called "Archived Lists" To unarchive a project, change its `'closed'` field to `True`, then [update][managers.projects.ProjectManager.update] Arguments: ids (str or list): **Single Project (str)**: ID string of the project to archive. **Multiple Projects (list)**: List of ID strings of the projects to archive. Returns: dict or list: **Single Project (dict)**: Dictionary of the archived object. **Multiple Projects (list)**: List of dictionaries of the archived objects. Raises: TypeError: If `ids` is not a string or list. ValueError: If the project(s) don't already exist RuntimeError: If the project(s) could not be successfully archived. !!! 
example === "Single Project Archive" ```python # Lets assume there is a project that exists named "Reading" reading_project = client.get_by_fields(name="Reading", search="projects") reading_project_id = reading_project['id'] archived = client.project.archive(reading_project_id) ``` ??? success "Result" A single dictionary is returned. ```python {'id': '5ffe1673e4b062d60dd29dc0', 'name': 'Reading', 'isOwner': True, 'color': '#51b9e3', 'inAll': True, 'sortOrder': 0, 'sortType': 'sortOrder', 'userCount': 1, 'etag': 'c9tgze9b', 'modifiedTime': '2021-01-13T00:34:50.449+0000', 'closed': True, 'muted': False, 'transferred': None, 'groupId': None, 'viewMode': None, 'notificationOptions': None, 'teamId': None, 'permission': None, 'kind': 'TASK'} ``` **Before** [![archive-before.png](https://i.postimg.cc/R0jfVt7W/archive-before.png)](https://postimg.cc/B8BtmXw3) **After** [![archived-after.png](https://i.postimg.cc/xjPkBh4J/archived-after.png)](https://postimg.cc/K4RvMqFx) === "Multiple Project Archive" ```python # Lets assume there is a project that exists named "Reading" reading_project = client.get_by_fields(name="Reading", search="projects") reading_project_id = reading_project['id'] # Lets assume another project exists named "Writing" writing_project = client.get_by_fields(name='Writing', search='projects') writing_project_id = writing_project['id'] # Archiving multiple requires putting the ID's in a list. archive_list = [reading_project_id, writing_project_id] archived = client.project.archive(archive_list) ``` ??? success "Result" A list of dictionary objects is returned. 
```python [{'id': '5ffe1673e4b062d60dd29dc0', 'name': 'Reading', 'isOwner': True, 'color': '#51b9e3', 'inAll': True, 'sortOrder': -7335938359296, 'sortType': 'sortOrder', 'userCount': 1, 'etag': 'qrga45as', 'modifiedTime': '2021-01-13T00:40:49.839+0000', 'closed': True, 'muted': False, 'transferred': None, 'groupId': None, 'viewMode': None, 'notificationOptions': None, 'teamId': None, 'permission': None, 'kind': 'TASK'}, {'id': '5ffe41328f08237f3d147e33', 'name': 'Writing', 'isOwner': True, 'color': '#F2B04B', 'inAll': True, 'sortOrder': -7885694173184, 'sortType': 'sortOrder', 'userCount': 1, 'etag': 'aenkajam', 'modifiedTime': '2021-01-13T00:40:49.843+0000', 'closed': True, 'muted': False, 'transferred': None, 'groupId': None, 'viewMode': None, 'notificationOptions': None, 'teamId': None, 'permission': None, 'kind': 'TASK'}] ``` **Before** [![archive-multiple-before.png](https://i.postimg.cc/sgHHmnrb/archive-multiple-before.png)](https://postimg.cc/qNnGMxgG) **After** [![archived-multiple-after.png](https://i.postimg.cc/tg1SMhRJ/archived-multiple-after.png)](https://postimg.cc/rdkNdRr2) """ if not isinstance(ids, str) and not isinstance(ids, list): raise TypeError("Ids Must Be A String or List Of Strings") objs = [] if isinstance(ids, str): proj = self._client.get_by_fields(id=ids, search="projects") if not proj: raise ValueError(f"Project '{ids}' Does Not Exist To Archive") # Change the list to archived proj["closed"] = True objs = [proj] else: for i in ids: proj = self._client.get_by_fields(id=i, search="projects") if not proj: raise ValueError(f"Project '{i}' Does Not Exist To Archive") proj["closed"] = True objs.append(proj) return self.update(objs) def create_folder(self, name): """ Creates a project folder to allow for project grouping. Project folder names can be repeated. Arguments: name (str or list): **Single Folder (str)**: A string for the name of the folder **Multiple Folders (list)**: A list of strings for names of the folders. 
Returns: dict or list: **Single Folder (dict)**: A dictionary for the created folder. **Multiple Folders (list)**: A list of dictionaries for the created folders. Raises: TypeError: If `name` is not a string or list RuntimeError: If the folder(s) could not be created. !!! example === "Creating a Single Folder" A single string for the name is the only parameter needed. ```python project_folder = client.project.create_folder('Productivity') ``` ??? success "Result" A single dictionary is returned. ```python {'id': '5ffe44528f089fb5795c45bf', 'etag': '9eun9kyc', 'name': 'Productivity', 'showAll': True, 'sortOrder': 0, 'deleted': 0, 'userId': 115781412, 'sortType': 'project', 'teamId': None} ``` [![folder.png](https://i.postimg.cc/HWRTjtRW/folder.png)](https://postimg.cc/c6RpbfdP) === "Creating Multiple Folders" The desired names of the folders are passed to create as a list. ```python names = ['Productivity', 'School', 'Hobbies'] project_folder = client.project.create_folder(names) ``` ??? success "Result" A list of dictionaries containing the foler objects is returned. 
```python [{'id': '5ffe45d6e4b062d60dd3ce15', 'etag': '4nvnuiw1', 'name': 'Productivity', 'showAll': True, 'sortOrder': 0, 'deleted': 0, 'userId': 447666584, 'sortType': 'project', 'teamId': None}, {'id': '5ffe45d6e4b062d60dd3ce16', 'etag': 's072l3pu', 'name': 'School', 'showAll': True, 'sortOrder': 0, 'deleted': 0, 'userId': 447666584, 'sortType': 'project', 'teamId': None}, {'id': '5ffe45d6e4b062d60dd3ce17', 'etag': '12t1xmt9', 'name': 'Hobbies', 'showAll': True, 'sortOrder': 0, 'deleted': 0, 'userId': 447666584, 'sortType': 'project', 'teamId': None}] ``` [![folders-multiple.png](https://i.postimg.cc/2jwXKjds/folders-multiple.png)](https://postimg.cc/0rzf6sBn) """ if not isinstance(name, str) and not isinstance(name, list): raise TypeError("Name Must Be A String or List Of Strings") objs = [] if isinstance(name, str): names = {"name": name, "listType": "group"} objs = [names] else: for nm in name: objs.append({"name": nm, "listType": "group"}) url = self._client.BASE_URL + "batch/projectGroup" payload = {"add": objs} response = self._client.http_post( url, json=payload, cookies=self._client.cookies, headers=self.headers ) self._client.sync() if len(objs) == 1: return self._client.get_by_id( self._client.parse_id(response), search="project_folders" ) else: etag = response["id2etag"] etag2 = list(etag.keys()) # Get the ids items = [""] * len(objs) # Create enough spots for the objects for proj_id in etag2: found = self._client.get_by_id(proj_id, search="project_folders") for original in objs: if found["name"] == original["name"]: # Get the index of original index = objs.index(original) # Place found at the index in return list items[index] = found return items def update_folder(self, obj): """ Updates the project folders(s) remotely based off changes made locally. Make the changes you want to the project folder(s) first. Arguments: obj (dict or list): **Single Folder (dict)**: The dictionary object of the folder to update. 
**Multiple Folders (list)**: A list containing dictionary objects of folders to update. Returns: dict or list: **Single Folder (dict)**: The dictionary object of the updated folder. **Multiple Folders (list)**: A list of dictionary objects corresponding to the updated folders. Raises: TypeError: If `obj` is not a dictionary or list RuntimeError: If the updating was unsuccessful. !!! example "Updating A Project Folder" === "Single Folder Update" ```python # Lets assume that we have a folder called "Productivity" productivity_folder = client.get_by_fields(name='Productivity', search='project_folders') # Lets change the name to "Hobbies" productivity_folder['name'] = "Hobbies" # Update updated_folder = client.project.update_folder(productivity_folder) ``` ??? success "Result" The dictionary of the updated folder is returned. ```python {'id': '5ffe7dab8f089fb5795d8ef2', 'etag': 'r9xl60e5', 'name': 'Hobbies', 'showAll': True, 'sortOrder': 0, 'deleted': 0, 'userId': 447666584, 'sortType': 'project', 'teamId': None} ``` **Before** ![image](https://user-images.githubusercontent.com/56806733/104408388-c48bbb80-5518-11eb-80d4-34e82bbaffd7.png) **After** ![image](https://user-images.githubusercontent.com/56806733/104408436-e1c08a00-5518-11eb-953a-4933f407e4f9.png) === "Multiple Folder Update" ```python # Lets assume that we have a folder called "Productivity" productivity_folder = client.get_by_fields(name='Productivity', search='project_folders') # Lets assume that we have another folder called "Games" games_folder = client.get_by_fields(name='Games', search='project_folders') # Lets change the "Productivity" folder to "Work" productivity_folder['name'] = "Work" # Lets change the "Games" folder to "Hobbies" games_folder['name'] = "Hobbies" update_list = [productivity_folder, games_folder] # List of objects to update # Update updated_folder = client.project.update_folder(update_list) ``` ??? success "Result" A list of the updated folder objects is returned. 
```python [{'id': '5ffe80ce8f08068e86aab288', 'etag': '0oh0pxel', 'name': 'Work', 'showAll': True, 'sortOrder': 0, 'deleted': 0, 'userId': 447666584, 'sortType': 'project', 'teamId': None}, {'id': '5ffe80cf8f08068e86aab289', 'etag': 'xwvehtfo', 'name': 'Hobbies', 'showAll': True, 'sortOrder': 0, 'deleted': 0, 'userId': 447666584, 'sortType': 'project', 'teamId': None}] ``` **Before** ![image](https://user-images.githubusercontent.com/56806733/104409143-75468a80-551a-11eb-96c8-5953c97d6f6a.png) **After** ![image](https://user-images.githubusercontent.com/56806733/104409181-8bece180-551a-11eb-8424-9f147d85eb80.png) """ # Check the types if not isinstance(obj, dict) and not isinstance(obj, list): raise TypeError("Project objects must be a dict or list of dicts.") if isinstance(obj, dict): tasks = [obj] else: tasks = obj url = self._client.BASE_URL + "batch/projectGroup" payload = {"update": tasks} response = self._client.http_post( url, json=payload, cookies=self._client.cookies, headers=self.headers ) self._client.sync() if len(tasks) == 1: return self._client.get_by_id( self._client.parse_id(response), search="project_folders" ) else: etag = response["id2etag"] etag2 = list(etag.keys()) # Get the ids items = [""] * len(tasks) # Create enough spots for the objects for proj_id in etag2: found = self._client.get_by_id(proj_id, search="project_folders") for original in tasks: if found["name"] == original["name"]: # Get the index of original index = tasks.index(original) # Place found at the index in return list items[index] = found return items def delete_folder(self, ids): """ Deletes the folder(s). !!! tip Any projects inside of the folder will be preserved - they will just not be grouped anymore. Arguments: ids (str or list): **Single Folder (str)**: The ID of the folder to be deleted. **Multiple Folders (list)**: A list containing the ID strings of the folders to be deleted. 
Returns: dict or list: **Single Folder (dict)**: The dictionary object for the deleted folder. **Multiple Folders (list)**: A list of dictionary objects of the deleted folders. Raises: TypeError: If `ids` is not a str or list ValueError: If `ids` does not match an actual folder object. RunTimeError: If the folders could not be successfully deleted. !!! example "Folder Deletion" === "Single Folder Deletion" Pass in the ID of the folder object to delete it remotely. ```python # Lets assume we have a folder named "Productivity" project_folder = client.get_by_fields(name='Productivity', search='project_folders') # Get the project folder deleted_folder = client.project.delete_folder(project_folder['id']) ``` ??? success "Result" The folder is deleted, and a single dictionary of the deleted folder object is returned. ```python {'id': '5ffe75008f089fb5795d544a', 'etag': 'e95rdzi7', 'name': 'Productivity', 'showAll': True, 'sortOrder': 0, 'deleted': 0, 'userId': 447666584, 'sortType': 'project', 'teamId': None} ``` **Before** ![image](https://user-images.githubusercontent.com/56806733/104407093-b5573e80-5515-11eb-99dc-16ca4f33d06a.png) **After** The project inside still exists. ![image](https://user-images.githubusercontent.com/56806733/104407123-c607b480-5515-11eb-92ff-15df1d41b404.png) === "Multiple Folder Deletion" Pass in the list of ID strings of the folders to be deleted. ```python # Lets assume that we have two folders that already exist: "Productivity" and "Hobbies" productivity_folder = client.get_by_fields(name='Productivity', search='project_folders') hobbies_folder = client.get_by_fields(name='Hobbies', search='project_folders') ids = [productivity_folder['id'], hobbies_folder['id']] deleted_folders = client.project.delete_folder(ids) ``` ??? success "Result" The folders are deleted, and a list of dictionaries for the deleted folder objects are returned. 
```python [{'id': '5ffe79d78f08237f3d1636ad', 'etag': '2o2dn2al', 'name': 'Productivity', 'showAll': True, 'sortOrder': 0, 'deleted': 0, 'userId': 447666584, 'sortType': 'project', 'teamId': None}, {'id': '5ffe79d78f08237f3d1636ae', 'etag': 'mah5a78l', 'name': 'Hobbies', 'showAll': True, 'sortOrder': 0, 'deleted': 0, 'userId': 447666584, 'sortType': 'project', 'teamId': None}] ``` **Before** ![image](https://user-images.githubusercontent.com/56806733/104407469-8097b700-5516-11eb-9919-069e5beb3b8a.png) **After** All folders deleted and all projects retained. ![image](https://user-images.githubusercontent.com/56806733/104407546-a8871a80-5516-11eb-815b-4df41e3d797a.png) """ if not isinstance(ids, str) and not isinstance(ids, list): raise TypeError("Ids Must Be A String or List Of Strings") if isinstance(ids, str): proj = self._client.get_by_fields(id=ids, search="project_folders") if not proj: raise ValueError(f"Project Folder '{ids}' Does Not Exist To Delete") ids = [ids] else: for i in ids: proj = self._client.get_by_fields(id=i, search="project_folders") if not proj: raise ValueError(f"Project Folder '{i}' Does Not Exist To Delete") url = self._client.BASE_URL + "batch/projectGroup" payload = {"delete": ids} self._client.http_post( url, json=payload, cookies=self._client.cookies, headers=self.headers ) # Delete the list deleted_list = [] for current_id in ids: deleted_list.append( self._client.get_by_id(current_id, search="project_folders") ) self._client.sync() if len(deleted_list) == 1: return deleted_list[0] else: return deleted_list
/robolson-0.3.83-py3-none-any.whl/rob/ticktick/managers/projects.py
0.808332
0.558688
projects.py
pypi
from pathlib import Path import numpy as np import pinocchio as pin import torch from robomeshcat import Scene, Human "This examples show how to use the human model and how to animate its pose and color. " "We show three case studies: " "(i) the first case study shows how to manipulate human online, i.e. without animation" "(ii) this case study shows how to animate pose and color of the human in case of uniform color" "(iii) this case study shows how use per vertex color of the human (only in online mode, no animation yet!)" case_study = 2 # chose which case study to visualize scene = Scene() "Set smplx_models_path to the directory where are your SMPLX models" smplx_models_path = str(Path(__file__).parent.joinpath('models').joinpath('smplx')) human_default_pose = pin.exp6(np.array([0, 0, 0, np.pi / 2, 0., 0.])).homogeneous human_default_pose[2, 3] = 1.2 if case_study == 0: "First let's create the human, arguments are forward to smplx constructor, so you can adjust the human model args" human = Human(pose=human_default_pose, color=[1., 0., 0.], model_path=smplx_models_path) scene.add_object(human) # add human to the scene, it will be visualized immediately input('Press enter to change the body pose and shape of the human') human.smplx_model.body_pose.data += 0.1 # modify the pose human.smplx_model.betas.data += 0.1 # modify shape params human.smplx_model.expression.data += 0.1 # modify expression param human.update_vertices() # recreate the geometry model to update in the viewer, this is allowed only in online use input('Press enter to change the color, opacity, and position of the human') human.pos[0] = 1. 
human.color = [0, 1, 0] human.opacity = 0.5 input('Press enter to hide the model and exit.') human.hide() elif case_study == 1: human = Human(pose=human_default_pose, color=[1., 0., 0.], model_path=smplx_models_path) # You need to create all the animation poses of the human in advance of adding the human to the scene # It's called morphologies of the pose human.smplx_model.betas.data += 1 human.add_morph(human.get_vertices()) # the first morph changes the shape human.smplx_model.body_pose.data += 0.1 human.add_morph(human.get_vertices()) # the second morp changes the body pose scene.add_object(human) # add human to the scene, no morphology can be added/modified after this step "Let's animate" with scene.animation(fps=1): human.display_morph(None) # this will display the human shape that is not affected by morphologies scene.render() human.display_morph(0) scene.render() human.display_morph(1) scene.render() human.color = [0, 0.8, 0] scene.render() # You can also change the .pos, .rot, .opacity, .visible, in animation elif case_study == 2: # To have per vertex colors, use attribute use_vertex_colors=True human = Human(pose=human_default_pose, color=[1., 0., 0.], model_path=smplx_models_path, use_vertex_colors=True) scene.add_object(human) input('press enter to change colors to random') human.update_vertices(vertices_colors=np.random.rand(human.smplx_model.get_num_verts(), 3)) input('press enter to change colors to blue') human.update_vertices(vertices_colors=[[0., 0., 0.75]] * human.smplx_model.get_num_verts()) input('press enter to display wireframe') human._show_wireframe = True human.update_vertices(vertices_colors=[[0., 0., 0.]] * human.smplx_model.get_num_verts()) input('sample new expressions and store them into video, rotate manually to see the face') # human.update_vertices(vertices=human.get_vertices(betas=torch.randn([1, 10]))) human._show_wireframe = False human._vertex_colors[:] = 0.6 with scene.video_recording(filename='/tmp/face_expression.mp4', 
fps=1): for _ in range(10): human.update_vertices(vertices=human.get_vertices(expression=torch.randn([1, 10]))) scene.render()
/robomeshcat-1.1.1.tar.gz/robomeshcat-1.1.1/examples/06_human.py
0.697609
0.816553
06_human.py
pypi
# robomimic <p align="center"> <img width="24.0%" src="docs/images/task_lift.gif"> <img width="24.0%" src="docs/images/task_can.gif"> <img width="24.0%" src="docs/images/task_tool_hang.gif"> <img width="24.0%" src="docs/images/task_square.gif"> <img width="24.0%" src="docs/images/task_lift_real.gif"> <img width="24.0%" src="docs/images/task_can_real.gif"> <img width="24.0%" src="docs/images/task_tool_hang_real.gif"> <img width="24.0%" src="docs/images/task_transport.gif"> </p> [**[Homepage]**](https://arise-initiative.github.io/robomimic-web/) &ensp; [**[Documentation]**](https://arise-initiative.github.io/robomimic-web/docs/introduction/overview.html) &ensp; [**[Study Paper]**](https://arxiv.org/abs/2108.03298) &ensp; [**[Study Website]**](https://arise-initiative.github.io/robomimic-web/study/) &ensp; [**[ARISE Initiative]**](https://github.com/ARISE-Initiative) ------- ## Latest Updates - [12/16/2021] **v0.2.0**: Modular observation modalities and encoders :wrench:, support for [MOMART](https://sites.google.com/view/il-for-mm/home) datasets :open_file_folder: - [08/09/2021] **v0.1.0**: Initial code and paper release ------- **robomimic** is a framework for robot learning from demonstration. It offers a broad set of demonstration datasets collected on robot manipulation domains, and learning algorithms to learn from these datasets. This project is part of the broader [Advancing Robot Intelligence through Simulated Environments (ARISE) Initiative](https://github.com/ARISE-Initiative), with the aim of lowering the barriers of entry for cutting-edge research at the intersection of AI and Robotics. Imitating human demonstrations is a promising approach to endow robots with various manipulation capabilities. While recent advances have been made in imitation learning and batch (offline) reinforcement learning, a lack of open-source human datasets and reproducible learning methods make assessing the state of the field difficult. 
The overarching goal of **robomimic** is to provide researchers and practitioners with: - a **standardized set of large demonstration datasets** across several benchmarking tasks to facilitate fair comparisons, with a focus on learning from human-provided demonstrations - a **standardized set of large demonstration datasets** across several benchmarking tasks to facilitate fair comparisons, with a focus on learning from human-provided demonstrations (see [this link](https://arise-initiative.github.io/robomimic-web/docs/introduction/quickstart.html#supported-datasets) for a list of supported datasets) - **high-quality implementations of several learning algorithms** for training closed-loop policies from offline datasets to make reproducing results easy and lower the barrier to entry - a **modular design** that offers great flexibility in extending algorithms and designing new algorithms This release of **robomimic** contains seven offline learning [algorithms](https://arise-initiative.github.io/robomimic-web/docs/modules/algorithms.html) and standardized [datasets](https://arise-initiative.github.io/robomimic-web/docs/introduction/results.html) collected across five simulated and three real-world multi-stage manipulation tasks of varying complexity. 
We highlight some features below (for a more thorough list of features, see [this link](https://arise-initiative.github.io/robomimic-web/docs/introduction/quickstart.html#features-overview)): - **standardized datasets:** a set of datasets collected from different sources (single proficient human, multiple humans, and machine-generated) across several simulated and real-world tasks, along with a plug-and-play [Dataset class](https://arise-initiative.github.io/robomimic-web/docs/modules/datasets.html) to easily use the datasets outside of this project - **algorithm implementations:** several high-quality implementations of offline learning algorithms, including BC, BC-RNN, HBC, IRIS, BCQ, CQL, and TD3-BC - **multiple observation spaces:** support for learning both low-dimensional and visuomotor policies, with support for observation tensor dictionaries throughout the codebase, making it easy to specify different subsets of observations to train a policy. This includes a set of useful tensor utilities to work with nested dictionaries of torch Tensors and numpy arrays. - **visualization utilities:** utilities for visualizing demonstration data, playing back actions, visualizing trained policies, and collecting new datasets using trained policies - **train launching utilities:** utilities for easily running hyperparameter sweeps, enabled by a flexible [Config](https://arise-initiative.github.io/robomimic-web/docs/modules/configs.html) management system ## Contributing to robomimic This framework originally began development in late 2018. Researchers in the [Stanford Vision and Learning Lab](http://svl.stanford.edu/) (SVL) used it as an internal tool for training policies from offline human demonstration datasets. Now it is actively maintained and used for robotics research projects across multiple labs. We welcome community contributions to this project. 
For details please check our [contributing guidelines](https://arise-initiative.github.io/robomimic-web/docs/miscellaneous/contributing.html). ## Troubleshooting Please see the [troubleshooting](https://arise-initiative.github.io/robomimic-web/docs/miscellaneous/troubleshooting.html) section for common fixes, or [submit an issue](https://github.com/ARISE-Initiative/robomimic/issues) on our github page. ## Reproducing study results The **robomimic** framework also makes reproducing the results from this [study](https://arise-initiative.github.io/robomimic-web/study.) easy. See the [results documentation](https://arise-initiative.github.io/robomimic-web/docs/introduction/results.html) for more information. ## Citations Please cite [this paper](https://arxiv.org/abs/2108.03298) if you use this framework in your work: ```bibtex @inproceedings{robomimic2021, title={What Matters in Learning from Offline Human Demonstrations for Robot Manipulation}, author={Ajay Mandlekar and Danfei Xu and Josiah Wong and Soroush Nasiriany and Chen Wang and Rohun Kulkarni and Li Fei-Fei and Silvio Savarese and Yuke Zhu and Roberto Mart\'{i}n-Mart\'{i}n}, booktitle={arXiv preprint arXiv:2108.03298}, year={2021} } ```
/robomimic-0.2.0.tar.gz/robomimic-0.2.0/README.md
0.76533
0.992788
README.md
pypi
from collections import OrderedDict import torch from robomimic.models.obs_nets import ObservationEncoder, MLP, ObservationDecoder from robomimic.models.base_nets import CropRandomizer import robomimic.utils.tensor_utils as TensorUtils import robomimic.utils.obs_utils as ObsUtils def simple_obs_example(): obs_encoder = ObservationEncoder(feature_activation=torch.nn.ReLU) # There are two ways to construct the network for processing a input observation. # 1. Construct through keyword args and class name # Assume we are processing image input of shape (3, 224, 224). camera1_shape = [3, 224, 224] # We will use a reconfigurable image processing backbone VisualCore to process the input image observation key net_class = "VisualCore" # this is defined in models/base_nets.py # kwargs for VisualCore network net_kwargs = { "input_shape": camera1_shape, "backbone_class": "ResNet18Conv", # use ResNet18 as the visualcore backbone "backbone_kwargs": {"pretrained": False, "input_coord_conv": False}, "pool_class": "SpatialSoftmax", # use spatial softmax to regularize the model output "pool_kwargs": {"num_kp": 32} } # register the network for processing the observation key obs_encoder.register_obs_key( name="camera1", shape=camera1_shape, net_class=net_class, net_kwargs=net_kwargs, ) # 2. 
Alternatively, we could initialize the observation key network outside of the ObservationEncoder # The image doesn't have to be of the same shape camera2_shape = [3, 160, 240] # We could also attach an observation randomizer to perturb the input observation key before sending to the network image_randomizer = CropRandomizer(input_shape=camera2_shape, crop_height=140, crop_width=220) # the cropper will alter the input shape net_kwargs["input_shape"] = image_randomizer.output_shape_in(camera2_shape) net = ObsUtils.OBS_ENCODER_CORES[net_class](**net_kwargs) obs_encoder.register_obs_key( name="camera2", shape=camera2_shape, net=net, randomizer=image_randomizer, ) # ObservationEncoder also supports weight sharing between keys camera3_shape = [3, 224, 224] obs_encoder.register_obs_key( name="camera3", shape=camera3_shape, share_net_from="camera1", ) # We could mix low-dimensional observation, e.g., proprioception signal, in the encoder proprio_shape = [12] net = MLP(input_dim=12, output_dim=32, layer_dims=(128,), output_activation=None) obs_encoder.register_obs_key( name="proprio", shape=proprio_shape, net=net, ) # Before constructing the encoder, make sure we register all of our observation keys with corresponding modalities # (this will determine how they are processed during training) obs_modality_mapping = { "low_dim": ["proprio"], "rgb": ["camera1", "camera2", "camera3"], } ObsUtils.initialize_obs_modality_mapping_from_dict(modality_mapping=obs_modality_mapping) # Finally, construct the observation encoder obs_encoder.make() # pretty-print the observation encoder print(obs_encoder) # Construct fake inputs inputs = { "camera1": torch.randn(camera1_shape), "camera2": torch.randn(camera2_shape), "camera3": torch.randn(camera3_shape), "proprio": torch.randn(proprio_shape) } # Add a batch dimension inputs = TensorUtils.to_batch(inputs) # Send to GPU if applicable if torch.cuda.is_available(): inputs = TensorUtils.to_device(inputs, torch.device("cuda:0")) 
obs_encoder.cuda() # output from each obs key network is concatenated as a flat vector. # The concatenation order is the same as the keys are registered obs_feature = obs_encoder(inputs) print(obs_feature.shape) # A convenient wrapper for decoding the feature vector to named output is ObservationDecoder obs_decoder = ObservationDecoder( input_feat_dim=obs_encoder.output_shape()[0], decode_shapes=OrderedDict({"action": (7,)}) ) # Send to GPU if applicable if torch.cuda.is_available(): obs_decoder.cuda() print(obs_decoder(obs_feature)) if __name__ == "__main__": simple_obs_example()
/robomimic-0.2.0.tar.gz/robomimic-0.2.0/examples/simple_obs_nets.py
0.913797
0.689453
simple_obs_nets.py
pypi
import numpy as np
import torch
from torch.utils.data import DataLoader

import robomimic
import robomimic.utils.obs_utils as ObsUtils
import robomimic.utils.torch_utils as TorchUtils
import robomimic.utils.test_utils as TestUtils
import robomimic.utils.file_utils as FileUtils
from robomimic.utils.dataset import SequenceDataset
from robomimic.config import config_factory
from robomimic.algo import algo_factory


def get_data_loader(dataset_path):
    """
    Get a data loader to sample batches of data.

    Args:
        dataset_path (str): path to the dataset hdf5
    """
    dataset = SequenceDataset(
        hdf5_path=dataset_path,
        obs_keys=(  # observations we want to appear in batches
            "robot0_eef_pos",
            "robot0_eef_quat",
            "robot0_gripper_qpos",
            "object",
        ),
        dataset_keys=(  # can optionally specify more keys here if they should appear in batches
            "actions",
            "rewards",
            "dones",
        ),
        load_next_obs=True,
        frame_stack=1,
        seq_length=10,  # length-10 temporal sequences
        pad_frame_stack=True,
        pad_seq_length=True,  # pad last obs per trajectory to ensure all sequences are sampled
        get_pad_mask=False,
        goal_mode=None,
        hdf5_cache_mode="all",  # cache dataset in memory to avoid repeated file i/o
        hdf5_use_swmr=True,
        hdf5_normalize_obs=False,
        filter_by_attribute=None,  # can optionally provide a filter key here
    )
    print("\n============= Created Dataset =============")
    print(dataset)
    print("")

    data_loader = DataLoader(
        dataset=dataset,
        sampler=None,  # no custom sampling logic (uniform sampling)
        batch_size=100,  # batches of size 100
        shuffle=True,
        num_workers=0,
        drop_last=True  # don't provide last batch in dataset pass if it's less than 100 in size
    )
    return data_loader


def get_example_model(dataset_path, device):
    """
    Use a default config to construct a BC model.

    Args:
        dataset_path (str): path to the dataset hdf5; used only to infer observation
            shapes and action dimension for model construction
        device (torch.device): device the model should be placed on

    Returns:
        model (Algo instance): BC model built from the default config
    """
    # default BC config
    config = config_factory(algo_name="bc")

    # read config to set up metadata for observation modalities (e.g. detecting rgb observations)
    ObsUtils.initialize_obs_utils_with_config(config)

    # read dataset to get some metadata for constructing model
    shape_meta = FileUtils.get_shape_metadata_from_dataset(
        dataset_path=dataset_path,
        all_obs_keys=sorted((
            "robot0_eef_pos",
            "robot0_eef_quat",
            "robot0_gripper_qpos",
            "object",
        )),
    )

    # make BC model
    model = algo_factory(
        algo_name=config.algo_name,
        config=config,
        obs_key_shapes=shape_meta["all_shapes"],
        ac_dim=shape_meta["ac_dim"],
        device=device,
    )
    return model


def print_batch_info(batch):
    """
    Pretty-print the structure of a sampled batch: nested "obs" / "next_obs" dicts get
    one line per observation key; all other keys print their tensor shape directly.
    """
    print("\n============= Batch Info =============")
    for k in batch:
        if k in ["obs", "next_obs"]:
            print("key {}".format(k))
            for obs_key in batch[k]:
                print(" obs key {} with shape {}".format(obs_key, batch[k][obs_key].shape))
        else:
            print("key {} with shape {}".format(k, batch[k].shape))
    print("")


def run_train_loop(model, data_loader):
    """
    Note: this is a stripped down version of @TrainUtils.run_epoch and the train loop
    in the train function in train.py. Logging and evaluation rollouts were removed.

    Args:
        model (Algo instance): instance of Algo class to use for training
        data_loader (torch.utils.data.DataLoader instance): torch DataLoader for
            sampling batches
    """
    num_epochs = 50
    gradient_steps_per_epoch = 100
    has_printed_batch_info = False

    # ensure model is in train mode
    model.set_train()

    for epoch in range(1, num_epochs + 1):  # epoch numbers start at 1

        # iterator for data_loader - it yields batches
        data_loader_iter = iter(data_loader)

        # record losses
        losses = []

        for _ in range(gradient_steps_per_epoch):

            # load next batch from data loader
            try:
                batch = next(data_loader_iter)
            except StopIteration:
                # data loader ran out of batches - reset and yield first batch
                data_loader_iter = iter(data_loader)
                batch = next(data_loader_iter)

            if not has_printed_batch_info:
                has_printed_batch_info = True
                print_batch_info(batch)

            # process batch for training
            input_batch = model.process_batch_for_training(batch)

            # forward and backward pass
            info = model.train_on_batch(batch=input_batch, epoch=epoch, validate=False)

            # record loss
            step_log = model.log_info(info)
            losses.append(step_log["Loss"])

        # do anything model needs to after finishing epoch
        model.on_epoch_end(epoch)

        print("Train Epoch {}: Loss {}".format(epoch, np.mean(losses)))


if __name__ == "__main__":
    # small dataset with a handful of trajectories
    dataset_path = TestUtils.example_dataset_path()

    # set torch device
    device = TorchUtils.get_torch_device(try_to_use_cuda=True)

    # get model
    model = get_example_model(dataset_path=dataset_path, device=device)

    # get dataset loader
    data_loader = get_data_loader(dataset_path=dataset_path)

    # run train loop
    run_train_loop(model=model, data_loader=data_loader)
/robomimic-0.2.0.tar.gz/robomimic-0.2.0/examples/simple_train_loop.py
0.835919
0.407333
simple_train_loop.py
pypi
import numpy as np
import torch

import robomimic
from robomimic.models import EncoderCore, Randomizer
from robomimic.utils.obs_utils import Modality, ScanModality
from robomimic.config.bc_config import BCConfig
import robomimic.utils.tensor_utils as TensorUtils


# Let's create a new modality to handle observation modalities, which will be interpreted as
# single frame images, with raw shape (H, W) in range [0, 255]
class CustomImageModality(Modality):
    # We must define the class string name to reference this modality with the @name attribute
    name = "custom_image"

    # We must define two class methods: a processor and an unprocessor method. The processor
    # method should map the raw observations (a numpy array OR torch tensor) into a form / shape suitable for learning,
    # and the unprocess method should do the inverse operation
    @classmethod
    def _default_obs_processor(cls, obs):
        # Normalize raw values from [0, 255] into the range [-1, 1].
        # NOTE(review): the original comment claimed a channel dimension is added here,
        # but the code only rescales - confirm whether downstream code expects a channel axis.
        return (obs / 255.0 - 0.5) * 2

    @classmethod
    def _default_obs_unprocessor(cls, obs):
        # We do the reverse: map [-1, 1] back into [0, 255]
        return ((obs / 2) + 0.5) * 255.0


# You can also modify pre-existing modalities as well. Let's say you have scan data that pads the ends with a 0, so we
# want to pre-process those scans in a different way. We can specify a custom processor / unprocessor
# method that will override the default one (assumes obs are a flat 1D array):
def custom_scan_processor(obs):
    # Trim the padded ends
    return obs[1:-1]


def custom_scan_unprocessor(obs):
    # Re-add the padding
    # Note: need to check type (numpy array vs. torch tensor) to pick the matching concat op
    return np.concatenate([np.zeros(1), obs, np.zeros(1)]) if isinstance(obs, np.ndarray) else \
        torch.concat([torch.zeros(1), obs, torch.zeros(1)])


# Override the default functions for ScanModality
ScanModality.set_obs_processor(processor=custom_scan_processor)
ScanModality.set_obs_unprocessor(unprocessor=custom_scan_unprocessor)


# Let's now create a custom encoding class for the custom image modality
class CustomImageEncoderCore(EncoderCore):
    # For simplicity, this will be a pass-through with some simple kwargs
    def __init__(
        self,
        input_shape,  # Required, will be inferred automatically at runtime
        # Any args below here you can specify arbitrarily
        welcome_str,
    ):
        # Always need to run super init first and pass in input_shape
        super().__init__(input_shape=input_shape)

        # Anything else can be custom to your class.
        # Let's print out the welcome string
        print(f"Welcome! {welcome_str}")

    # We need to always specify the output shape from this model, based on a given input_shape
    def output_shape(self, input_shape=None):
        # this is just a pass-through, so we return input_shape
        return input_shape

    # we also need to specify the forward pass for this network
    def forward(self, inputs):
        # just a pass through again
        return inputs


# Let's also create a custom randomizer class for randomizing our observations
class CustomImageRandomizer(Randomizer):
    """
    A simple example of a randomizer - we make @num_rand copies of each image in the
    batch, and add some small uniform noise to each. All randomized images will then get
    passed through the network, resulting in outputs corresponding to each copy - we will
    pool these outputs across the copies with a simple average.
    """
    def __init__(
        self,
        input_shape,
        num_rand=1,
        noise_scale=0.01,
    ):
        """
        Args:
            input_shape (tuple, list): shape of input (not including batch dimension)
            num_rand (int): number of random images to create on each forward pass
            noise_scale (float): magnitude of uniform noise to apply
        """
        super(CustomImageRandomizer, self).__init__()
        assert len(input_shape) == 3  # (C, H, W)
        self.input_shape = input_shape
        self.num_rand = num_rand
        self.noise_scale = noise_scale

    def output_shape_in(self, input_shape=None):
        """
        Function to compute output shape from inputs to this module. Corresponds to
        the @forward_in operation, where raw inputs (usually observation modalities)
        are passed in.

        Args:
            input_shape (iterable of int): shape of input. Does not include batch dimension.
                Some modules may not need this argument, if their output does not depend
                on the size of the input, or if they assume fixed size input.

        Returns:
            out_shape ([int]): list of integers corresponding to output shape
        """
        # @forward_in takes (B, C, H, W) -> (B, N, C, H, W) -> (B * N, C, H, W).
        # since only the batch dimension changes, and @input_shape does not include batch
        # dimension, we indicate that the non-batch dimensions don't change
        return list(input_shape)

    def output_shape_out(self, input_shape=None):
        """
        Function to compute output shape from outputs of this module. Corresponds to
        the @forward_out operation, where processed inputs (usually encoded observation
        modalities) are passed in.

        Args:
            input_shape (iterable of int): shape of input. Does not include batch dimension.
                Some modules may not need this argument, if their output does not depend
                on the size of the input, or if they assume fixed size input.

        Returns:
            out_shape ([int]): list of integers corresponding to output shape
        """
        # since the @forward_out operation splits [B * N, ...] -> [B, N, ...]
        # and then pools to result in [B, ...], only the batch dimension changes,
        # and so the other dimensions retain their shape.
        return list(input_shape)

    def forward_in(self, inputs):
        """
        Make N copies of each image, add random noise to each, and move copies
        into batch dimension to ensure compatibility with rest of network.
        """
        # note the use of @self.training to ensure no randomization at test-time
        if self.training:
            # make N copies of the images [B, C, H, W] -> [B, N, C, H, W]
            out = TensorUtils.unsqueeze_expand_at(inputs, size=self.num_rand, dim=1)

            # add random noise to each copy
            out = out + self.noise_scale * (2. * torch.rand_like(out) - 1.)

            # reshape [B, N, C, H, W] -> [B * N, C, H, W] to ensure network forward pass is unchanged
            return TensorUtils.join_dimensions(out, 0, 1)
        return inputs

    def forward_out(self, inputs):
        """
        Pools outputs across the copies by averaging them. It does this by splitting
        the outputs from shape [B * N, ...] -> [B, N, ...] and then averaging across N
        to result in shape [B, ...] to make sure the network output is consistent with
        what would have happened if there were no randomization.
        """
        # note the use of @self.training to ensure no randomization at test-time
        if self.training:
            batch_size = (inputs.shape[0] // self.num_rand)
            out = TensorUtils.reshape_dimensions(inputs, begin_axis=0, end_axis=0,
                target_dims=(batch_size, self.num_rand))
            return out.mean(dim=1)
        return inputs

    def __repr__(self):
        """Pretty print network."""
        header = '{}'.format(str(self.__class__.__name__))
        msg = header + "(input_shape={}, num_rand={}, noise_scale={})".format(
            self.input_shape, self.num_rand, self.noise_scale)
        return msg


if __name__ == "__main__":
    # Now, we can directly reference the classes in our config!
    config = BCConfig()
    config.observation.encoder.custom_image.core_class = "CustomImageEncoderCore"  # Custom class, in string form
    config.observation.encoder.custom_image.core_kwargs.welcome_str = "hi there!"  # Any custom arguments, of any primitive type that is json-able
    config.observation.encoder.custom_image.obs_randomizer_class = "CustomImageRandomizer"
    config.observation.encoder.custom_image.obs_randomizer_kwargs.num_rand = 3
    config.observation.encoder.custom_image.obs_randomizer_kwargs.noise_scale = 0.05

    # We can also directly use this new modality and associate dataset / observation keys with it!
    config.observation.modalities.obs.custom_image = ["my_image1", "my_image2"]
    config.observation.modalities.goal.custom_image = ["my_image2", "my_image3"]

    # Let's view our config
    print(config)

    # That's it! Now we can pass this config into our training script, and robomimic will directly use our
    # custom modality + encoder network
/robomimic-0.2.0.tar.gz/robomimic-0.2.0/examples/add_new_modality.py
0.920088
0.696591
add_new_modality.py
pypi
import hashlib
import logging
import typing as tp

from base58 import b58decode, b58encode
from scalecodec.base import RuntimeConfiguration, ScaleBytes, ScaleType
from substrateinterface import Keypair, KeypairType

logger = logging.getLogger(__name__)


def create_keypair(seed: str, crypto_type: int = KeypairType.SR25519) -> Keypair:
    """
    Create a keypair for further use.

    :param seed: Account seed (mnemonic or raw) as a key to sign transactions. ``//Alice``, ``//Bob`` etc. supported.
    :param crypto_type: Use KeypairType.SR25519 or KeypairType.ED25519 cryptography for generating the Keypair.

    :return: A Keypair instance used by substrate to sign transactions.
    """
    if seed.startswith("0x"):
        # Pass the raw hex seed straight through. The previous round-trip via
        # ``hex(int(seed, 16))`` silently dropped leading zero bytes, producing a
        # wrong (shorter) seed for any key material beginning with 0x00.
        return Keypair.create_from_seed(seed_hex=seed, ss58_format=32, crypto_type=crypto_type)
    elif seed.startswith("//"):
        return Keypair.create_from_uri(suri=seed, ss58_format=32, crypto_type=crypto_type)
    else:
        return Keypair.create_from_mnemonic(seed, ss58_format=32, crypto_type=crypto_type)


def dt_encode_topic(topic: str) -> str:
    """
    Encode any string to be accepted by Digital Twin setSource. Use byte encoding and sha256-hashing.

    :param topic: Topic name to be encoded.

    :return: Hashed-encoded topic name.
    """
    return f"0x{hashlib.sha256(topic.encode('utf-8')).hexdigest()}"


def ipfs_32_bytes_to_qm_hash(string_32_bytes: str) -> str:
    """
    Transform a 32-byte string (without the 2 heading multihash bytes) to an IPFS base58 Qm... hash.

    :param string_32_bytes: 32-byte hex string (without the 2 heading bytes), ``0x``-prefixed or not.

    :return: IPFS base58 Qm... hash.
    """
    if string_32_bytes.startswith("0x"):
        string_32_bytes = string_32_bytes[2:]
    # b"\x12 " is the multihash prefix: 0x12 (sha2-256) followed by 0x20 (32-byte digest length).
    return b58encode(b"\x12 " + bytes.fromhex(string_32_bytes)).decode("utf-8")


def ipfs_qm_hash_to_32_bytes(ipfs_qm: str) -> str:
    """
    Transform an IPFS base58 Qm... hash to a 32-byte hex string (with a heading '0x').

    :param ipfs_qm: IPFS base58 Qm... hash.

    :return: 32-byte hex string (without the 2 heading multihash bytes).
    """
    # [4:] strips the 2 multihash prefix bytes (4 hex chars) added by ipfs_32_bytes_to_qm_hash.
    return f"0x{b58decode(ipfs_qm).hex()[4:]}"


def str_to_scalebytes(data: tp.Union[int, str], type_str: str) -> ScaleBytes:
    """
    Encode a value to the desired ScaleBytes data.

    :param data: Value to encode.
    :param type_str: Type (``U32``, ``Compact<Balance>``, etc.).

    :return: ScaleBytes object.
    """
    scale_obj: ScaleType = RuntimeConfiguration().create_scale_object(type_str)
    return scale_obj.encode(data)


def web_3_auth(seed: str) -> tp.Tuple[str, str]:
    """
    Get authentication header for a Web3-auth IPFS gateway.

    :param seed: Substrate account seed in any, mnemonic or raw form.

    :return: Authentication header as a (user, signed-password) pair.
    """
    keypair: Keypair = create_keypair(seed)
    return f"sub-{keypair.ss58_address}", f"0x{keypair.sign(keypair.ss58_address).hex()}"
/robonomics_interface-1.6.1-py3-none-any.whl/robonomicsinterface/utils.py
0.846514
0.340211
utils.py
pypi
import click
import sys
import typing as tp

from robonomicsinterface import Account, constants, Datalog, Launch, SubEvent, Subscriber


def callback(data: tp.Tuple[tp.Union[str, int]]) -> None:
    """
    Callback executed when a subscription event is triggered. Simply outputs incoming info to console.

    :param data: Data to be output.
    """
    click.echo(data)


@click.group()
def cli() -> None:
    pass


@cli.group(help="Send various extrinsics (launch commands or record datalogs)")
def write() -> None:
    pass


@cli.group(help="Subscribe to datalog/launch events in the chain")
def read() -> None:
    pass


# NOTE: command functions carry distinct Python names (write_*/read_*) so that later
# definitions no longer shadow the earlier ones at module level. The user-facing CLI
# command names are preserved via the explicit ``name=`` argument of each decorator.
@write.command(name="datalog")
@click.option(
    "--input_string",
    type=click.File("r"),
    default=sys.stdin,
    hidden=True,
    help="Hidden parameter to perform stdin reading of a passed via pipeline string",
)
@click.option(
    "--remote_ws",
    type=str,
    default=constants.REMOTE_WS,
    help="Node websocket address used to connect to any node. E.g. local is ws://127.0.0.1:9944. Default is "
    "wss://kusama.rpc.robonomics.network",
)
@click.option("-s", type=str, required=True, help="Account seed in mnemonic/raw form.")
def write_datalog(input_string: sys.stdin, remote_ws: str, s: str) -> None:
    """
    Save string into account's datalog using pipeline:
    <echo "blah" | robonomics_interface io write datalog (params)>
    If nothing passed, waits for a string in a new line.
    """
    account: Account = Account(remote_ws=remote_ws, seed=s)
    datalog_: Datalog = Datalog(account)
    # rstrip("\n") instead of [:-1]: the old slice chopped the last real character
    # whenever the piped input had no trailing newline.
    transaction_hash: str = datalog_.record(input_string.readline().rstrip("\n"))
    click.echo(transaction_hash)


@write.command(name="launch")
@click.option(
    "--command",
    type=click.File("r"),
    default=sys.stdin,
    hidden=True,
    help="Hidden parameter to perform stdin reading of a passed via pipeline command",
)
@click.option(
    "--remote_ws",
    type=str,
    default=constants.REMOTE_WS,
    help="Node websocket address used to connect to any node. E.g. local is ws://127.0.0.1:9944. Default is "
    "wss://kusama.rpc.robonomics.network",
)
@click.option("-s", type=str, required=True, help="Account seed in mnemonic/raw form.")
@click.option("-r", type=str, required=True, help="Target account ss58_address.")
def write_launch(command: sys.stdin, remote_ws: str, s: str, r: str) -> None:
    """
    Send launch command accompanied by parameter in IPFS Qm... form or just 32 bytes data using pipeline:
    <echo "Qmc5gCcjYypU7y28oCALwfSvxCBskLuPKWpK4qpterKC7z" | robonomics_interface io write launch (params)>
    If nothing passed, waits for a string in a new line.
    """
    account: Account = Account(remote_ws=remote_ws, seed=s)
    launch_: Launch = Launch(account)
    parameter: str = command.readline().rstrip("\n")
    transaction_hash: str = launch_.launch(r, parameter)
    click.echo((transaction_hash, f"{account.get_address()} -> {r}: {parameter}"))


@read.command(name="datalog")
@click.option(
    "--remote_ws",
    type=str,
    default=constants.REMOTE_WS,
    help="Node websocket address used to connect to any node. E.g. local is ws://127.0.0.1:9944. Default is "
    "wss://kusama.rpc.robonomics.network",
)
@click.option("-r", type=str, help="Target account ss58_address.")
def read_datalog(remote_ws: str, r: str) -> None:
    """
    Listen to datalogs in the chain whether address-specified or all of them.
    """
    account: Account = Account(remote_ws=remote_ws)
    # Subscriber starts listening via its handler; keep a reference so it stays alive.
    subscriber: Subscriber = Subscriber(account, SubEvent.NewRecord, subscription_handler=callback, addr=r)


@read.command(name="launch")
@click.option(
    "--remote_ws",
    type=str,
    default=constants.REMOTE_WS,
    help="Node websocket address used to connect to any node. E.g. local is ws://127.0.0.1:9944. Default is "
    "wss://kusama.rpc.robonomics.network",
)
@click.option("-r", type=str, help="Target account ss58_address.")
def read_launch(remote_ws: str, r: str) -> None:
    """
    Listen to launches in the chain whether address-specified or all of them.
    """
    account: Account = Account(remote_ws=remote_ws)
    subscriber: Subscriber = Subscriber(account, SubEvent.NewLaunch, subscription_handler=callback, addr=r)


if __name__ == "__main__":
    cli()
/robonomics_interface-1.6.1-py3-none-any.whl/robonomicsinterface/robonomics_interface_io.py
0.403684
0.220143
robonomics_interface_io.py
pypi
import typing as tp
from logging import getLogger

from .base import BaseClass
from ..exceptions import DigitalTwinMapException
from ..types import DigitalTwinTyping
from ..utils import dt_encode_topic

logger = getLogger(__name__)


class DigitalTwin(BaseClass):
    """
    Class for interacting with `Digital Twins <https://wiki.robonomics.network/docs/en/digital-twins/>`_.
    """

    @staticmethod
    def _process_topic(topic: str) -> str:
        """
        Hash topic to a certain length if it doesn't meet topic format requirements.

        :param topic: Topic name to process.

        :return: Processed topic name.
        """
        try:
            int(topic, 16)
            if len(topic) == 66:
                # Already a 0x-prefixed 32-byte hex digest - store as is.
                return topic
            else:
                return dt_encode_topic(topic)
        except ValueError:
            # Not valid hex at all - hash the plain string.
            return dt_encode_topic(topic)

    def get_info(self, dt_id: int, block_hash: tp.Optional[str] = None) -> tp.Optional[DigitalTwinTyping]:
        """
        Fetch information about existing digital twin.

        :param dt_id: Digital Twin object ID.
        :param block_hash: Retrieves data as of passed block hash.

        :return: List of DigitalTwin associated mapping. ``None`` if no Digital Twin with such id.
        """
        logger.info(f"Fetching info about Digital Twin with ID {dt_id}")
        return self._service_functions.chainstate_query("DigitalTwin", "DigitalTwin", dt_id, block_hash=block_hash)

    def get_owner(self, dt_id: int, block_hash: tp.Optional[str] = None) -> tp.Optional[str]:
        """
        Fetch existing Digital Twin owner address.

        :param dt_id: Digital Twin object ID.
        :param block_hash: Retrieves data as of passed block hash.

        :return: Owner address. ``None`` if no Digital Twin with such id.
        """
        logger.info(f"Fetching owner of Digital Twin with ID {dt_id}")
        return self._service_functions.chainstate_query("DigitalTwin", "Owner", dt_id, block_hash=block_hash)

    def get_total(self, block_hash: tp.Optional[str] = None) -> tp.Optional[int]:
        """
        Fetch total number of Digital Twins.

        :param block_hash: Retrieves data as of passed block hash.

        :return: Total number of Digital Twins. ``None`` if no Digital Twins.
        """
        logger.info("Fetching Total number of Digital Twins")
        return self._service_functions.chainstate_query("DigitalTwin", "Total", block_hash=block_hash)

    def get_source(self, dt_id: int, topic: str, block_hash: tp.Optional[str] = None) -> str:
        """
        Find a source for a passed Digital Twin topic.

        :param dt_id: Digital Twin id.
        :param topic: Searched topic. Normal string.
        :param block_hash: Retrieves data as of passed block hash.

        :return: If found, topic source ss58 address.

        :raises DigitalTwinMapException: if the twin or the topic is missing.
        """
        dt_map: tp.Optional[DigitalTwinTyping] = self.get_info(dt_id, block_hash=block_hash)
        if not dt_map:
            raise DigitalTwinMapException("No Digital Twin was created or Digital Twin map is empty.")
        # Topics are stored hashed on-chain, so hash the query the same way before comparing.
        topic_hashed: str = self._process_topic(topic)
        for source in dt_map:
            if source[0] == topic_hashed:
                return source[1]
        raise DigitalTwinMapException(f"No topic {topic} was found in Digital Twin with id {dt_id}")

    def create(self, nonce: tp.Optional[int] = None) -> tp.Tuple[int, str]:
        """
        Create a new digital twin.

        :param nonce: Account nonce. Due to the feature of substrate-interface lib, to create an extrinsic with
            incremented nonce, pass account's current nonce. See
            https://github.com/polkascan/py-substrate-interface/blob/85a52b1c8f22e81277907f82d807210747c6c583/substrateinterface/base.py#L1535
            for example.

        :return: Tuple of newly created Digital Twin ID and hash of the creation transaction.
        """
        tr_hash: str = self._service_functions.extrinsic("DigitalTwin", "create", nonce=nonce)
        dt_total: int = self.get_total()
        # Twin IDs are 0..dt_total-1 and the newest twin has the highest ID, so scan
        # backwards for the first twin owned by this account. Fall back to the newest
        # existing ID (previously the out-of-range value ``dt_total`` was returned if
        # no owned twin happened to be found).
        dt_id: int = dt_total - 1
        for ids in reversed(range(dt_total)):
            if self.get_owner(ids) == self.account.get_address():
                dt_id = ids
                break
        return dt_id, tr_hash

    def set_source(self, dt_id: int, topic: str, source: str, nonce: tp.Optional[int] = None) -> tp.Tuple[str, str]:
        """
        Set DT topics and their sources. Since ``topic_name`` is byte encoded and then sha256-hashed, it's considered
        as good practice saving the map of digital twin in human-readable format in the very first DT topic. Still
        there is a ``get_source`` function which transforms given string to the format as saved in the chain for
        comparing.

        :param dt_id: Digital Twin ID, which should have been created by account, calling this function.
        :param topic: Topic to add. Any string you want. It will be sha256 hashed and stored in blockchain.
        :param source: Source address in ss58 format.
        :param nonce: Account nonce. Due to the feature of substrate-interface lib, to create an extrinsic with
            incremented nonce, pass account's current nonce. See
            https://github.com/polkascan/py-substrate-interface/blob/85a52b1c8f22e81277907f82d807210747c6c583/substrateinterface/base.py#L1535
            for example.

        :return: Tuple of hashed topic and transaction hash.
        """
        topic_hashed = self._process_topic(topic)
        return (
            topic_hashed,
            self._service_functions.extrinsic(
                "DigitalTwin", "set_source", {"id": dt_id, "topic": topic_hashed, "source": source}, nonce=nonce
            ),
        )
/robonomics_interface-1.6.1-py3-none-any.whl/robonomicsinterface/classes/digital_twin.py
0.850033
0.391697
digital_twin.py
pypi
import typing as tp
from logging import getLogger

from .base import BaseClass
from ..types import ListenersResponse

logger = getLogger(__name__)


class PubSub(BaseClass):
    """
    Class for handling Robonomics pubsub rpc requests

    WARNING: THIS MODULE IS UNDER CONSTRUCTION, USE AT YOUR OWN RISK! TO BE UPDATED SOON
    """

    def connect(
        self, address: str, result_handler: tp.Optional[tp.Callable] = None
    ) -> tp.Dict[str, tp.Union[str, bool, int]]:
        """
        Connect to peer and add it into swarm.

        :param address: Multiaddr address of the peer to connect to.
        :param result_handler: Callback function that processes the result received from the node.

        :return: Success flag in JSON message.
        """
        return self._service_functions.rpc_request("pubsub_connect", [address], result_handler)

    def listen(
        self, address: str, result_handler: tp.Optional[tp.Callable] = None
    ) -> tp.Dict[str, tp.Union[str, bool, int]]:
        """
        Listen address for incoming connections.

        :param address: Multiaddr address of the peer to connect to.
        :param result_handler: Callback function that processes the result received from the node.

        :return: Success flag in JSON message.
        """
        return self._service_functions.rpc_request("pubsub_listen", [address], result_handler)

    def get_listeners(self, result_handler: tp.Optional[tp.Callable] = None) -> ListenersResponse:
        """
        Returns a list of node addresses.

        :param result_handler: Callback function that processes the result received from the node.

        :return: List of node addresses in JSON message.
        """
        return self._service_functions.rpc_request("pubsub_listeners", None, result_handler)

    def get_peer(self, result_handler: tp.Optional[tp.Callable] = None) -> tp.Dict[str, tp.Union[str, int]]:
        """
        Returns local peer ID.

        :param result_handler: Callback function that processes the result received from the node.

        :return: Local peer ID in JSON message.
        """
        return self._service_functions.rpc_request("pubsub_peer", None, result_handler)

    def publish(
        self, topic_name: str, message: str, result_handler: tp.Optional[tp.Callable] = None
    ) -> tp.Dict[str, tp.Union[str, bool, int]]:
        """
        Publish message into the topic by name.

        :param topic_name: Topic name.
        :param message: Message to be published.
        :param result_handler: Callback function that processes the result received from the node.

        :return: Success flag in JSON message.
        """
        return self._service_functions.rpc_request("pubsub_publish", [topic_name, message], result_handler)

    def subscribe(
        self, topic_name: str, result_handler: tp.Optional[tp.Callable] = None
    ) -> tp.Dict[str, tp.Union[str, int]]:
        """
        Subscribe to messages in a topic by name.

        :param topic_name: Topic name to subscribe to.
        :param result_handler: Callback function that processes the result received from the node.

        :return: Subscription ID in JSON message.
        """
        return self._service_functions.rpc_request("pubsub_subscribe", [topic_name], result_handler)

    def unsubscribe(
        self, subscription_id: str, result_handler: tp.Optional[tp.Callable] = None
    ) -> tp.Dict[str, tp.Union[str, bool, int]]:
        """
        Unsubscribe for incoming messages from topic.

        :param subscription_id: Subscription ID obtained when subscribed.
        :param result_handler: Callback function that processes the result received from the node.

        :return: Success flag in JSON message.
        """
        return self._service_functions.rpc_request("pubsub_unsubscribe", [subscription_id], result_handler)
/robonomics_interface-1.6.1-py3-none-any.whl/robonomicsinterface/classes/pubsub.py
0.74826
0.266438
pubsub.py
pypi
import time
import typing as tp
from logging import getLogger

from .base import BaseClass
from ..types import AuctionTyping, LedgerTyping

logger = getLogger(__name__)


class RWS(BaseClass):
    """
    Class for interacting with Robonomics Web Services subscriptions
    """

    def get_auction(self, index: int, block_hash: tp.Optional[str] = None) -> tp.Optional[AuctionTyping]:
        """
        Get information about subscription auction.

        :param index: Auction index.
        :param block_hash: Retrieves data as of passed block hash.

        :return: Auction info.
        """

        logger.info(f"Fetching auction {index} information")
        return self._service_functions.chainstate_query("RWS", "Auction", index, block_hash=block_hash)

    def get_auction_next(self, block_hash: tp.Optional[str] = None) -> int:
        """
        Get index of the next auction to be unlocked.

        :param block_hash: Retrieves data as of passed block hash.

        :return: Auction index.
        """

        logger.info("Fetching index of the next auction to be unlocked")
        return self._service_functions.chainstate_query("RWS", "AuctionNext", block_hash=block_hash)

    def get_auction_queue(self, block_hash: tp.Optional[str] = None) -> tp.List[tp.Optional[int]]:
        """
        Get an auction queue of Robonomics Web Services subscriptions.

        :param block_hash: Retrieves data as of passed block hash.

        :return: Auction queue of Robonomics Web Services subscriptions.
        """

        logger.info("Fetching auctions queue list")
        return self._service_functions.chainstate_query("RWS", "AuctionQueue", block_hash=block_hash)

    def get_devices(
        self, addr: tp.Optional[str] = None, block_hash: tp.Optional[str] = None
    ) -> tp.List[tp.Optional[str]]:
        """
        Fetch list of RWS added devices.

        :param addr: Subscription owner. If ``None`` - account address.
        :param block_hash: Retrieves data as of passed block hash.

        :return: List of added devices. Empty if none.
        """

        address: str = addr or self.account.get_address()
        logger.info(f"Fetching list of RWS devices set by owner {address}")
        return self._service_functions.chainstate_query("RWS", "Devices", address, block_hash=block_hash)

    def get_ledger(
        self, addr: tp.Optional[str] = None, block_hash: tp.Optional[str] = None
    ) -> tp.Optional[LedgerTyping]:
        """
        Subscription information.

        :param addr: Subscription owner. If ``None`` - account address.
        :param block_hash: Retrieves data as of passed block hash.

        :return: Subscription information. Empty if none.
        """

        address: str = addr or self.account.get_address()
        logger.info(f"Fetching subscription information by owner {address}")
        return self._service_functions.chainstate_query("RWS", "Ledger", address, block_hash=block_hash)

    def get_days_left(
        self, addr: tp.Optional[str] = None, block_hash: tp.Optional[str] = None
    ) -> tp.Union[int, bool]:
        """
        Check if RWS subscription is still active for the address.

        :param addr: Possible subscription owner. If ``None`` - account address.
        :param block_hash: Retrieves data as of passed block hash.

        :return: Number of days left if subscription is active, ``False`` if no active subscription,
            -1 for a Lifetime subscription.
        """

        address: str = addr or self.account.get_address()
        logger.info(f"Fetching RWS subscription status for {address}")
        ledger: LedgerTyping = self._service_functions.chainstate_query("RWS", "Ledger", address, block_hash=block_hash)
        if not ledger:
            return False
        if "Lifetime" in ledger["kind"]:
            return -1
        # Daily subscription: expiry = issue time + days * 24h, both in milliseconds.
        unix_time_sub_expire: int = ledger["issue_time"] + 86400 * 1000 * ledger["kind"]["Daily"]["days"]
        days_left: float = (unix_time_sub_expire - time.time() * 1000) / 86400000
        if days_left >= 0:
            return int(days_left)
        else:
            return False

    def is_in_sub(
        self, sub_owner_addr: str, addr: tp.Optional[str] = None, block_hash: tp.Optional[str] = None
    ) -> bool:
        """
        Check whether ``addr`` is a device of ``sub_owner_addr`` subscription.

        :param sub_owner_addr: Subscription owner address.
        :param addr: Address to check. If ``None`` - account address.
        :param block_hash: Retrieves data as of passed block hash.

        :return: ``True`` if ``addr`` is in ``sub_owner_addr`` device list, ``False`` otherwise.
        """

        logger.info(f"Fetching list of RWS devices set by owner {sub_owner_addr}")
        address: str = addr or self.account.get_address()
        devices: tp.List[tp.Optional[str]] = self._service_functions.chainstate_query(
            "RWS", "Devices", sub_owner_addr, block_hash=block_hash
        )
        # Guard against an absent device list: the chainstate query may yield
        # ``None`` for an owner with no devices set, and a bare membership test
        # on ``None`` would raise TypeError instead of returning False.
        return bool(devices) and address in devices

    def bid(self, index: int, amount: int) -> str:
        """
        Bid to win a subscription!

        :param index: Auction index.
        :param amount: Your bid in Weiners.

        :return: Transaction hash.
        """

        logger.info(f"Bidding on auction {index} with {amount} Weiners (appx. {round(amount / 10 ** 9, 2)} XRT)")
        return self._service_functions.extrinsic("RWS", "bid", {"index": index, "amount": amount})

    def set_devices(self, devices: tp.List[str]) -> str:
        """
        Set devices which are authorized to use RWS subscriptions held by the extrinsic author.

        :param devices: Devices authorized to use RWS subscriptions. Include in list.

        :return: Transaction hash.
        """

        logger.info(f"Allowing {devices} to use {self.account.get_address()} subscription")
        return self._service_functions.extrinsic("RWS", "set_devices", {"devices": devices})
/robonomics_interface-1.6.1-py3-none-any.whl/robonomicsinterface/classes/rws.py
0.825765
0.405154
rws.py
pypi
import typing as tp
from logging import getLogger

from scalecodec.types import GenericCall, GenericExtrinsic
from substrateinterface import Keypair, SubstrateInterface, ExtrinsicReceipt
from substrateinterface.exceptions import ExtrinsicFailedException

from .account import Account
from ..decorators import check_socket_opened
from ..exceptions import NoPrivateKeyException
from ..types import QueryParams, TypeRegistryTyping, RWSParamsTyping

logger = getLogger(__name__)


class ServiceFunctions:
    """
    Class for custom queries, extrinsics and RPC calls to Robonomics parachain network.
    """

    def __init__(
        self,
        account: Account,
        wait_for_inclusion: bool = True,
        return_block_num: bool = False,
        rws_sub_owner: tp.Optional[str] = None,
    ):
        """
        Assign Account dataclass parameters and create an empty interface attribute for a decorator.

        :param account: Account dataclass with ``seed``, ``remote_ws`` and node ``type_registry``.
        :param wait_for_inclusion: Whether wait for a transaction to included in block. You will get the hash anyway.
        :param return_block_num: If set to True, any executed extrinsic function will return a tuple of form
            ``(<extrinsic_hash>, <block_number-idx>)``. ONLY WORKS WHEN ``wait_for_inclusion`` IS SET TO TRUE.
        :param rws_sub_owner: Subscription owner address. If passed, all extrinsics will be executed via RWS
            subscriptions.
        """

        self.remote_ws: str = account.remote_ws
        self.type_registry: TypeRegistryTyping = account.type_registry
        # May be None if the Account was created without a seed; extrinsic() raises in that case.
        self.keypair: Keypair = account.keypair
        # Populated lazily by the @check_socket_opened decorator before each call.
        self.interface: tp.Optional[SubstrateInterface] = None
        self.wait_for_inclusion: bool = wait_for_inclusion
        self.return_block_num: bool = return_block_num
        self.rws_sub_owner: tp.Optional[str] = rws_sub_owner

    @check_socket_opened
    def chainstate_query(
        self,
        module: str,
        storage_function: str,
        params: QueryParams = None,
        block_hash: tp.Optional[str] = None,
        subscription_handler: tp.Optional[callable] = None,
    ) -> tp.Any:
        """
        Create custom queries to fetch data from the Chainstate. Module names and storage functions, as well as required
        parameters are available at https://parachain.robonomics.network/#/chainstate.

        :param module: Chainstate module.
        :param storage_function: Storage function.
        :param params: Query parameters. None if no parameters. Include in list, if several.
        :param block_hash: Retrieves data as of passed block hash.
        :param subscription_handler: Callback function that processes the updates of the storage query subscription.
            The workflow is the same as in substrateinterface lib. Calling method with this parameter blocks current
            thread! Example of subscription handler:
            https://github.com/polkascan/py-substrate-interface#storage-subscriptions

        :return: Output of the query in any form.
        """

        logger.info(f"Performing query {module}.{storage_function}")
        # substrate-interface expects query params as a list; a scalar is wrapped here,
        # while None (no params) is forwarded untouched.
        return self.interface.query(
            module,
            storage_function,
            [params] if params is not None else None,
            block_hash=block_hash,
            subscription_handler=subscription_handler,
        ).value

    @check_socket_opened
    def extrinsic(
        self,
        call_module: str,
        call_function: str,
        params: tp.Optional[tp.Dict[str, tp.Any]] = None,
        nonce: tp.Optional[int] = None,
    ) -> tp.Union[str, tp.Tuple[str, str]]:
        """
        Create an extrinsic, sign&submit it. Module names and functions, as well as required parameters are available
        at https://parachain.robonomics.network/#/extrinsics.

        :param call_module: Call module from extrinsic tab on portal.
        :param call_function: Call function from extrinsic tab on portal.
        :param params: Call parameters as a dictionary. ``None`` for no parameters.
        :param nonce: Transaction nonce, defined automatically if None. Due to the feature of substrate-interface lib,
            to create an extrinsic with incremented nonce, pass account's current nonce. See
            https://github.com/polkascan/py-substrate-interface/blob/85a52b1c8f22e81277907f82d807210747c6c583/substrateinterface/base.py#L1535
            for example.

        :return: A tuple of form ``(<extrinsic_hash>, <block_number-idx>)`` if ``return_block_num`` and
            ``wait_for_inclusion`` in ``__init__`` were set to ``True``. String ``<extrinsic_hash>`` otherwise.

        :raises NoPrivateKeyException: If the Account was created without a seed.
        :raises ExtrinsicFailedException: If inclusion was awaited and the extrinsic failed on-chain.
        """

        # Signing requires a private key; queries do not, hence the check lives here only.
        if not self.keypair:
            raise NoPrivateKeyException("No seed was provided, unable to use extrinsics.")
        if not self.rws_sub_owner:
            logger.info(f"Creating a call {call_module}:{call_function}")
            call: GenericCall = self.interface.compose_call(
                call_module=call_module,
                call_function=call_function,
                call_params=params or None,
            )
        else:
            # RWS mode: wrap the requested call into RWS.call so the subscription
            # owner's quota pays for it instead of transaction fees.
            logger.info(f"Creating an RWS call {call_module}:{call_function}")
            rws_params: RWSParamsTyping = {
                "subscription_id": self.rws_sub_owner,
                "call": {
                    "call_module": call_module,
                    "call_function": call_function,
                    "call_args": params,
                },
            }
            call: GenericCall = self.interface.compose_call(
                call_module="RWS", call_function="call", call_params=rws_params
            )
        logger.info("Creating extrinsic")
        extrinsic: GenericExtrinsic = self.interface.create_signed_extrinsic(
            call=call, keypair=self.keypair, nonce=nonce
        )
        logger.info("Submitting extrinsic")
        receipt: ExtrinsicReceipt = self.interface.submit_extrinsic(
            extrinsic, wait_for_inclusion=self.wait_for_inclusion
        )
        logger.info(f"Extrinsic {receipt.extrinsic_hash} for RPC {call_module}:{call_function} submitted.")
        # Success/failure and block number are only known when inclusion was awaited;
        # otherwise only the hash is available.
        if self.wait_for_inclusion:
            if not receipt.is_success:
                raise ExtrinsicFailedException(receipt.error_message)
            block_num: int = self.interface.get_block_number(receipt.block_hash)
            logger.info(f"Extrinsic included in block {block_num}")
            if self.return_block_num:
                return receipt.extrinsic_hash, f"{block_num}-{receipt.extrinsic_idx}"
            else:
                return receipt.extrinsic_hash
        else:
            return receipt.extrinsic_hash

    @check_socket_opened
    def rpc_request(
        self,
        method: str,
        params: tp.Optional[tp.List[str]],
        result_handler: tp.Optional[tp.Callable],
    ) -> tp.Dict[str, tp.Any]:
        """
        Method that handles the actual RPC request to the Substrate node. The other implemented functions eventually
        use this method to perform the request.

        :param method: Method of the ``JSONRPC`` request.
        :param params: A list containing the parameters of the ``JSONRPC`` request.
        :param result_handler: Callback function that processes the result received from the node.

        :return: Result of the request.
        """

        return self.interface.rpc_request(method, params, result_handler)

    @check_socket_opened
    def subscribe_block_headers(self, callback: callable) -> dict:
        """
        Get chain head block headers.

        :param callback: Callback invoked for every new block header; blocks the current thread.

        :return: Chain head block headers.
        """

        return self.interface.subscribe_block_headers(subscription_handler=callback)
/robonomics_interface-1.6.1-py3-none-any.whl/robonomicsinterface/classes/service_functions.py
0.755997
0.257785
service_functions.py
pypi
import typing as tp
from logging import getLogger

from .base import BaseClass
from ..types import DatalogTyping

logger = getLogger(__name__)


class Datalog(BaseClass):
    """
    Class for datalog chainstate queries and extrinsic executions.
    """

    def get_index(self, addr: tp.Optional[str] = None, block_hash: tp.Optional[str] = None) -> tp.Dict[str, int]:
        """
        Get account datalog index dictionary.

        :param addr: ss58 type ``32`` address of an account which datalog index is to be obtained. If ``None``, tries
            to get Account datalog index if keypair was created, else raises ``NoPrivateKey``.
        :param block_hash: Retrieves data as of passed block hash.

        :return: Dict of form {'start': <int>, 'end': <int>}
        """

        address: str = addr or self.account.get_address()
        logger.info(f"Fetching datalog index of {address}")
        return self._service_functions.chainstate_query("Datalog", "DatalogIndex", address, block_hash=block_hash)

    def get_item(
        self, addr: tp.Optional[str] = None, index: tp.Optional[int] = None, block_hash: tp.Optional[str] = None
    ) -> tp.Optional[DatalogTyping]:
        """
        Fetch datalog record of a provided account. Fetch self datalog if no address provided and interface was
        initialized with a seed.

        :param addr: ss58 type ``32`` address of an account which datalog is to be fetched. If ``None``, tries to fetch
            self datalog if keypair was created, else raises ``NoPrivateKey``.
        :param index: record index. case ``int``: fetch datalog by specified index case ``None``: fetch latest datalog.
        :param block_hash: Retrieves data as of passed block hash.

        :return: Tuple. Datalog of the account with a timestamp, ``None`` if no records.
        """

        address: str = addr or self.account.get_address()
        logger.info(
            f"Fetching {'latest datalog record' if not index else 'datalog record #' + str(index)}"
            f" of {address}."
        )
        if index:
            record: DatalogTyping = self._service_functions.chainstate_query(
                "Datalog", "DatalogItem", [address, index], block_hash=block_hash
            )
            # A zero timestamp means the slot is empty (no record at this index).
            return record if record[0] != 0 else None
        else:
            # FIX: forward block_hash so the "latest" index is resolved at the same
            # block as the item query, not at the current chain head.
            index_latest: int = self.get_index(address, block_hash)["end"] - 1
            return (
                self._service_functions.chainstate_query(
                    "Datalog", "DatalogItem", [address, index_latest], block_hash=block_hash
                )
                if index_latest != -1
                else None
            )

    def record(self, data: str, nonce: tp.Optional[int] = None) -> str:
        """
        Write any string to datalog. It has 512 bytes length limit.

        :param data: String to be stored in datalog. It has 512 bytes length limit.
        :param nonce: Nonce of the transaction. Due to the feature of substrate-interface lib, to create an extrinsic
            with incremented nonce, pass account's current nonce. See
            https://github.com/polkascan/py-substrate-interface/blob/85a52b1c8f22e81277907f82d807210747c6c583/substrateinterface/base.py#L1535
            for example.

        :return: Hash of the datalog record transaction.
        """

        logger.info(f"Writing datalog {data}")
        return self._service_functions.extrinsic("Datalog", "record", {"record": data}, nonce)

    def erase(self, nonce: tp.Optional[int] = None) -> str:
        """
        Erase ALL datalog records of Account.

        :param nonce: Nonce of the transaction. Due to the feature of substrate-interface lib, to create an extrinsic
            with incremented nonce, pass account's current nonce. See
            https://github.com/polkascan/py-substrate-interface/blob/85a52b1c8f22e81277907f82d807210747c6c583/substrateinterface/base.py#L1535
            for example.

        :return: Hash of the datalog erase transaction.
        """

        logger.info("Erasing all datalogs of Account")
        # BUG FIX: ``nonce`` was previously passed positionally into the ``params``
        # slot of ``extrinsic(call_module, call_function, params, nonce)``, so the
        # nonce was silently used as call parameters and never as the nonce.
        return self._service_functions.extrinsic("Datalog", "erase", None, nonce)
/robonomics_interface-1.6.1-py3-none-any.whl/robonomicsinterface/classes/datalog.py
0.834137
0.345685
datalog.py
pypi
import typing as tp
from logging import getLogger

from substrateinterface import SubstrateInterface

from ..constants import REMOTE_WS, TYPE_REGISTRY
from ..decorators import check_socket_opened
from ..exceptions import InvalidExtrinsicHash
from ..types import TypeRegistryTyping

logger = getLogger(__name__)


class ChainUtils:
    """
    Various tools for obtaining information from the blockchain.
    """

    def __init__(
        self,
        remote_ws: tp.Optional[str] = None,
        type_registry: tp.Optional[TypeRegistryTyping] = None,
    ):
        """
        Initiate ChainUtils class with node address passed as an argument.

        :param remote_ws: Node url. Default node address is "wss://kusama.rpc.robonomics.network". Another address may
            be specified (e.g. "ws://127.0.0.1:9944" for local node).
        :param type_registry: Types used in the chain. Defaults are the most frequently used in Robonomics.
        """

        self.remote_ws: str = remote_ws or REMOTE_WS
        self.type_registry: TypeRegistryTyping = type_registry or TYPE_REGISTRY
        # Populated lazily by the @check_socket_opened decorator before each call.
        self.interface: tp.Optional[SubstrateInterface] = None

    @check_socket_opened
    def get_block_number(self, block_hash: str) -> int:
        """
        Get block number by its hash.

        :param block_hash: Block hash.

        :return: Block number.
        """

        return self.interface.get_block_number(block_hash)

    @check_socket_opened
    def get_block_hash(self, block_number: int) -> str:
        """
        Get block hash by its number.

        :param block_number: Block number.

        :return: Block hash.
        """

        return self.interface.get_block_hash(block_number)

    @staticmethod
    def _check_hash_valid(data_hash: str):
        """
        Check if the hash is valid: "0x"-prefixed, 66 characters total.

        :param data_hash: Extrinsic hash.

        :raises InvalidExtrinsicHash: If the hash is malformed.
        """

        if not data_hash.startswith("0x") or not len(data_hash) == 66:
            # FIX: corrected message typo ("extrinsic has" -> "extrinsic hash").
            raise InvalidExtrinsicHash("Not a valid extrinsic hash passed")

    @check_socket_opened
    def get_extrinsic_in_block(
        self, block: tp.Union[int, str], extrinsic: tp.Union[None, str, int] = None
    ) -> tp.Union[None, list, dict]:
        """
        Get all extrinsics in block or a certain extrinsic if its block ``idx`` is specified.

        :param block: Block pointer. Either block number or block hash.
        :param extrinsic: Extrinsic in this block. Either its hash or block extrinsic ``idx``.

        :return: All extrinsics in block or a certain extrinsic if its idx was passed. ``None`` if an extrinsic hash
            was passed but not found in the block.
        """

        def _get_block_any(block_: tp.Union[int, str]) -> list:
            """
            Get all extrinsics in a block given any, block number or hash.

            :param block_: Block number or hash.

            :return: All extrinsics in a block.
            """

            return self.interface.get_block(
                block_hash=(block_ if isinstance(block_, str) else None),
                block_number=(block_ if isinstance(block_, int) else None),
            )["extrinsics"]

        # FIX: idiom — isinstance() instead of type() == comparisons throughout.
        if isinstance(block, str):
            self._check_hash_valid(block)
        if not extrinsic:
            logger.info(f"Getting all extrinsics of a block {block}...")
            return _get_block_any(block)
        else:
            logger.info(f"Getting extrinsic {block}-{extrinsic}...")
            if isinstance(extrinsic, str):
                self._check_hash_valid(extrinsic)
                found_extrinsics: list = _get_block_any(block)
                for extrinsic_ in found_extrinsics:
                    if extrinsic_.value["extrinsic_hash"] == extrinsic:
                        return extrinsic_.value
            else:
                # NOTE(review): the index is treated as 1-based here (``extrinsic - 1``);
                # verify against the ``<block>-<idx>`` format used elsewhere in this package.
                return _get_block_any(block)[extrinsic - 1].value
/robonomics_interface-1.6.1-py3-none-any.whl/robonomicsinterface/classes/chain_utils.py
0.742048
0.326486
chain_utils.py
pypi
import typing as tp
from logging import getLogger

from scalecodec.base import ScaleBytes
from substrateinterface import KeypairType

from .base import BaseClass
from ..exceptions import NoPrivateKeyException
from ..types import LiabilityTyping, ReportTyping
from ..utils import ipfs_qm_hash_to_32_bytes, str_to_scalebytes

logger = getLogger(__name__)

# Maps KeypairType integer constants (Ed25519=0, Sr25519=1, Ecdsa=2) to the
# signature variant names expected by the chain.
KEYPAIR_TYPE = ["Ed25519", "Sr25519", "Ecdsa"]


class Liability(BaseClass):
    """
    Class for interacting with Robonomics Liability. Create and finalize ones, get information.
    """

    def get_agreement(self, index: int, block_hash: tp.Optional[str] = None) -> tp.Optional[LiabilityTyping]:
        """
        Fetch information about existing liabilities.

        :param index: Liability item index.
        :param block_hash: Retrieves data as of passed block hash.

        :return: Liability information: ``technics``, ``economics``, ``promisee``, ``promisor``, ``signatures``.
            ``None`` if no such liability.
        """

        logger.info(f"Fetching information about liability with index {index}")
        return self._service_functions.chainstate_query("Liability", "AgreementOf", index, block_hash=block_hash)

    def get_latest_index(self, block_hash: tp.Optional[str] = None) -> tp.Optional[int]:
        """
        Fetch the latest liability index.

        :param block_hash: Retrieves data as of passed block hash.

        :return: Latest liability index. ``None`` if there are no liabilities yet.
        """

        logger.info("Fetching total number of liabilities in chain.")
        next_index: tp.Optional[int] = self._service_functions.chainstate_query(
            "Liability", "NextIndex", block_hash=block_hash
        )
        if not next_index:
            return None
        else:
            return next_index - 1

    def get_report(self, index: int, block_hash: tp.Optional[str] = None) -> tp.Optional[ReportTyping]:
        """
        Fetch information about existing liability reports.

        :param index: Reported liability item index.
        :param block_hash: Retrieves data as of passed block hash.

        :return: Liability report information: ``index``, ``promisor``, ``report``, ``signature``. ``None`` if no such
            liability report.
        """

        logger.info(f"Fetching information about reported liability with index {index}")
        return self._service_functions.chainstate_query("Liability", "ReportOf", index, block_hash=block_hash)

    def create(
        self,
        technics_hash: str,
        economics: int,
        promisee: str,
        promisor: str,
        promisee_params_signature: str,
        promisor_params_signature: str,
        nonce: tp.Optional[int] = None,
        promisee_signature_crypto_type: int = KeypairType.SR25519,
        promisor_signature_crypto_type: int = KeypairType.SR25519,
    ) -> tp.Tuple[int, str]:
        """
        Create a liability to ensure economical relationships between robots! This is a contract to be assigned to a
        ``promisor`` by ``promisee``. As soon as the job is done and reported, the ``promisor`` gets his reward. This
        extrinsic may be submitted by another address, but there should be ``promisee`` and ``promisor`` signatures.

        :param technics_hash: Details of the liability, where the ``promisee`` order is described. Accepts any
            32-bytes data or a base58 (``Qm...``) IPFS hash.
        :param economics: ``Promisor`` reward in Weiners.
        :param promisee: ``Promisee`` (customer) ss58 address.
        :param promisor: ``Promisor`` (worker) ss58 address.
        :param promisee_params_signature: An agreement proof. This is a private key signed message containing
            ``technics`` and ``economics``. Both sides need to do this. Signed by ``promisee``.
        :param promisor_params_signature: An agreement proof. This is a private key signed message containing
            ``technics`` and ``economics``. Both sides need to do this. Signed by ``promisor``.
        :param nonce: Account nonce. Due to the feature of substrate-interface lib, to create an extrinsic with
            incremented nonce, pass account's current nonce. See
            https://github.com/polkascan/py-substrate-interface/blob/85a52b1c8f22e81277907f82d807210747c6c583/substrateinterface/base.py#L1535
            for example.
        :param promisee_signature_crypto_type: Crypto type used to create promisee account.
        :param promisor_signature_crypto_type: Crypto type used to create promisor account.

        :return: New liability index and hash of the liability creation transaction.
        """

        logger.info(
            f"Creating new liability with promisee {promisee}, promisor {promisor}, technics {technics_hash} and "
            f"economics {economics}."
        )
        # Base58 IPFS CIDs are converted to the raw 32-byte digest the pallet expects.
        if technics_hash.startswith("Qm"):
            technics_hash = ipfs_qm_hash_to_32_bytes(technics_hash)
        liability_creation_transaction_hash: str = self._service_functions.extrinsic(
            "Liability",
            "create",
            {
                "agreement": {
                    "technics": {"hash": technics_hash},
                    "economics": {"price": economics},
                    "promisee": promisee,
                    "promisor": promisor,
                    "promisee_signature": {KEYPAIR_TYPE[promisee_signature_crypto_type]: promisee_params_signature},
                    "promisor_signature": {KEYPAIR_TYPE[promisor_signature_crypto_type]: promisor_params_signature},
                }
            },
            nonce=nonce,
        )
        # The pallet does not return the new index, so it is recovered by scanning
        # recent agreements backwards for the matching promisee signature.
        # NOTE(review): a falsy index (None OR 0) short-circuits to index 0 here;
        # when get_latest_index() legitimately returns 0 the scan below is skipped.
        latest_index: int = self.get_latest_index()
        if not latest_index:
            latest_index = 0
            return latest_index, liability_creation_transaction_hash
        index: int = latest_index
        for liabilities in reversed(range(latest_index + 1)):
            if (
                self.get_agreement(liabilities)["promisee_signature"][KEYPAIR_TYPE[promisee_signature_crypto_type]]
                == promisee_params_signature
            ):
                index = liabilities
                break
        return index, liability_creation_transaction_hash

    def sign_liability(self, technics_hash: str, economics: int) -> str:
        """
        Sign liability params approve message with a private key. This function is meant to sign ``technics`` and
        ``economics`` details message to state the agreement of ``promisee`` and ``promisor``. Both sides need to do
        this.

        :param technics_hash: Details of the liability, where the ``promisee`` order is described. Accepts any
            32-bytes data or a base58 (``Qm...``) IPFS hash.
        :param economics: ``Promisor`` reward in Weiners.

        :return: Signed message 64-byte hash in string form.

        :raises NoPrivateKeyException: If the Account was created without a seed.
        """

        if not self.account.keypair:
            raise NoPrivateKeyException("No private key, unable to sign a liability")
        if technics_hash.startswith("Qm"):
            technics_hash = ipfs_qm_hash_to_32_bytes(technics_hash)
        logger.info(f"Signing proof with technics {technics_hash} and economics {economics}.")
        # SCALE-encode (H256 technics ++ Compact<Balance> economics) exactly as the
        # pallet will re-encode them for signature verification.
        data_to_sign: ScaleBytes = str_to_scalebytes(technics_hash, "H256") + str_to_scalebytes(
            economics, "Compact<Balance>"
        )
        return f"0x{self.account.keypair.sign(data_to_sign).hex()}"

    def finalize(
        self,
        index: int,
        report_hash: str,
        promisor: tp.Optional[str] = None,
        promisor_signature_crypto_type: int = KeypairType.SR25519,
        promisor_finalize_signature: tp.Optional[str] = None,
        nonce: tp.Optional[int] = None,
    ) -> str:
        """
        Report on a completed job to receive a deserved award. This may be done by another address, but there should be
        a liability ``promisor`` signature.

        :param index: Liability item index.
        :param report_hash: IPFS hash of a report data (videos, text, etc.). Accepts any 32-bytes data or a base58
            (``Qm...``) IPFS hash.
        :param promisor: ``Promisor`` (worker) ss58 address. If not passed, replaced with transaction author address.
        :param promisor_signature_crypto_type: Crypto type used to create promisor account.
        :param promisor_finalize_signature: 'Job done' proof. A message containing liability index and report data
            signed by ``promisor``. If not passed, this message is signed by a transaction author which should be a
            ``promisor`` so.
        :param nonce: Account nonce. Due to the feature of substrate-interface lib, to create an extrinsic with
            incremented nonce, pass account's current nonce. See
            https://github.com/polkascan/py-substrate-interface/blob/85a52b1c8f22e81277907f82d807210747c6c583/substrateinterface/base.py#L1535
            for example.

        :return: Liability finalization transaction hash.
        """

        logger.info(f"Finalizing liability {index} by promisor {promisor or self.account.get_address()}.")
        if report_hash.startswith("Qm"):
            report_hash = ipfs_qm_hash_to_32_bytes(report_hash)
        return self._service_functions.extrinsic(
            "Liability",
            "finalize",
            {
                "report": {
                    "index": index,
                    "sender": promisor or self.account.get_address(),
                    "payload": {"hash": report_hash},
                    "signature": {
                        # If no external signature was supplied, the transaction author
                        # signs the report themselves (they must then be the promisor).
                        KEYPAIR_TYPE[promisor_signature_crypto_type]: promisor_finalize_signature
                        or self.sign_report(index, report_hash)
                    },
                }
            },
            nonce=nonce,
        )

    def sign_report(self, index: int, report_hash: str) -> str:
        """
        Sign liability finalization parameters proof message with a private key. This is meant to state that the job is
        done by ``promisor``.

        :param index: Liability item index.
        :param report_hash: IPFS hash of a report data (videos, text, etc.). Accepts any 32-bytes data or a base58
            (``Qm...``) IPFS hash.

        :return: Signed message 64-byte hash in string form.

        :raises NoPrivateKeyException: If the Account was created without a seed.
        """

        if not self.account.keypair:
            raise NoPrivateKeyException("No private key, unable to sign a report")
        if report_hash.startswith("Qm"):
            report_hash = ipfs_qm_hash_to_32_bytes(report_hash)
        logger.info(f"Signing report for liability {index} with report_hash {report_hash}.")
        # SCALE-encode (U32 index ++ H256 report hash) for on-chain verification.
        data_to_sign: ScaleBytes = str_to_scalebytes(index, "U32") + str_to_scalebytes(report_hash, "H256")
        return f"0x{self.account.keypair.sign(data_to_sign).hex()}"
/robonomics_interface-1.6.1-py3-none-any.whl/robonomicsinterface/classes/liability.py
0.80213
0.369571
liability.py
pypi
import typing as tp
from logging import getLogger

from .base import BaseClass
from ..types import AccountTyping

logger = getLogger(__name__)


class CommonFunctions(BaseClass):
    """
    Class for common functions such as getting account information or transferring tokens.
    """

    def get_account_info(self, addr: tp.Optional[str] = None, block_hash: tp.Optional[str] = None) -> AccountTyping:
        """
        Get account information.

        :param addr: Explored account ss58 address. Account dataclass address if None.
        :param block_hash: Retrieves data as of passed block hash.

        :return: Account information dictionary.
        """

        if addr:
            account_address = addr
        else:
            account_address = self.account.get_address()
        logger.info(f"Getting account {account_address} data")
        return self._service_functions.chainstate_query(
            "System", "Account", account_address, block_hash=block_hash
        )

    def get_account_nonce(self, addr: tp.Optional[str] = None) -> int:
        """
        Get current account nonce.

        :param addr: Account ss58 address. Self address via private key is obtained if not passed.

        :return: Account nonce. Due to the feature of substrate-interface lib, to create an extrinsic with incremented
            nonce, pass account's current nonce. See
            https://github.com/polkascan/py-substrate-interface/blob/85a52b1c8f22e81277907f82d807210747c6c583/substrateinterface/base.py#L1535
            for example.
        """

        if addr:
            account_address = addr
        else:
            account_address = self.account.get_address()
        logger.info(f"Fetching nonce of account {account_address}")
        response = self._service_functions.rpc_request(
            "system_accountNextIndex", [account_address], result_handler=None
        )
        # Default to 0 when the node response carries no "result" field.
        return response.get("result", 0)

    def transfer_tokens(self, target_address: str, tokens: int, nonce: tp.Optional[int] = None) -> str:
        """
        Send tokens to target address.

        :param target_address: Account that will receive tokens.
        :param tokens: Number of tokens to be sent, in Wei, so if you want to send 1 XRT, you should send
            "1 000 000 000" units.
        :param nonce: Account nonce. Due to the feature of substrate-interface lib, to create an extrinsic with
            incremented nonce, pass account's current nonce. See
            https://github.com/polkascan/py-substrate-interface/blob/85a52b1c8f22e81277907f82d807210747c6c583/substrateinterface/base.py#L1535
            for example.

        :return: Hash of the transfer transaction.
        """

        logger.info(f"Sending tokens to {target_address}")
        call_params = {"dest": {"Id": target_address}, "value": tokens}
        return self._service_functions.extrinsic("Balances", "transfer", call_params, nonce)
/robonomics_interface-1.6.1-py3-none-any.whl/robonomicsinterface/classes/common_functions.py
0.814053
0.33039
common_functions.py
pypi
import typing as tp
from dataclasses import dataclass
from logging import getLogger

from substrateinterface import Keypair, KeypairType

from ..constants import REMOTE_WS, TYPE_REGISTRY
from ..exceptions import NoPrivateKeyException
from ..types import TypeRegistryTyping
from ..utils import create_keypair

logger = getLogger(__name__)


@dataclass
class Account:
    """
    Dataclass to hold account info and node connection parameters
    """

    def __init__(
        self,
        seed: tp.Optional[str] = None,
        remote_ws: tp.Optional[str] = None,
        type_registry: tp.Optional[TypeRegistryTyping] = None,
        crypto_type: int = KeypairType.SR25519,
    ) -> None:
        """
        Save node connection parameters and create a keypair to sign transactions and define address if seed was
        passed as a parameter.

        :param seed: Account seed (mnemonic or raw) as a key to sign transactions.
        :param remote_ws: Node url. Default node address is "wss://kusama.rpc.robonomics.network". Another address may
            be specified (e.g. "ws://127.0.0.1:9944" for local node).
        :param type_registry: Types used in the chain. Defaults are the most frequently used in Robonomics.
        :param crypto_type: Use KeypairType.SR25519 or KeypairType.ED25519 cryptography for generating the Keypair.
        """

        self.remote_ws: str = remote_ws or REMOTE_WS
        self.type_registry: TypeRegistryTyping = type_registry or TYPE_REGISTRY
        if seed:
            self.keypair: tp.Optional[Keypair] = create_keypair(seed, crypto_type)
            self._address: tp.Optional[str] = self.keypair.ss58_address
        else:
            # FIX: previously ``_address`` was only assigned in the seeded branch,
            # leaving the attribute undefined (AttributeError on access) for
            # seedless accounts. Both attributes are now always initialized.
            self.keypair = None
            self._address = None

    def get_address(self) -> str:
        """
        Determine account address if seed was passed when creating an instance.

        :return: Account ss58 address.

        :raises NoPrivateKeyException: If no seed (hence no keypair) was provided.
        """

        if not self.keypair:
            raise NoPrivateKeyException("No private key was provided, unable to determine account address")
        return str(self.keypair.ss58_address)
/robonomics_interface-1.6.1-py3-none-any.whl/robonomicsinterface/classes/account.py
0.684791
0.183502
account.py
pypi
import csv
from contextlib import contextmanager

import requests
from roboparse import Parser, BaseRouter


class HabrFilters:
    """Mixin holding response-filtering helpers used by the parser."""

    def _fb_sort_data(self, data):
        # Drop entries the parser could not resolve.
        return [element for element in data if element is not None]


class HabrRouter(BaseRouter, HabrFilters):
    """Router describing how to extract news entries from habr.com.

    The previous pass-through ``__init__`` (which only called ``super().__init__``)
    was redundant and has been removed; Python resolves to the base initializer
    automatically.
    """

    @contextmanager
    def login(self):
        """Implement login functionality here.

        A ``@contextmanager``-decorated function must be a generator that yields
        exactly once; without the ``yield`` below, ``with router.login():``
        raised at runtime instead of entering the block.
        """
        yield self

    def get_news(self):
        """Create router response and return it."""
        response = self.create_router_response(
            path="https://habr.com/ru/",  # Path is just meta data. It uses for nothing
            linter={
                "type": "LIST",
                "tag": "li",
                "attrs": {"class": "content-list__item"},
                "children": {
                    "type": "ELEMENT",
                    "tag": "h2",
                    "attrs": {"class": "post__title"},
                    "children": {
                        "type": "ELEMENT",
                        "tag": "a",
                        "attrs": {"class": "post__title_link"}
                    }
                }
            }
        )
        return response


def write_csv(data):
    """Append scraped (title, link) rows to news.csv.

    ``newline=""`` is required by the csv module to avoid blank rows on Windows.
    """
    with open("news.csv", "a+", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(("Title", "Link"))
        for item in data:
            writer.writerow(
                (item.text.strip(), item.get("href"))
            )


def scrape_news():
    """Fetch the habr.com front page, parse news entries and dump them to CSV."""
    parser = Parser()
    router = HabrRouter(username="someUserName", password="somePassWord")

    with requests.Session() as session:
        html = session.get("https://habr.com/ru/", headers={
            "accept": "*/*",
            "user-agent": (
                "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
                "Chrome/86.0.4240.111 Safari/537.36"
            )
        })
        data = parser.load(html.content, router.get_news(), router.filters)
        write_csv(data)


if __name__ == '__main__':
    scrape_news()
/roboparse-0.0.2.tar.gz/roboparse-0.0.2/example/scraper.py
0.491944
0.223366
scraper.py
pypi
"""Abstract base classes for dexterous-hand composer entities."""

import abc
import enum
from typing import Sequence

import numpy as np
from dm_control import composer, mjcf
from dm_control.composer.observation import observable
from mujoco_utils import types


@enum.unique
class HandSide(enum.Enum):
    """Which hand side is being modeled."""

    LEFT = enum.auto()
    RIGHT = enum.auto()


class Hand(composer.Entity, abc.ABC):
    """Base composer class for dexterous hands.

    Concrete subclasses must provide the abstract properties below; the
    observables defined in `HandObservables` are built on top of them.
    """

    def _build_observables(self) -> "HandObservables":
        # dm_control hook: called once by composer.Entity to attach observables.
        return HandObservables(self)

    @property
    @abc.abstractmethod
    def name(self) -> str:
        ...

    @property
    @abc.abstractmethod
    def hand_side(self) -> HandSide:
        ...

    @property
    @abc.abstractmethod
    def root_body(self) -> types.MjcfElement:
        ...

    @property
    @abc.abstractmethod
    def joints(self) -> Sequence[types.MjcfElement]:
        ...

    @property
    @abc.abstractmethod
    def actuators(self) -> Sequence[types.MjcfElement]:
        ...

    @property
    @abc.abstractmethod
    def fingertip_sites(self) -> Sequence[types.MjcfElement]:
        ...


class HandObservables(composer.Observables):
    """Base class for dexterous hand observables."""

    # Narrow the entity type for static checkers; set by composer.Observables.
    _entity: Hand

    @composer.observable
    def joints_pos(self):
        """Returns the joint positions."""
        return observable.MJCFFeature("qpos", self._entity.joints)

    @composer.observable
    def joints_pos_cos_sin(self):
        """Returns the joint positions encoded as (cos, sin) pairs.

        This has twice as many dimensions as the raw joint positions.
        """

        def _get_joint_angles(physics: mjcf.Physics) -> np.ndarray:
            qpos = physics.bind(self._entity.joints).qpos
            return np.hstack([np.cos(qpos), np.sin(qpos)])

        return observable.Generic(raw_observation_callable=_get_joint_angles)

    @composer.observable
    def joints_vel(self):
        """Returns the joint velocities."""
        return observable.MJCFFeature("qvel", self._entity.joints)

    @composer.observable
    def joints_torque(self) -> observable.Generic:
        """Returns the joint torques.

        NOTE(review): relies on the entity exposing ``joint_torque_sensors``,
        which is not part of the `Hand` abstract interface above — subclasses
        (e.g. ShadowHand) provide it; confirm before using with a new hand.
        """

        def _get_joint_torques(physics: mjcf.Physics) -> np.ndarray:
            # We only care about torques acting on each joint's axis of rotation, so we
            # project them.
            torques = physics.bind(self._entity.joint_torque_sensors).sensordata
            joint_axes = physics.bind(self._entity.joints).axis
            return np.einsum("ij,ij->i", torques.reshape(-1, 3), joint_axes)

        return observable.Generic(raw_observation_callable=_get_joint_torques)

    @composer.observable
    def position(self):
        """Returns the position of the hand's root body in the world frame."""
        return observable.MJCFFeature("xpos", self._entity.root_body)
/models/hands/base.py
0.939796
0.448004
base.py
pypi
"""Shadow Hand E3M5 composer entity."""

from dataclasses import dataclass
from typing import Dict, Optional, Sequence, Tuple

import numpy as np
from dm_control import composer, mjcf
from dm_control.composer.observation import observable
from dm_env import specs
from mujoco_utils import mjcf_utils, physics_utils, spec_utils, types

from robopianist.models.hands import base
from robopianist.models.hands import shadow_hand_constants as consts


@dataclass(frozen=True)
class Dof:
    """Forearm degree of freedom."""

    joint_type: str          # MuJoCo joint type, e.g. "slide" or "hinge".
    axis: Tuple[int, int, int]
    stiffness: float         # Position-actuator kp; also used to derive damping.
    joint_range: Tuple[float, float]
    reflect: bool = False    # Mirror the axis for the left hand.


_FOREARM_DOFS: Dict[str, Dof] = {
    "forearm_tx": Dof(
        joint_type="slide",
        axis=(-1, 0, 0),
        stiffness=300,
        # Note this is a dummy range, it will be set to the piano's length at task
        # initialization, see `robopianist/suite/tasks/base.py`.
        joint_range=(-1, 1),
    ),
    "forearm_ty": Dof(
        joint_type="slide", axis=(0, 0, 1), stiffness=300, joint_range=(0.0, 0.06)
    ),
    # "forearm_tz": Dof(
    #     joint_type="slide", axis=(0, 1, 0), stiffness=1000, joint_range=(-0.04, 0.0)
    # ),
    # "forearm_roll": Dof(
    #     joint_type="hinge", axis=(0, 0, 1), stiffness=300, joint_range=(-0.25, 0.25)
    # ),
    # "forearm_pitch": Dof(
    #     joint_type="hinge", axis=(1, 0, 0), stiffness=50, joint_range=(0, 0.15)
    # ),
    # "forearm_yaw": Dof(
    #     joint_type="hinge",
    #     axis=(0, -1, 0),
    #     stiffness=300,
    #     joint_range=(-0.25, 0.25),
    #     reflect=True,
    # ),
}

# Wrist-yaw joint range when `restrict_wrist_yaw_range` is set (radians).
_RESTRICTED_WRJ2_RANGE: Tuple[float, float] = (-0.174533, 0.174533)

# Actuator names (without hand prefix) removed in the reduced action space.
_REDUCED_ACTION_SPACE_EXCLUDED_DOFS: Tuple[str, ...] = (
    "A_THJ5",
    "A_THJ3",
    "A_THJ1",
    "A_FFJ4",
    "A_MFJ4",
    "A_RFJ4",
    "A_LFJ5",
    "A_LFJ4",
)

# Thumb THJ2 range in the reduced action space (radians).
_REDUCED_THUMB_RANGE: Tuple[float, float] = (0.0, 0.698132)

# Z-offsets (along the distal body) of the fingertip sites, in meters.
_FINGERTIP_OFFSET = 0.026
_THUMBTIP_OFFSET = 0.0275


class ShadowHand(base.Hand):
    """A Shadow Hand E3M5."""

    def _build(
        self,
        name: Optional[str] = None,
        side: base.HandSide = base.HandSide.RIGHT,
        primitive_fingertip_collisions: bool = False,
        restrict_wrist_yaw_range: bool = False,
        add_dofs: bool = True,
        reduced_action_space: bool = False,
    ) -> None:
        """Initializes a ShadowHand.

        Args:
            name: Name of the hand. Used as a prefix in the MJCF name attributes.
            side: Which side (left or right) to model.
            primitive_fingertip_collisions: Whether to use capsule approximations for
                the fingertip colliders or the true meshes. Using primitive colliders
                speeds up the simulation.
            restrict_wrist_yaw_range: Whether to restrict the range of the wrist yaw
                joint.
            add_dofs: Whether to add the forearm degrees of freedom.
            reduced_action_space: Whether to use a reduced action space.
        """
        if side == base.HandSide.RIGHT:
            self._prefix = "rh_"
            xml_file = consts.RIGHT_SHADOW_HAND_XML
        elif side == base.HandSide.LEFT:
            self._prefix = "lh_"
            xml_file = consts.LEFT_SHADOW_HAND_XML

        name = name or self._prefix + "shadow_hand"

        self._hand_side = side
        self._mjcf_root = mjcf.from_path(str(xml_file))
        self._mjcf_root.model = name
        self._n_forearm_dofs = 0
        self._reduce_action_space = reduced_action_space

        if restrict_wrist_yaw_range:
            # Tighten both the joint range and the matching actuator ctrlrange so
            # the controller cannot command positions outside the new limits.
            joint = mjcf_utils.safe_find(
                self._mjcf_root, "joint", self._prefix + "WRJ2"
            )
            joint.range = _RESTRICTED_WRJ2_RANGE
            actuator = mjcf_utils.safe_find(
                self._mjcf_root, "actuator", self._prefix + "A_WRJ2"
            )
            actuator.ctrlrange = _RESTRICTED_WRJ2_RANGE

        # Important: call before parsing.
        if add_dofs:
            self._add_dofs()

        self._parse_mjcf_elements()
        self._add_mjcf_elements()

        if primitive_fingertip_collisions:
            # Swap the distal-mesh colliders for capsules; cheaper contacts.
            for geom in self._mjcf_root.find_all("geom"):
                if (
                    geom.dclass.dclass == "plastic_collision"
                    and geom.mesh is not None
                    and geom.mesh.name is not None
                    and geom.mesh.name.endswith("distal_pst")
                ):
                    geom.type = "capsule"

        # Lazily built in `action_spec`.
        self._action_spec = None

    def _build_observables(self) -> "ShadowHandObservables":
        return ShadowHandObservables(self)

    def _parse_mjcf_elements(self) -> None:
        # Collect joints/actuators and, optionally, prune the action space.
        joints = mjcf_utils.safe_find_all(self._mjcf_root, "joint")
        actuators = mjcf_utils.safe_find_all(self._mjcf_root, "actuator")
        if self._reduce_action_space:
            # Disable some actuators (keeping the joints).
            for act_name in _REDUCED_ACTION_SPACE_EXCLUDED_DOFS:
                act = [a for a in actuators if a.name == self._prefix + act_name][0]
                actuators.remove(act)
                act.remove()
            # Reduce THJ2 range.
            joint = mjcf_utils.safe_find(
                self._mjcf_root, "joint", self._prefix + "THJ2"
            )
            joint.range = _REDUCED_THUMB_RANGE
            actuator = mjcf_utils.safe_find(
                self._mjcf_root, "actuator", self._prefix + "A_THJ2"
            )
            actuator.ctrlrange = _REDUCED_THUMB_RANGE
            # Store indices of joints associated with disabled actuators.
            # n[2:] strips the "A_" actuator prefix to recover the joint name.
            names = [self._prefix + n[2:] for n in _REDUCED_ACTION_SPACE_EXCLUDED_DOFS]
            self._disabled_idxs = [i for i, j in enumerate(joints) if j.name in names]
        self._joints = tuple(joints)
        self._actuators = tuple(actuators)

    def _add_mjcf_elements(self) -> None:
        # Add sites to the tips of the fingers.
        fingertip_sites = []
        for tip_name in consts.FINGERTIP_BODIES:
            tip_elem = mjcf_utils.safe_find(
                self._mjcf_root, "body", self._prefix + tip_name
            )
            # The thumb tip sits slightly further out than the finger tips.
            offset = _THUMBTIP_OFFSET if tip_name == "thdistal" else _FINGERTIP_OFFSET
            tip_site = tip_elem.add(
                "site",
                name=tip_name + "_site",
                pos=(0.0, 0.0, offset),
                type="sphere",
                size=(0.004,),
                group=composer.SENSOR_SITES_GROUP,
            )
            fingertip_sites.append(tip_site)
        self._fingertip_sites = tuple(fingertip_sites)

        # Add joint torque sensors.
        joint_torque_sensors = []
        for joint_elem in self._joints:
            # Each torque sensor needs a site on the joint's parent body.
            site_elem = joint_elem.parent.add(
                "site",
                name=joint_elem.name + "_site",
                size=(0.001, 0.001, 0.001),
                type="box",
                rgba=(0, 1, 0, 1),
                group=composer.SENSOR_SITES_GROUP,
            )
            torque_sensor_elem = joint_elem.root.sensor.add(
                "torque",
                site=site_elem,
                name=joint_elem.name + "_torque",
            )
            joint_torque_sensors.append(torque_sensor_elem)
        self._joint_torque_sensors = tuple(joint_torque_sensors)

        # Add velocity and force sensors to the actuators.
        actuator_velocity_sensors = []
        actuator_force_sensors = []
        for actuator_elem in self._actuators:
            velocity_sensor_elem = self._mjcf_root.sensor.add(
                "actuatorvel",
                actuator=actuator_elem,
                name=actuator_elem.name + "_velocity",
            )
            actuator_velocity_sensors.append(velocity_sensor_elem)

            force_sensor_elem = self._mjcf_root.sensor.add(
                "actuatorfrc",
                actuator=actuator_elem,
                name=actuator_elem.name + "_force",
            )
            actuator_force_sensors.append(force_sensor_elem)
        self._actuator_velocity_sensors = tuple(actuator_velocity_sensors)
        self._actuator_force_sensors = tuple(actuator_force_sensors)

    def _add_dofs(self) -> None:
        """Add forearm degrees of freedom."""

        def _maybe_reflect_axis(
            axis: Sequence[float], reflect: bool
        ) -> Sequence[float]:
            # Mirror marked axes when modeling the left hand.
            if self._hand_side == base.HandSide.LEFT and reflect:
                return tuple([-a for a in axis])
            return axis

        for dof_name, dof in _FOREARM_DOFS.items():
            joint = self.root_body.add(
                "joint",
                type=dof.joint_type,
                name=dof_name,
                axis=_maybe_reflect_axis(dof.axis, dof.reflect),
                range=dof.joint_range,
            )

            # Critically damp the joint so the position actuator does not ring.
            joint.damping = physics_utils.get_critical_damping_from_stiffness(
                dof.stiffness, joint.full_identifier, self.mjcf_model
            )

            self._mjcf_root.actuator.add(
                "position",
                name=dof_name,
                joint=joint,
                ctrlrange=dof.joint_range,
                kp=dof.stiffness,
            )

            self._n_forearm_dofs += 1

    # Accessors.

    @property
    def hand_side(self) -> base.HandSide:
        return self._hand_side

    @property
    def mjcf_model(self) -> types.MjcfRootElement:
        return self._mjcf_root

    @property
    def name(self) -> str:
        return self._mjcf_root.model

    @property
    def n_forearm_dofs(self) -> int:
        return self._n_forearm_dofs

    @composer.cached_property
    def root_body(self) -> types.MjcfElement:
        return mjcf_utils.safe_find(self._mjcf_root, "body", self._prefix + "forearm")

    @composer.cached_property
    def fingertip_bodies(self) -> Sequence[types.MjcfElement]:
        return tuple(
            mjcf_utils.safe_find(self._mjcf_root, "body", self._prefix + name)
            for name in consts.FINGERTIP_BODIES
        )

    @property
    def joints(self) -> Sequence[types.MjcfElement]:
        return self._joints

    @property
    def actuators(self) -> Sequence[types.MjcfElement]:
        return self._actuators

    @property
    def joint_torque_sensors(self) -> Sequence[types.MjcfElement]:
        return self._joint_torque_sensors

    @property
    def fingertip_sites(self) -> Sequence[types.MjcfElement]:
        return self._fingertip_sites

    @property
    def actuator_velocity_sensors(self) -> Sequence[types.MjcfElement]:
        return self._actuator_velocity_sensors

    @property
    def actuator_force_sensors(self) -> Sequence[types.MjcfElement]:
        return self._actuator_force_sensors

    # Action specs.

    def action_spec(self, physics: mjcf.Physics) -> specs.BoundedArray:
        # Built once and cached; spans the (possibly reduced) actuator set.
        if self._action_spec is None:
            self._action_spec = spec_utils.create_action_spec(
                physics=physics, actuators=self.actuators, prefix=self.name
            )
        return self._action_spec

    def apply_action(
        self,
        physics: mjcf.Physics,
        action: np.ndarray,
        random_state: np.random.RandomState,
    ) -> None:
        del random_state  # Unused.
        physics.bind(self.actuators).ctrl = action

    def initialize_episode(
        self, physics: mjcf.Physics, random_state: np.random.RandomState
    ) -> None:
        del random_state  # Unused.
        # Pin unactuated joints at 0 so they do not drift in the reduced space.
        if self._reduce_action_space:
            physics.bind([self.joints[i] for i in self._disabled_idxs]).qpos = 0.0

    def after_step(
        self, physics: mjcf.Physics, random_state: np.random.RandomState
    ) -> None:
        del random_state  # Unused.
        if self._reduce_action_space:
            physics.bind([self.joints[i] for i in self._disabled_idxs]).qpos = 0.0


class ShadowHandObservables(base.HandObservables):
    """ShadowHand observables."""

    _entity: ShadowHand

    @composer.observable
    def actuators_force(self):
        """Returns the actuator forces."""
        return observable.MJCFFeature("sensordata", self._entity.actuator_force_sensors)

    @composer.observable
    def actuators_velocity(self):
        """Returns the actuator velocities."""
        return observable.MJCFFeature(
            "sensordata", self._entity.actuator_velocity_sensors
        )

    @composer.observable
    def actuators_power(self):
        """Returns the actuator powers."""

        def _get_actuator_power(physics: mjcf.Physics) -> np.ndarray:
            # Mechanical power magnitude: |force| * |velocity| per actuator.
            force = physics.bind(self._entity.actuator_force_sensors).sensordata
            velocity = physics.bind(self._entity.actuator_velocity_sensors).sensordata
            return abs(force) * abs(velocity)

        return observable.Generic(raw_observation_callable=_get_actuator_power)

    @composer.observable
    def fingertip_positions(self):
        """Returns the fingertip positions in world coordinates."""

        def _get_fingertip_positions(physics: mjcf.Physics) -> np.ndarray:
            return physics.bind(self._entity.fingertip_sites).xpos.ravel()

        return observable.Generic(raw_observation_callable=_get_fingertip_positions)
/models/hands/shadow_hand.py
0.932553
0.410934
shadow_hand.py
pypi
"""Geometry, mass and joint constants for the 88-key piano model.

All lengths are in meters (key travel is stated as "roughly 10mm" below),
masses in kilograms, angles in radians unless noted otherwise.
"""

from math import atan

NUM_KEYS = 88
NUM_WHITE_KEYS = 52

WHITE_KEY_WIDTH = 0.0225
WHITE_KEY_LENGTH = 0.15
WHITE_KEY_HEIGHT = WHITE_KEY_WIDTH
SPACING_BETWEEN_WHITE_KEYS = 0.001
N_SPACES_BETWEEN_WHITE_KEYS = NUM_WHITE_KEYS - 1
BLACK_KEY_WIDTH = 0.01
BLACK_KEY_LENGTH = 0.09
# Unlike the other dimensions, the height of the black key was roughly set such that
# when a white key is fully depressed, the bottom of the black key is barely visible.
BLACK_KEY_HEIGHT = 0.018

# Total keyboard span: 52 white keys plus the gaps between them.
PIANO_LENGTH = (NUM_WHITE_KEYS * WHITE_KEY_WIDTH) + (
    N_SPACES_BETWEEN_WHITE_KEYS * SPACING_BETWEEN_WHITE_KEYS
)

WHITE_KEY_X_OFFSET = 0
WHITE_KEY_Z_OFFSET = WHITE_KEY_HEIGHT / 2
# Black keys sit at the back of the keyboard, flush with the white keys' rear edge.
BLACK_KEY_X_OFFSET = -WHITE_KEY_LENGTH / 2 + BLACK_KEY_LENGTH / 2

# The top of the black key should be 12.5 mm above the top of the white key.
BLACK_OFFSET_FROM_WHITE = 0.0125
BLACK_KEY_Z_OFFSET = WHITE_KEY_HEIGHT + BLACK_OFFSET_FROM_WHITE - BLACK_KEY_HEIGHT / 2

BASE_HEIGHT = 0.04
BASE_LENGTH = 0.1
BASE_WIDTH = PIANO_LENGTH
BASE_SIZE = [BASE_LENGTH / 2, BASE_WIDTH / 2, BASE_HEIGHT / 2]
# Base sits just behind the keys (2 mm clearance).
BASE_X_OFFSET = -WHITE_KEY_LENGTH / 2 - 0.5 * BASE_LENGTH - 0.002
BASE_POS = [BASE_X_OFFSET, 0, BASE_HEIGHT / 2]

# A key is designed to travel downward 3/8 of an inch (roughly 10mm).
# Assuming the joint is positioned at the back of the key, we can write:
# tan(θ) = d / l, where d is the distance the key travels and l is the length of the
# key. Solving for θ, we get: θ = arctan(d / l).
WHITE_KEY_TRAVEL_DISTANCE = 0.01
WHITE_KEY_JOINT_MAX_ANGLE = atan(WHITE_KEY_TRAVEL_DISTANCE / WHITE_KEY_LENGTH)
# TODO(kevin): Figure out black key travel distance.
BLACK_KEY_TRAVEL_DISTANCE = 0.008
BLACK_KEY_JOINT_MAX_ANGLE = atan(BLACK_KEY_TRAVEL_DISTANCE / BLACK_KEY_LENGTH)

# Mass in kg.
WHITE_KEY_MASS = 0.04
BLACK_KEY_MASS = 0.02

# Joint spring reference, in degrees.
# At equilibrium, the joint should be at 0 degrees.
WHITE_KEY_SPRINGREF = -1
BLACK_KEY_SPRINGREF = -1

# Joint spring stiffness, in Nm/rad.
# The spring should be stiff enough to support the weight of the key at equilibrium.
WHITE_KEY_STIFFNESS = 2
BLACK_KEY_STIFFNESS = 2

# Joint damping and armature for smoothing key motion.
WHITE_JOINT_DAMPING = 0.05
BLACK_JOINT_DAMPING = 0.05
WHITE_JOINT_ARMATURE = 0.001
BLACK_JOINT_ARMATURE = 0.001

# Actuator parameters (for self-actuated only).
ACTUATOR_DYNPRM = 1
ACTUATOR_GAINPRM = 1

# Colors.
WHITE_KEY_COLOR = [0.9, 0.9, 0.9, 1]
BLACK_KEY_COLOR = [0.1, 0.1, 0.1, 1]
BASE_COLOR = [0.15, 0.15, 0.15, 1]
/models/piano/piano_constants.py
0.524882
0.24513
piano_constants.py
pypi
import math

from dm_control import mjcf
from mujoco_utils import types

from robopianist.models.piano import piano_constants as consts


def build(add_actuators: bool = False) -> types.MjcfRootElement:
    """Programatically build a piano MJCF.

    The previous implementation repeated the same five kwargs-dict blocks for
    white keys, the lone black key, twin black keys and triplet black keys;
    that duplication is now factored into the nested `_queue_key` helper.
    Element names, positions, defaults and creation order are unchanged (the
    final order is fixed by the sort on key number below).

    Args:
        add_actuators: Whether to add actuators to the piano keys.
    """
    root = mjcf.RootElement()
    root.model = "piano"

    root.compiler.autolimits = True
    root.compiler.angle = "radian"

    # Add materials.
    root.asset.add("material", name="white", rgba=consts.WHITE_KEY_COLOR)
    root.asset.add("material", name="black", rgba=consts.BLACK_KEY_COLOR)

    root.default.geom.type = "box"
    root.default.joint.type = "hinge"
    root.default.joint.axis = [0, 1, 0]
    root.default.site.type = "box"
    root.default.site.group = 4
    root.default.site.rgba = [1, 0, 0, 1]

    # This effectively disables key-key collisions but still allows hand-key collisions,
    # assuming we've kept the default hand contype = conaffinity = 1.
    # See https://mujoco.readthedocs.io/en/latest/computation.html#selection for more
    # details.
    root.default.geom.contype = 0
    root.default.geom.conaffinity = 1

    # Actuator defaults (torque control).
    if add_actuators:
        root.default.general.dyntype = "none"
        root.default.general.dynprm = (consts.ACTUATOR_DYNPRM, 0, 0)
        root.default.general.gaintype = "fixed"
        root.default.general.gainprm = (consts.ACTUATOR_GAINPRM, 0, 0)
        root.default.general.biastype = "none"
        root.default.general.biasprm = (0, 0, 0)

    # White key defaults.
    white_default = root.default.add("default", dclass="white_key")
    white_default.geom.material = "white"
    white_default.geom.size = [
        consts.WHITE_KEY_LENGTH / 2,
        consts.WHITE_KEY_WIDTH / 2,
        consts.WHITE_KEY_HEIGHT / 2,
    ]
    white_default.geom.mass = consts.WHITE_KEY_MASS
    white_default.site.size = white_default.geom.size
    white_default.joint.pos = [-consts.WHITE_KEY_LENGTH / 2, 0, 0]
    white_default.joint.damping = consts.WHITE_JOINT_DAMPING
    white_default.joint.armature = consts.WHITE_JOINT_ARMATURE
    white_default.joint.stiffness = consts.WHITE_KEY_STIFFNESS
    white_default.joint.springref = consts.WHITE_KEY_SPRINGREF * math.pi / 180
    white_default.joint.range = [0, consts.WHITE_KEY_JOINT_MAX_ANGLE]
    if add_actuators:
        white_default.general.ctrlrange = [0, consts.WHITE_KEY_JOINT_MAX_ANGLE]

    # Black key defaults.
    black_default = root.default.add("default", dclass="black_key")
    black_default.geom.material = "black"
    black_default.geom.size = [
        consts.BLACK_KEY_LENGTH / 2,
        consts.BLACK_KEY_WIDTH / 2,
        consts.BLACK_KEY_HEIGHT / 2,
    ]
    black_default.site.size = black_default.geom.size
    black_default.geom.mass = consts.BLACK_KEY_MASS
    black_default.joint.pos = [-consts.BLACK_KEY_LENGTH / 2, 0, 0]
    black_default.joint.damping = consts.BLACK_JOINT_DAMPING
    black_default.joint.armature = consts.BLACK_JOINT_ARMATURE
    black_default.joint.stiffness = consts.BLACK_KEY_STIFFNESS
    black_default.joint.springref = consts.BLACK_KEY_SPRINGREF * math.pi / 180
    black_default.joint.range = [0, consts.BLACK_KEY_JOINT_MAX_ANGLE]
    if add_actuators:
        black_default.general.ctrlrange = [0, consts.BLACK_KEY_JOINT_MAX_ANGLE]

    # Add base.
    base_body = root.worldbody.add("body", name="base", pos=consts.BASE_POS)
    base_body.add("geom", type="box", size=consts.BASE_SIZE, rgba=consts.BASE_COLOR)

    # Overall key numbers (0-87) of the 52 white keys, left to right.
    WHITE_KEY_INDICES = [
        0, 2, 3, 5, 7, 8, 10, 12, 14, 15, 17, 19, 20, 22, 24, 26, 27, 29, 31,
        32, 34, 36, 38, 39, 41, 43, 44, 46, 48, 50, 51, 53, 55, 56, 58, 60, 62,
        63, 65, 67, 68, 70, 72, 74, 75, 77, 79, 80, 82, 84, 86, 87,
    ]
    # Overall key numbers of black keys that come in pairs / in triples.
    BLACK_TWIN_KEY_INDICES = [4, 6, 16, 18, 28, 30, 40, 42, 52, 54, 64, 66, 76, 78]
    BLACK_TRIPLET_KEY_INDICES = [
        1, 9, 11, 13, 21, 23, 25, 33, 35, 37, 45, 47, 49, 57, 59, 61, 69, 71,
        73, 81, 83, 85,
    ]

    # These will hold kwargs. We'll subsequently use them to create the actual objects.
    geoms = []
    bodies = []
    joints = []
    sites = []
    actuators = []

    # Pitch of the white-key grid along y.
    key_pitch = consts.WHITE_KEY_WIDTH + consts.SPACING_BETWEEN_WHITE_KEYS

    def _queue_key(color: str, key_number: int, y_coord: float) -> None:
        """Queue the body/geom/joint/site (and actuator) kwargs for one key."""
        if color == "white":
            x_offset = consts.WHITE_KEY_X_OFFSET
            z_offset = consts.WHITE_KEY_Z_OFFSET
        else:
            x_offset = consts.BLACK_KEY_X_OFFSET
            z_offset = consts.BLACK_KEY_Z_OFFSET
        dclass = f"{color}_key"
        bodies.append(
            {
                "name": f"{color}_key_{key_number}",
                "pos": [x_offset, y_coord, z_offset],
            }
        )
        geoms.append({"name": f"{color}_key_geom_{key_number}", "dclass": dclass})
        joints.append({"name": f"{color}_joint_{key_number}", "dclass": dclass})
        sites.append({"name": f"{color}_key_site_{key_number}", "dclass": dclass})
        if add_actuators:
            actuators.append(
                {
                    "joint": f"{color}_joint_{key_number}",
                    "name": f"{color}_actuator_{key_number}",
                    "dclass": dclass,
                }
            )

    # White keys, evenly spaced from the left edge.
    for i in range(consts.NUM_WHITE_KEYS):
        y_coord = -consts.PIANO_LENGTH * 0.5 + consts.WHITE_KEY_WIDTH * 0.5 + i * key_pitch
        _queue_key("white", WHITE_KEY_INDICES[i], y_coord)

    # Place the lone black key on the far left.
    y_coord = consts.WHITE_KEY_WIDTH + 0.5 * (
        -consts.PIANO_LENGTH + consts.SPACING_BETWEEN_WHITE_KEYS
    )
    _queue_key("black", BLACK_TRIPLET_KEY_INDICES[0], y_coord)

    # Place the twin black keys.
    n = 0
    for twin_index in range(2, consts.NUM_WHITE_KEYS - 1, 7):
        for j in range(2):
            y_coord = -consts.PIANO_LENGTH * 0.5 + (j + 1) * key_pitch + twin_index * key_pitch
            _queue_key("black", BLACK_TWIN_KEY_INDICES[n], y_coord)
            n += 1

    # Place the triplet black keys.
    n = 1  # Skip the lone black key.
    for triplet_index in range(5, consts.NUM_WHITE_KEYS - 1, 7):
        for j in range(3):
            y_coord = -consts.PIANO_LENGTH * 0.5 + (j + 1) * key_pitch + triplet_index * key_pitch
            _queue_key("black", BLACK_TRIPLET_KEY_INDICES[n], y_coord)
            n += 1

    # Sort the elements based on the key number.
    names: list[str] = [body["name"] for body in bodies]  # type: ignore
    indices = sorted(range(len(names)), key=lambda k: int(names[k].split("_")[-1]))
    bodies = [bodies[i] for i in indices]
    geoms = [geoms[i] for i in indices]
    joints = [joints[i] for i in indices]
    sites = [sites[i] for i in indices]
    if add_actuators:
        actuators = [actuators[i] for i in indices]

    # Now create the corresponding MJCF elements and add them to the root.
    for i in range(len(bodies)):
        body = root.worldbody.add("body", **bodies[i])
        body.add("geom", **geoms[i])
        body.add("joint", **joints[i])
        body.add("site", **sites[i])
        if add_actuators:
            root.actuator.add("general", **actuators[i])

    return root
0.792022
0.348396
piano_mjcf.py
pypi
from typing import Sequence import numpy as np from dm_control import composer, mjcf from dm_control.composer.observation import observable from mujoco_utils import mjcf_utils, types from robopianist.models.piano import midi_module, piano_mjcf from robopianist.models.piano import piano_constants as piano_consts # Key color when it is pressed. _ACTIVATION_COLOR = (0.2, 0.8, 0.2, 1.0) # Thresholds for determining whether a key is activated. _KEY_THRESHOLD = 0.00872665 # 0.5 degrees. _SUSTAIN_THRESHOLD = 0.5 class Piano(composer.Entity): """A full-size standard (88-key) digital piano.""" def _build( self, name: str = "piano", add_actuators: bool = False, change_color_on_activation: bool = True, ) -> None: """Initializes the piano. Args: name: Name of the piano. Used as a prefix in the MJCF name attributes. add_actuators: If True, actuators are added to the piano. This is used by the self-actuated piano task. activation_threshold: The threshold, between 0 and 1, beyond which a key is considered activated. change_color_on_activation: If True, the color of the key changes when it becomes activated. """ self._change_color_on_activation = change_color_on_activation self._add_actuators = add_actuators self._midi_module = midi_module.MidiModule() self._mjcf_root = piano_mjcf.build(add_actuators=add_actuators) self._mjcf_root.model = name self._parse_mjcf_elements() self._add_mjcf_elements() self._initialize_state() # Must be defined here for observables. _physics = mjcf.Physics.from_mjcf_model(self._mjcf_root) self._qpos_range = _physics.bind(self.joints).range if add_actuators: self._ctrl_midpoint = np.mean( _physics.bind(self.actuators).ctrlrange, axis=1 ) def _build_observables(self) -> "PianoObservables": return PianoObservables(self) def _parse_mjcf_elements(self) -> None: keys = mjcf_utils.safe_find_all(self._mjcf_root, "body") keys = keys[1:] # Remove the base body. # Sort by increasing key number. 
sorted_idxs = np.argsort([int(key.name.split("_")[-1]) for key in keys]) self._keys = tuple([keys[idx] for idx in sorted_idxs]) key_geoms = mjcf_utils.safe_find_all(self._mjcf_root, "geom") key_geoms = key_geoms[1:] # Remove the base geom. self._key_geoms = tuple([key_geoms[idx] for idx in sorted_idxs]) joints = mjcf_utils.safe_find_all(self._mjcf_root, "joint") self._joints = tuple([joints[idx] for idx in sorted_idxs]) sites = mjcf_utils.safe_find_all(self._mjcf_root, "site") self._sites = tuple([sites[idx] for idx in sorted_idxs]) size = self._mjcf_root.find("body", "base").geom[0].size self._size = tuple(size) if self._add_actuators: actuators = mjcf_utils.safe_find_all(self._mjcf_root, "actuator") self._actuators = tuple([actuators[idx] for idx in sorted_idxs]) def _add_mjcf_elements(self) -> None: # Add cameras. self._mjcf_root.worldbody.add( "camera", name="closeup", pos="-0.313 0.024 0.455", xyaxes="0.003 -1.000 -0.000 0.607 0.002 0.795", ) self._mjcf_root.worldbody.add( "camera", name="left", pos="0.393 -0.791 0.638", xyaxes="0.808 0.589 0.000 -0.388 0.533 0.752", ) self._mjcf_root.worldbody.add( "camera", name="right", pos="0.472 0.598 0.580", xyaxes="-0.637 0.771 -0.000 -0.510 -0.421 0.750", ) self._mjcf_root.worldbody.add( "camera", name="back", pos="-0.569 0.008 0.841", xyaxes="-0.009 -1.000 0.000 0.783 -0.007 0.622", ) self._mjcf_root.worldbody.add( "camera", name="egocentric", pos="0.417 -0.039 0.717", xyaxes="-0.002 1.000 0.000 -0.867 -0.002 0.498", ) pad_y = 0.5 distance = 1.0 fovy_radians = 2 * np.arctan2(pad_y * self._size[1], distance) self._mjcf_root.worldbody.add( "camera", name="topdown", pos=[0, 0, distance], quat=[1, 0, 0, 1], fovy=np.rad2deg(fovy_radians), ) # Composer methods. def initialize_episode( self, physics: mjcf.Physics, random_state: np.random.RandomState ) -> None: del random_state # Unused. 
self._initialize_state() self._midi_module.initialize_episode(physics) self._update_key_state(physics) self._update_key_color(physics) def after_substep( self, physics: mjcf.Physics, random_state: np.random.RandomState ) -> None: del random_state # Unused. self._update_key_state(physics) self._update_key_color(physics) self._midi_module.after_substep( physics, self._activation, self._sustain_activation ) # Methods. def _initialize_state(self) -> None: self._state = np.zeros(piano_consts.NUM_KEYS, dtype=np.float64) self._sustain_state = np.zeros(1, dtype=np.float64) self._activation = np.zeros(piano_consts.NUM_KEYS, dtype=bool) self._sustain_activation = np.zeros(1, dtype=bool) self._normalized_state = np.zeros(piano_consts.NUM_KEYS, dtype=np.float64) def is_key_black(self, key_id: int) -> bool: """Returns True if the piano key id corresponds to a black key.""" black_keys = [0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1] return bool(black_keys[key_id % 12]) def _update_key_state(self, physics: mjcf.Physics) -> None: """Updates the state of the piano keys.""" if self._add_actuators: ctrl_idxs = physics.bind(self.actuators).ctrl >= self._ctrl_midpoint self._activation[:] = ctrl_idxs else: # MuJoCo joint limits are soft, so we clip any joint positions that are # outside their limits. 
joints_pos = physics.bind(self.joints).qpos self._state[:] = np.clip(joints_pos, *self._qpos_range.T) self._normalized_state[:] = self._state / self._qpos_range[:, 1] self._activation[:] = np.where( np.abs(self._state - self._qpos_range[:, 1]) <= _KEY_THRESHOLD, 1.0, 0.0, ) self._sustain_activation[:] = self._sustain_state >= _SUSTAIN_THRESHOLD def _update_key_color(self, physics: mjcf.Physics) -> None: """Colors the piano keys if they are pressed.""" if self._change_color_on_activation: physics.bind(self._key_geoms).rgba = np.where( self._activation[:, None], _ACTIVATION_COLOR, # Hacky way of restoring key color: we set the rgba of the geom to the # default gray so the inherited material, which specifies the white or # black rgba, kicks in. (0.5, 0.5, 0.5, 1.0), ) else: physics.bind(self._key_geoms).rgba = (0.5, 0.5, 0.5, 1.0) def apply_action( self, physics: mjcf.Physics, action: np.ndarray, random_state: np.random.RandomState, ) -> None: del random_state # Unused. if not self._add_actuators: raise ValueError("Cannot apply action if `add_actuators` is False.") physics.bind(self._actuators).ctrl = action[:-1] self._sustain_state[0] = action[-1] def apply_sustain( self, physics: mjcf.Physics, sustain: float, random_state: np.random.RandomState ) -> None: del physics, random_state # Unused. self._sustain_state[0] = sustain # Accessors. 
    @property
    def mjcf_model(self) -> types.MjcfRootElement:
        return self._mjcf_root

    @property
    def n_keys(self) -> int:
        return len(self._keys)

    @property
    def joints(self) -> Sequence[types.MjcfElement]:
        return self._joints

    @property
    def keys(self) -> Sequence[types.MjcfElement]:
        return self._keys

    @property
    def activation(self) -> np.ndarray:
        """Boolean per-key activation, as computed by `_update_key_state`."""
        return self._activation

    @property
    def sustain_activation(self) -> np.ndarray:
        """Boolean shape-(1,) sustain-pedal activation."""
        return self._sustain_activation

    @property
    def state(self) -> np.ndarray:
        """Per-key joint positions, clipped to the joint range."""
        return self._state

    @property
    def normalized_state(self) -> np.ndarray:
        """Per-key state divided by the upper joint limit."""
        return self._normalized_state

    @property
    def sustain_state(self) -> np.ndarray:
        return self._sustain_state

    @property
    def size(self) -> Sequence[float]:
        return self._size

    @property
    def actuators(self) -> Sequence[types.MjcfElement]:
        # Only meaningful when the model was built with actuators.
        if not self._add_actuators:
            raise ValueError("You must set add_actuators=True to use this property.")
        return self._actuators

    @property
    def midi_module(self) -> midi_module.MidiModule:
        return self._midi_module


class PianoObservables(composer.Observables):
    """Observables for the piano."""

    _entity: Piano

    # TODO(kevin): Check if necessary to return copies of the underlying arrays.

    @composer.observable
    def joints_pos(self):
        """Returns the piano key joint positions."""

        def _get_joints_pos(physics: mjcf.Physics) -> np.ndarray:
            # We use the physics bind method because we need to preserve the order of
            # the joints specified in the class constructor and not the order in which
            # they are defined in the MJCF file.
            return physics.bind(self._entity.joints).qpos

        return observable.Generic(raw_observation_callable=_get_joints_pos)

    @composer.observable
    def activation(self):
        """Returns the piano key activations (bool array cast to float64)."""

        def _get_activation(physics: mjcf.Physics) -> np.ndarray:
            del physics  # Unused.
            return self._entity.activation.astype(np.float64)

        return observable.Generic(raw_observation_callable=_get_activation)

    @composer.observable
    def sustain_activation(self):
        """Returns the sustain pedal activation (bool array cast to float64)."""

        def _get_activation(physics: mjcf.Physics) -> np.ndarray:
            del physics  # Unused.
            return self._entity.sustain_activation.astype(np.float64)

        return observable.Generic(raw_observation_callable=_get_activation)

    @composer.observable
    def state(self):
        """Returns the piano key states (normalized, not raw, positions)."""

        def _get_normalized_state(physics: mjcf.Physics) -> np.ndarray:
            del physics  # Unused.
            return self._entity.normalized_state

        return observable.Generic(raw_observation_callable=_get_normalized_state)

    @composer.observable
    def sustain_state(self):
        """Returns the sustain pedal state."""

        def _get_state(physics: mjcf.Physics) -> np.ndarray:
            del physics  # Unused.
            return self._entity.sustain_state

        return observable.Generic(raw_observation_callable=_get_state)
/models/piano/piano.py
0.927978
0.353875
piano.py
pypi
from typing import Callable, List, Optional

import numpy as np
from dm_control import mjcf

from robopianist.models.piano import piano_constants
from robopianist.music import midi_file, midi_message


class MidiModule:
    """The piano sound module.

    It is responsible for tracking the state of the piano keys and generating
    corresponding MIDI messages. The MIDI messages can be used with a
    synthesizer to produce sound.
    """

    def __init__(self) -> None:
        # Optional callbacks fired when the corresponding MIDI event occurs.
        self._note_on_callback: Optional[Callable[[int, int], None]] = None
        self._note_off_callback: Optional[Callable[[int], None]] = None
        self._sustain_on_callback: Optional[Callable[[], None]] = None
        self._sustain_off_callback: Optional[Callable[[], None]] = None

    def initialize_episode(self, physics: mjcf.Physics) -> None:
        """Resets the tracked key/sustain state and clears recorded messages."""
        del physics  # Unused.
        self._prev_activation = np.zeros(piano_constants.NUM_KEYS, dtype=bool)
        self._prev_sustain_activation = np.zeros(1, dtype=bool)
        # One list of messages per physics substep.
        self._midi_messages: List[List[midi_message.MidiMessage]] = []

    def after_substep(
        self,
        physics: mjcf.Physics,
        activation: np.ndarray,
        sustain_activation: np.ndarray,
    ) -> None:
        """Diffs activations against the previous substep and emits
        NoteOn/NoteOff/SustainOn/SustainOff messages for every transition."""
        timestep_events: List[midi_message.MidiMessage] = []
        message: midi_message.MidiMessage

        # XOR yields the keys whose activation changed during this substep.
        state_change = activation ^ self._prev_activation
        sustain_change = sustain_activation ^ self._prev_sustain_activation

        # Note on events: changed AND previously off.
        for key_id in np.flatnonzero(state_change & ~self._prev_activation):
            message = midi_message.NoteOn(
                note=midi_file.key_number_to_midi_number(key_id),
                # TODO(kevin): In the future, we will replace this with the actual
                # key velocity. For now, we hardcode it to the maximum velocity.
                velocity=127,
                time=physics.data.time,
            )
            timestep_events.append(message)
            if self._note_on_callback is not None:
                self._note_on_callback(message.note, message.velocity)

        # Note off events: changed AND now off.
        for key_id in np.flatnonzero(state_change & ~activation):
            message = midi_message.NoteOff(
                note=midi_file.key_number_to_midi_number(key_id),
                time=physics.data.time,
            )
            timestep_events.append(message)
            if self._note_off_callback is not None:
                self._note_off_callback(message.note)

        # Sustain pedal events. The arrays are shape (1,), so `if` evaluates the
        # truthiness of the single element.
        if sustain_change & ~self._prev_sustain_activation:
            timestep_events.append(midi_message.SustainOn(time=physics.data.time))
            if self._sustain_on_callback is not None:
                self._sustain_on_callback()
        if sustain_change & ~sustain_activation:
            timestep_events.append(midi_message.SustainOff(time=physics.data.time))
            if self._sustain_off_callback is not None:
                self._sustain_off_callback()

        self._midi_messages.append(timestep_events)
        # Copy so later in-place mutation by the caller cannot corrupt history.
        self._prev_activation = activation.copy()
        self._prev_sustain_activation = sustain_activation.copy()

    def get_latest_midi_messages(self) -> List[midi_message.MidiMessage]:
        """Returns the MIDI messages generated in the last substep."""
        # NOTE(review): raises IndexError if called before any substep has run.
        return self._midi_messages[-1]

    def get_all_midi_messages(self) -> List[midi_message.MidiMessage]:
        """Returns a list of all MIDI messages generated during the episode."""
        return [message for timestep in self._midi_messages for message in timestep]

    # Callbacks for synthesizer events.

    def register_synth_note_on_callback(
        self,
        callback: Callable[[int, int], None],
    ) -> None:
        """Registers a callback for note on events."""
        self._note_on_callback = callback

    def register_synth_note_off_callback(
        self,
        callback: Callable[[int], None],
    ) -> None:
        """Registers a callback for note off events."""
        self._note_off_callback = callback

    def register_synth_sustain_on_callback(
        self,
        callback: Callable[[], None],
    ) -> None:
        """Registers a callback for sustain pedal on events."""
        self._sustain_on_callback = callback

    def register_synth_sustain_off_callback(
        self,
        callback: Callable[[], None],
    ) -> None:
        """Registers a callback for sustain pedal off events."""
        self._sustain_off_callback = callback
/models/piano/midi_module.py
0.819677
0.270944
midi_module.py
pypi
# RoboRabbit

RoboRabbit is a simple-to-use, opinionated, asynchronous abstraction over amqp/RabbitMQ (using aio_pika) and configuration CLI.

## Features

- Create/assert Queues, Exchanges, and Bindings on connection
- Declarative Queue, Exchange, Binding, and Connection configuration using YAML
- Very straightforward async message handling
- Command line interface for bootstrapping rabbit from your roborabbit yaml config file.

## Installation

#### pip

$ `pip install roborabbit`

#### poetry

$ `poetry add roborabbit`

## Handle queue messages

The simplest worker possible. Connection information is in the `roborabbit.yaml` file. The method `run()` takes a dictionary with a key/value pair:

- key: `queue` - string, the name of the queue to listen to
- value: `handler` - function, the callback function messages will be sent to

### Notes

- Dead letter exchanges/queues are created and bound for you. (default is {queue_name}_dlq and {queue_name}_dlx)
- Messages are `reject`ed and pushed into the dead letter queue when an exception is thrown.
- Messages are `nack`ed and returned to queue when disconnected (asyncio.CancelledError).
- Messages are `ack`ed automatically after the callback has run without exception.
- Multiple queues can be listened to at the same time.
- Connection is honored in the following order
  - The `Connection()` class
  - Connection parameters defined in your roborabbit.yaml file
  - Environment variables (see environment variables section)
  - Default RabbitMQ connection values

### environment variables

- `RABBIT_HOST` default 'localhost'
- `RABBIT_USER` default 'guest'
- `RABBIT_PASS` default 'guest'
- `RABBIT_PORT` default 5432
- `RABBIT_VIRTUALHOST` default '/'
- `RABBIT_PREFETCH` default 10

### Basic Example

```py
from roborabbit.roborabbit import RoboRabbit
from pathlib import Path

config_path = Path('roborabbit.yaml')
robo = RoboRabbit(config_path)

async def queue_handler(msg):
    print(msg)  # your logic here

await robo.run({'queue_1': queue_handler})
```

### Explicit connection example

If you want control over the configuration, you can pass in the roborabbit connection object.

```py
from roborabbit.connection import Connection
from roborabbit.roborabbit import RoboRabbit
from pathlib import Path

config_path = Path('roborabbit.yaml')
connection = Connection(
    host='not.localhost.com',
    username='bob',
    password='pas123',
    port=4499,
    virtualhost='/')
robo = RoboRabbit(config_path, connection)

async def queue_handler(msg):
    print(msg)  # your logic here

async def work():
    await robo.run({'queue_1': queue_handler})
```

## Command

`roborabbit --config path/to/roborabbit.yaml`

### info

```
Usage: roborabbit [OPTIONS]

  import yaml config file and creates a dictionary from it

Options:
  --config TEXT       Path to rabbit config yaml file
  --host TEXT         RabbitMQ host
  --port TEXT         RabbitMQ port
  --virtualhost TEXT  RabbitMQ virtualhost
  --username TEXT     RabbitMQ username
  --password TEXT     RabbitMQ password
  --help              Show this message and exit.
``` ## Override environment variables ``` RABBIT_USER=guest RABBIT_PASS=guest RABBIT_HOST=localhost RABBIT_PORT=5672 RABBIT_VHOST=/ ``` ## Example yaml files ### Simple declare queue, exchange, and bind ``` host: localhost username: guest password: guest virtualhost: / port: 5672 exchanges: - name: exchange_1 type: topic queues: - name: queue_1 bindings: - from: type: exchange name: exchange_1 to: type: queue name: queue_1 routing_keys: - records.created ``` ### Header exchange declaration and binding ``` host: localhost username: guest password: guest virtualhost: / port: 5672 exchanges: - name: exchange_2 type: headers queues: - name: queue_2 bindings: - from: type: exchange name: exchange_2 to: type: queue name: queue_1 bind_options: - x-match: all hw-action: header-value ``` ## All Values Available ``` # Connection info host: localhost username: guest password: guest virtualhost: / port: 5672 # Exchange declarations exchanges: - name: string type: topic|headers|direct|fanout # topic is default durable: false # default auto_delete: true # default # queue declarations queues: - name: string type: quorum # Not required. This is the default and currently only option available (For us, all our queues are quorum. We manually create the queue that needs other requirements). 
MR welcome dlq: string # default {queue_name}_dlq dlx: string # default {queue_name}_dlx durable: true # default robust: true # default auto_delete: false # default exclusive: false # default auto_delete_delay: 0 # default arguments: # rabbit specific key/value pairs key_1: value_1 key_2: value_2 # bindings bindings: - from: type: exchange name: string to: type: exchange|queue name: string routing_keys: - record.created # list of string, required, unless bind_options is defined bind_options: # list of `x-match` and `header-key`, required if binding to a header exchange - x-match: all|any # header type of matcher header-key: string # header topic to be matched ``` ## Planned features: - Simple message publishing - Expose the underlying channel so you can drop right into aio_pika if you want.
/roborabbit-0.4.2.tar.gz/roborabbit-0.4.2/README.md
0.460532
0.750507
README.md
pypi
from PyQt5 import QtGui, QtCore, QtWidgets

from .serial import Serializable
from .utils import _build_common_widget


class View(Serializable, QtCore.QObject):
    """
    This class allows the user to create and save a layout of Plots.

    :param plots: View plots.
    :type plots: list(:py:class:`Plot<RAI.plotting.Plot>`)
    :param str name: Name tag of the view.
    """

    serial_version = '1.0'

    def __init__(self, plots=None, name='View'):
        super().__init__()
        self.plots = plots or []
        self.name = name

    def build(self, width, height, parent=None):
        """
        Build widgets to display View.

        :param width: width of a Plot.
        :type width: int
        :param height: height of a Plot.
        :type height: int
        :param parent: parent Qt widget (also used as parent of all sub-widgets).
        """
        # Initialize attributes
        self.plot_width = width
        self.plot_height = height
        self.parent = parent
        # Build ScrollArea
        self.scroll_area_plots = QtWidgets.QScrollArea(self.parent)
        self.scroll_area_plots.setWidgetResizable(True)
        self.scroll_area_plots_widget_contents = QtWidgets.QWidget(self.scroll_area_plots)
        self.scroll_area_plots_widget_contents.setGeometry(QtCore.QRect(0, 0, 380, 247))
        self.scroll_area_plots.setWidget(self.scroll_area_plots_widget_contents)
        self.vertical_widget_plots = QtWidgets.QWidget(self.parent)
        self.vertical_layout_plots = QtWidgets.QVBoxLayout(self.vertical_widget_plots)
        self.vertical_layout_plots.addWidget(self.scroll_area_plots)
        self.vertical_layout_plots_scroll = QtWidgets.QVBoxLayout(
            self.scroll_area_plots_widget_contents)
        # Insert Plot widget: one row per Plot, main panel next to zoom panel.
        for plot in self.plots:
            plot_widget = _build_common_widget(self.parent, 1, 2, plot.widget1, plot.widget2,
                                               stretch_columns=[2, 1])
            plot_widget.setMinimumSize(self.plot_width, self.plot_height)
            self.vertical_layout_plots_scroll.addWidget(plot_widget)
        # Install event filter
        self.scroll_area_plots.verticalScrollBar().installEventFilter(self)

    def eventFilter(self, obj, event):
        # Deactivate wheel events because they mess things up with Plots
        if event.type() == QtCore.QEvent.Wheel:
            return True
        return False

    def deleteLater(self):
        """Deleting plots to avoid Seg fault"""
        for plot in self.plots:
            plot.deleteLater()
        super().deleteLater()

    def serialize(self):
        # Plots serialize themselves recursively via the JSON encoder.
        super().serialize()
        return {"name": self.name,
                "plots": self.plots,
                "serial_version": self.serial_version
                }

    @staticmethod
    def deserialize(dictionary, *args, **kwargs):
        """Rebuild a View; args[0] must hold already-deserialized Plot objects."""
        assert(len(args) >= 1)  # we need the plots
        plots = args[0]
        dictionary.pop("serial_version")
        dictionary.pop("plots")
        return View(plots, **dictionary)
/robot_analysis_interface-1.0rc5-py3-none-any.whl/RAI/view.py
0.892445
0.283437
view.py
pypi
import pkg_resources

from .video import Video
from .session import Session
from .launcher import launcher
from . import utils


def _demo_plot():
    """Demo showing how to create a window with Plots only."""
    # Sensors files
    sensors_filename = pkg_resources.resource_filename('RAI', 'resources/d00300')
    # Create and launch session
    my_session = Session(sensors_filename)
    my_session.launch()


def _demo_video():
    """Demo showing how to load and display a single Video in an OpenCV window."""
    filename = pkg_resources.resource_filename('RAI', 'resources/solo.mp4')
    vid = Video(filename)
    utils.show_video(vid)


def _demo_session():
    """Demo showing how to create a Session."""
    # Sensors files
    sensors_filename = pkg_resources.resource_filename('RAI', 'resources/d00300')
    # Videos
    videos_list = []
    videos_list.append(pkg_resources.resource_filename('RAI', 'resources/bolt.mp4'))
    videos_list.append(pkg_resources.resource_filename('RAI', 'resources/solo.mp4'))
    # Set up views: view name -> {plot index: [stream names]}
    views_data = {}
    views_data['View1'] = {0: ['LF_z', 'momrate_ref__a'], 1: ['momrate_ref__a'], 2: []}
    views_data['View2'] = {0: ['momrate_ref__a']}
    views_data['Empty View'] = {0: []}
    # Create and launch session
    my_session = Session(sensors_filename, videos_list, views_data)
    my_session.launch()


def _demo_launcher():
    """Demo showing how to use the launcher."""
    # Sensors file (passed as a CLI-style argument)
    sensors_filename = '--data=' + pkg_resources.resource_filename('RAI', 'resources/d00300')
    # Session file
    session_filename = '--session=' + \
        pkg_resources.resource_filename('RAI', 'resources/session_demo.json')
    # Start launcher
    launcher([sensors_filename, session_filename])


def _demo_hopper():
    """Demo showing how to create a Session using a Pickle file as Sensor."""
    # Sensors files
    sensors_filename = pkg_resources.resource_filename('RAI', 'resources/jviereck_hopper/traj.pkl')
    # Videos
    videos_list = []
    videos_list.append(pkg_resources.resource_filename(
        'RAI', 'resources/jviereck_hopper/recording.mp4'))
    # Set up views
    views_data = {}
    views_data['View1'] = {0: ['baze_z', 'hip'], 1: ['u_knee'], 2: []}
    views_data['View2'] = {0: ['baze_z']}
    # Create and launch session
    my_session = Session(sensors_filename, videos_list, views_data)
    my_session.launch()


def _demo_npzfile():
    """Demo creating a session from a compressed .npz file."""
    # Sensors files
    sensors_filename = pkg_resources.resource_filename('RAI', 'resources/demo_data.npz')
    # Set up views
    views_data = {}
    views_data['SingleView'] = {0: ['data0/y', 'data1/z'], 1: ['data1/y']}
    # Create and launch session
    my_session = Session(sensors_filename, views_data=views_data)
    my_session.launch()


def demo(which=None):
    """Caller for the different demos.

    :param str which: demo key; prints the available keys when unknown/omitted.
    """
    DEMOS = {
        'plot': _demo_plot,
        'video': _demo_video,
        'session': _demo_session,
        'launcher': _demo_launcher,
        'hopper': _demo_hopper,
        'npzfile': _demo_npzfile,
    }
    if which not in DEMOS:
        print("Please indicate which demo you want:")
        for key in DEMOS:
            print("\tdemo('%s')" % key)
    else:
        DEMOS[which]()
/robot_analysis_interface-1.0rc5-py3-none-any.whl/RAI/demos.py
0.575707
0.197077
demos.py
pypi
from contextlib import suppress

from PyQt5 import QtCore
from PyQt5.QtGui import QColor
import pyqtgraph as pg

from .serial import Serializable
from .utils import are_zeros_and_ones


class Plot(Serializable, QtCore.QObject):
    """This class provides access to the plot to be displayed.

    :param dict x: x-axis data
    :param dict y: y-axis data
    :param list(str) streams: names of the streams plotted
    :param str name: Name tag of the plot
    :param str plot_type: Type of plot. Only PyQtGraph is supported for the moment
    :param str x_label: x-axis label
    :param str y_label: y-axis label
    :param str x_units: x-axis units
    :param str y_units: y-axis units
    :param bool include_vertical_lines: if True, add vertical lines that will be
        connected to the videos
    """

    serial_version = '1.0'

    line_updated_signal = QtCore.pyqtSignal(float)

    # Dictionary of colors counting how many times each has been used (initially 0)
    # NOTE(review): class-level dict mutated through `self.colors[...]` in
    # _add_stream/_remove_stream, so usage counts are shared by ALL Plot
    # instances -- confirm this is intentional.
    colors = {
        0: (QColor(255, 0, 0), 0),      # red
        1: (QColor(0, 255, 0), 0),      # green
        2: (QColor(0, 0, 255), 0),      # blue
        3: (QColor(0, 255, 255), 0),    # cyan
        4: (QColor(255, 0, 255), 0),    # magenta
        5: (QColor(255, 127, 36), 0),   # chocolate
    }

    # Set up background/foreground colors
    pg.setConfigOption('background', 'w')
    pg.setConfigOption('foreground', 'k')

    def __init__(self, x_data=None, y_data=None, streams=None, name='Plot',
                 plot_type='PyQtGraph', x_label='time', y_label='Streams',
                 x_units='s', y_units='', include_vertical_lines=True,
                 parent=None, **kwargs):
        super().__init__(parent)
        self.name = name
        self.plot_type = plot_type
        self.x_data = x_data or {}
        self.y_data = y_data or {}
        self.streams = streams or []
        self.x_label = x_label
        self.y_label = y_label
        self.x_units = x_units
        self.y_units = y_units
        self.include_vertical_lines = include_vertical_lines
        # ID number in the GridLayout.
        self.id_number = None
        if 'id_number' in kwargs:
            self.id_number = kwargs['id_number']
        # Check that x_/ y_data and streams have the same length.
        # NOTE(review): asserts are stripped under `python -O`; consider raising
        # ValueError if this validation must always run.
        assert (len(self.x_data) == len(self.y_data)), \
            'x/y data do not have the same length (%s != %s).' % (
                len(self.x_data), len(self.y_data))
        assert (len(self.x_data) == len(self.streams)), \
            'x_data and streams do not have the same length (%s != %s).' % (
                len(self.x_data), len(self.streams))

    def deleteLater(self):
        # Close PlotItems; suppress AttributeError because _prepare() may never
        # have been called, in which case the items do not exist yet.
        with suppress(AttributeError):
            self.plot_item1.close()
        with suppress(AttributeError):
            self.plot_item2.close()
        super().deleteLater()

    def _prepare(self, parent):
        """Prepare Widgets/PlotIteams/LinearRegionItems."""
        # First create empty plot (panel 1 = overview with region selector)
        self.widget1 = pg.GraphicsLayoutWidget(parent)
        self.plot_item1 = self.widget1.addPlot()
        self.plot_item1.setLabel('bottom', self.x_label, units=self.x_units)
        self.plot_item1.setLabel('left', self.y_label, units=self.y_units)
        self.lr = pg.LinearRegionItem([1.0, 2.0])
        # Set up color of the lines
        self.lr.lines[0].setPen(pg.mkPen('l', width=2))
        self.lr.lines[1].setPen(pg.mkPen('l', width=2))
        self.lr.lines[0].setHoverPen(pg.mkPen('r', width=2))
        self.lr.lines[1].setHoverPen(pg.mkPen('r', width=2))
        self.lr.setZValue(-10)
        self.plot_item1.addItem(self.lr)
        # Panel 2 = zoomed-in view of the selected region
        self.widget2 = pg.GraphicsLayoutWidget(parent)
        self.plot_item2 = self.widget2.addPlot()
        self.plot_item2.setLabel('bottom', self.x_label, units=self.x_units)
        # Connect both panels
        self.lr.sigRegionChanged.connect(self._update_zoom_plot)
        self.plot_item2.sigXRangeChanged.connect(self._update_main_plot)
        self._update_zoom_plot()
        if self.include_vertical_lines:
            self.line1 = self.plot_item1.addLine(0.0, pen=pg.mkPen('k', width=2),
                                                 hoverPen=pg.mkPen('r', width=2),
                                                 movable=True)
            self.line2 = self.plot_item2.addLine(0.0, pen=pg.mkPen('k', width=2),
                                                 hoverPen=pg.mkPen('r', width=2),
                                                 movable=True)
            # Connect signals
            self.line1.sigPositionChangeFinished.connect(self._line_updated)
            self.line2.sigPositionChangeFinished.connect(self._line_updated)
        # Add legend and hide it if there is no stream to plot
        self.legend = pg.LegendItem(offset=(450, 150))
        self.legend.setParentItem(self.plot_item1)
        if len(self.streams) == 0:
            self.legend.setVisible(False)
        # Add streams if needed
        self.curves = {}
        for stream in self.streams:
            self._add_stream(self.x_data[stream], self.y_data[stream], stream)
        # Show grids
        self.plot_item1.showGrid(x=True, y=True, alpha=0.35)
        self.plot_item2.showGrid(x=True, y=True, alpha=0.75)

    def _update_zoom_plot(self):
        """Update zoomed-in plot when moving the slider on the main plot."""
        self.plot_item2.setXRange(*self.lr.getRegion(), padding=0)

    def _update_main_plot(self):
        """Update slider on the main plot when resizing zoomed-in plot."""
        self.lr.setRegion(self.plot_item2.getViewBox().viewRange()[0])

    def _line_updated(self, source):
        """
        Send signal to MainWindow that a line has been moved.

        :param pg.InfiniteLine source: Updated line
        """
        # New value
        val = source.value()
        # Send signal to main window
        self.line_updated_signal.emit(val)

    def _update_vertical_lines(self, val):
        """
        Update the vertical lines to new position.

        :param float val: x-value at which to draw line
        """
        # First disconnect signals to avoid infinite calls
        self.line1.sigPositionChangeFinished.disconnect(self._line_updated)
        self.line2.sigPositionChangeFinished.disconnect(self._line_updated)
        # Update plots
        self.line2.setValue(val)
        self.line1.setValue(val)
        # Reconnect
        self.line1.sigPositionChangeFinished.connect(self._line_updated)
        self.line2.sigPositionChangeFinished.connect(self._line_updated)

    def serialize(self):
        # Note: x_data/y_data are intentionally NOT serialized; they are
        # re-fetched from the sensors data in deserialize().
        super().serialize()
        return {
            "streams": self.streams,
            "name": self.name,
            "plot_type": self.plot_type,
            "x_label": self.x_label,
            "y_label": self.y_label,
            "x_units": self.x_units,
            "y_units": self.y_units,
            "include_vertical_lines": self.include_vertical_lines,
            "id_number": self.id_number,
            "serial_version": self.serial_version
        }

    @staticmethod
    def deserialize(dictionary, *args, **kwargs):
        assert(len(args) >= 1)  # we need the sensors data
        sensors_data = args[0]
        # Get streams data
        x, y = {}, {}
        for stream in dictionary['streams']:
            x1, y1 = \
sensors_data.get_streams(['time', stream]) x[stream] = x1 y[stream] = y1 dictionary.pop("serial_version") return Plot(x, y, **dictionary) def _add_stream(self, x, y, stream, update_legend=True): """ Add stream to the plot. :param numpy.array x: x-data :param numpy.array y: y-data :param str stream: stream to add """ # Add data and stream in lists (not to do if the plot is being prepared) if stream not in self.streams: self.x_data[stream] = x self.y_data[stream] = y self.streams.append(stream) # Find which color to use color_id = self._get_next_available_color() color = self.colors[color_id][0] # Plot stream # The rendering becomes very slow if we render "booleans" (i.e. 0s and 1s) # because of the connecting lines between points # In case we try to plot such stream, we use a scattered plot instead. plot_kwargs = {} if are_zeros_and_ones(y): plot_kwargs['pen'] = None plot_kwargs['symbolBrush'] = color plot_kwargs['symbolSize'] = 5 plot_kwargs['symbolPen'] = None else: plot_kwargs['pen'] = pg.mkPen(color, width=2) c1 = self.plot_item1.plot(x, y, name=stream, **plot_kwargs) c2 = self.plot_item2.plot(x, y, **plot_kwargs) self.curves[stream] = (c1, c2) # Add color count self.colors[color_id] = (self.colors[color_id][0], self.colors[color_id][1] + 1) # Add item to legend # Make legend visible in case it was not if update_legend: self.legend.addItem(c1, stream) if not self.legend.isVisible(): self.legend.setVisible(True) def _remove_stream(self, stream, update_legend=True): """ Remove stream from plot. 
:param stream: stream to add :type stream: str """ # Stream should be present assert stream in self.streams, \ "Cannot find stream {} to remove.".format(stream) # Curves to remove (c1, c2) = self.curves[stream] # Find color color_id = self._get_color_from_curve(c1) # Remove curves self.plot_item1.removeItem(c1) self.plot_item2.removeItem(c2) self.curves.pop(stream) # Remove data self.x_data.pop(stream) self.y_data.pop(stream) # Remove stream from list of streams self.streams.remove(stream) # Remove color count self.colors[color_id] = (self.colors[color_id][0], self.colors[color_id][1] - 1) # Remove item from legend # Hide legend in case there is no stream to display if update_legend: self.legend.removeItem(stream) if len(self.legend.items) == 0: self.legend.setVisible(False) def _update_stream(self, x, y, stream): """ Update plots when data has been refreshed. :param numpy.array x: new x-data :param numpy.array y: new y-data :param str stream: name of the stream to update """ # Remove old data self._remove_stream(stream, update_legend=False) # Add new data self._add_stream(x, y, stream, update_legend=False) def _get_next_available_color(self): """ Find the color to use. :returns Index of the color to use :rtype int """ # Loop through all the colors and find which one has been used the least indices = [0] min_val = self.colors[0][1] count = 1 while count < len(self.colors): if self.colors[count] == min_val: indices.append[count] elif self.colors[count][1] < min_val: indices = [count] min_val = self.colors[count][1] else: pass count += 1 return min(indices) def _get_color_from_curve(self, curve): """ Find the color of a curve. 
:param curve: Curve :type curve: :py:class:`<pyqtgraph.graphicsItems.PlotItem>` :returns Index of the color :rtype int """ try: color = curve.opts.get('pen').color() except AttributeError: # this is a boolean field color = curve.opts.get('symbolBrush') color_id = 0 while color_id < len(self.colors) - 1: if color.__eq__(self.colors[color_id][0]): break color_id += 1 return color_id
/robot_analysis_interface-1.0rc5-py3-none-any.whl/RAI/plotting.py
0.886703
0.50177
plotting.py
pypi
import os
import json
from time import asctime
import uuid

from .main_window import CompositeWindow
from .sensors import SensorsData
from .serial import Serializable
from .utils import get_app
from .version import __version__ as framework_version
from .video import Video


class Session(Serializable):
    """
    This class handles the creation of the CompositeWindow and of the
    necessary instances. It also offers the capability to save and load
    the current state of the analysis session.

    :param str sensors_file: Name of the Sensors file
    :param list(str) video_files: Name of the videos to include
    :param dict views_data: Data describing the Plots and the View
    """

    serial_version = '1.1'
    framework_version = framework_version

    def __init__(self, sensors_file=None, video_files=None, views_data=None):
        self.ID = uuid.uuid1().hex
        self.creation_time = asctime()
        # Paths are relative with respect to the location of the sensors file
        if sensors_file is not None:
            self.root_path, self.sensors_file = os.path.split(os.path.abspath(sensors_file))
        else:
            self.root_path = None
            self.sensors_file = None
        self.video_files = []
        if video_files is not None:
            for filename in video_files:
                rel_path = os.path.relpath(filename, self.root_path)
                self.video_files.append(rel_path)
        # Add an empty View in case None is provided by the user
        empty_view = {
            'Empty View': {0: []}
        }
        views_data = views_data or empty_view
        # Only build widgets when there is actually something to show.
        if self.sensors_file or self.video_files:
            self._prepare(views_data)

    def _prepare(self, views_data):
        """Prepare the session."""
        # Get QApplication
        app = get_app()
        # Build sensors data
        if self.sensors_file is not None:
            self.sensors_data = SensorsData(self.sensors_file, parent_session=self)
        # Build videos
        self.videos = []
        for video in self.video_files:
            self.videos.append(Video(video, parent_session=self))
        # Build window
        self.window = CompositeWindow(views_data, self.videos, parent_session=self)

    def refresh_sensors_data(self):
        """Refresh the sensors data."""
        assert self.sensors_file is not None, \
            'Sensors file ' \
            'path is None, cannot refresh.'
        self.sensors_data = SensorsData(self.sensors_file, parent_session=self)

    def launch(self, start_qt_loop=True):
        """
        Launch the session.

        :param bool start_qt_loop: start the Qt loop for events input by the user?
        """
        # potential utility to kill the app with normal crtl+c
        # https://stackoverflow.com/questions/4938723/what-is-the-correct-way-to-make-my-pyqt-application-quit-when-killed-from-the-co
        # Get QApplication
        app = get_app()
        # Show window
        self.window.show()
        # Start Qt loop
        if start_qt_loop:
            app.exec_()

    def save(self, session_file):
        """
        Write a JSON file that contains the session data necessary for restart.

        :param str session_file: session file
        """
        session_file = os.path.abspath(session_file)
        with open(session_file, 'w') as f:
            json.dump(self, f, sort_keys=True, indent=4, cls=RAIEncoder)

    def serialize(self):
        super().serialize()
        return {
            "ID": self.ID,
            "creation_time": self.creation_time,
            "root_path": self.root_path,
            "sensors_data": self.sensors_data,
            "videos": self.videos,
            "window": self.window,
            "serial_version": self.serial_version,
            "framework_version": self.framework_version
        }

    @staticmethod
    def load(session_file):
        """
        Deserialize JSON file and restart session.

        :param str session_file: session file
        :returns: loaded session
        :rtype: Session
        """
        session_file = os.path.abspath(session_file)
        with open(session_file) as data_file:
            data = json.load(data_file)
        # Call deserialize and return session
        return Session.deserialize(data)

    @staticmethod
    def deserialize(dictionary, *args, **kwargs):
        # Get QApplication
        app = get_app()
        # Build session
        session = Session()
        # Some arguments to copy
        for elem in ["ID", "creation_time"]:
            setattr(session, elem, dictionary[elem])
        # Need to specify parent_session for root_path
        # NOTE(review): lexicographic string comparison of versions; breaks for
        # e.g. '1.10' vs '1.2' -- confirm version scheme stays single-digit.
        if dictionary["serial_version"] > '1.0':
            elem = "root_path"
            setattr(session, elem, dictionary[elem])
        dictionary['sensors_data']['parent_session'] = session
        for one_dict in dictionary['videos']:
            one_dict['parent_session'] = session
        # Build sensors data
        if dictionary['sensors_data'] is not None:
            session.sensors_data = SensorsData.deserialize(dictionary['sensors_data'],)
            setattr(session, 'sensors_file', dictionary['sensors_data']['filename'])
        # Build videos
        session.videos = []
        if dictionary['videos'] is not None:
            # NOTE(review): session.videos is still empty here, so video_files
            # is always set to []; also Video stores `filename`, not
            # `videoname` -- looks like this line should run AFTER the loop
            # below and read `video.filename`. Confirm before relying on
            # video_files of a loaded session.
            setattr(session, 'video_files', [video.videoname for video in session.videos])
            for one_dict in dictionary['videos']:
                session.videos.append(Video.deserialize(one_dict))
        # Build window
        session.window = CompositeWindow.deserialize(dictionary['window'], session.videos, session)
        return session


class RAIEncoder(json.JSONEncoder):
    """JSON encoder for data structures."""

    def default(self, o):
        if issubclass(type(o), Serializable):
            return o.serialize()
        # NOTE(review): falls through to bare `return` (None) instead of
        # `super().default(o)`, so unserializable objects become null rather
        # than raising TypeError -- confirm this is intentional.
        return
/robot_analysis_interface-1.0rc5-py3-none-any.whl/RAI/session.py
0.673836
0.25682
session.py
pypi
from pathlib import Path

import cv2
from packaging import version
from PyQt5 import QtGui, QtCore, QtWidgets

from .serial import Serializable

# OpenCV API is different between 2.x and 3.x
if version.parse(cv2.__version__) < version.parse("3"):
    cv2.CAP_PROP_FRAME_COUNT = cv2.cv.CV_CAP_PROP_FRAME_COUNT
    cv2.CAP_PROP_FRAME_WIDTH = cv2.cv.CV_CAP_PROP_FRAME_WIDTH
    cv2.CAP_PROP_FRAME_HEIGHT = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
    cv2.CAP_PROP_FPS = cv2.cv.CV_CAP_PROP_FPS
    cv2.CAP_PROP_POS_FRAMES = cv2.cv.CV_CAP_PROP_POS_FRAMES


class Video(Serializable):
    """
    This class provides access to the video to be displayed.

    :param str filename: Path of the video file (only ``.mp4`` is accepted).
    :param str name: Name tag of the container.
    :param float offset: Offset between the video and the sensors in seconds.
        Negative values passed to the constructor are clamped to 0
        (``_update_offset`` does accept negative offsets later on).
    :param parent_session: owning session; when set, ``filename`` is resolved
        relative to ``parent_session.root_path``.
    :param parent_window: Qt parent used when building the image widget.
    """

    # Bumped when the serialized dictionary layout changes (see deserialize()).
    serial_version = '1.2'

    # Only .mp4 can be read for now.
    accepted_formats = ['.mp4']

    def __init__(self, filename=None, name='Video', offset=0.0,
                 parent_session=None, parent_window=None, **kwargs):
        ext = Path(filename).suffix
        if ext not in self.accepted_formats:
            raise TypeError(
                f'Cannot load video (unknown). Format must be among {self.accepted_formats}.'
            )
        self.filename = filename
        self.name = name
        # Clamp constructor offsets to be non-negative.
        self.offset = max(offset, 0.0)
        self.parent_session = parent_session
        self.parent_window = parent_window
        self._fill_video_info()
        # Parameters to be set during the window initialization.
        # Size of the displayed image
        self.widget_width = None
        self.widget_height = None
        # ID number in the GridLayout.
        self.id_number = None
        if 'id_number' in kwargs:
            self.id_number = kwargs['id_number']

    def _fill_video_info(self):
        """Get information about the video using OpenCV, and fill the instance accordingly."""
        # Add root path if needed
        if self.parent_session is not None:
            filename = Path(self.parent_session.root_path) / self.filename
        else:
            filename = Path(self.filename)
        self.capture = cv2.VideoCapture(str(filename))
        if not self.capture.isOpened():
            # NOTE(review): NameError is an odd exception type for an I/O
            # failure - confirm callers before changing it.
            raise NameError('Cannot initialize video capture from video ' + self.name)
        self.num_frames = int(self.capture.get(cv2.CAP_PROP_FRAME_COUNT))
        self.width = int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.height = int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.fps = self.capture.get(cv2.CAP_PROP_FPS)
        # NOTE(review): assumes OpenCV reports fps > 0; a broken file would
        # raise ZeroDivisionError here - confirm.
        self.duration = self.num_frames / self.fps
        self.frame_index = 0  # Initially, point at the first frame
        self.video_img = None  # Current frame as read from the video

    @property
    def aspect_ratio(self):
        """Return the width/height ratio of the displayed widget
        (``widget_width`` / ``widget_height``, not the raw frame size)."""
        return self.widget_width / self.widget_height

    def _prepare(self):
        """Prepare Video widget to be used in class <CompositeWindow>."""
        # Widget to contain the video
        self.image_widget = _ImageWidget(parent_video=self, parent=self.parent_window)
        # Initialization: render the first frame
        self._update_video_to_given_time(0.0)

    def _update_offset(self, offset, current_time):
        """Update the offset of the video and render current frame accordingly.

        :param float offset: new video/sensors offset in seconds (not clamped here)
        :param float current_time: current sensor time in seconds
        """
        self.offset = offset
        # Update frame (current time stays the same, but frame changes because of the offset change)
        self._update_video_to_given_time(current_time)

    def update_video_to_next_frame(self, time):
        """Update video to the next frame.

        :param float time: time in seconds which the video should be updated to.
        """
        # If the offset is negative and there is no frame
        # corresponding to this part of the sensors,
        # do nothing
        if time + self.offset < 0:
            return
        if self.frame_index < self.num_frames:
            rec, img = self.read_image_from_capture()
            # Update frame_index
            self.frame_index += 1
            # The above should work perfectly. However, sometimes,
            # one cannot read the last frames
            if not rec:
                # NOTE(review): assumes read failures only happen in the last
                # 5% of the stream; this assert is stripped under -O.
                assert self.frame_index > int(0.95 * self.num_frames)
                img = self.get_closest_frame(self.duration)
            # Update video image
            self.video_img = Video.array_to_qimage(img)
            # Update widget image
            self.image_widget.set_image(self.video_img, self.widget_width, self.widget_height)

    def _update_video_to_given_time(self, time):
        """
        Update video to a specific time.

        :param float time: time in seconds which the video should be updated to.
        """
        # Update video image (video time = sensor time + offset)
        img = self.get_closest_frame(time + self.offset)
        self.video_img = Video.array_to_qimage(img)
        # Update widget image
        self.image_widget.set_image(self.video_img, self.widget_width, self.widget_height)

    def get_closest_frame(self, time):
        """
        Return the closest frame to a given time.

        Also leaves ``self.frame_index`` pointing at the frame actually read.

        :param float time: time in seconds which the video should be updated to.
        :returns: closest frame to time.
        :rtype: ``numpy.array``
        """
        # Cannot go below 0 or above the number of frames.
        # (time / duration * num_frames is equivalent to time * fps.)
        frame_number = min(max(0, int(time / self.duration * self.num_frames)), self.num_frames)
        self.capture.set(int(cv2.CAP_PROP_POS_FRAMES), frame_number)
        rec, img = self.read_image_from_capture()
        # The above should work perfectly. However, sometimes,
        # one cannot read the last frames
        # This is a hacky way to make sure that we get a frame that OpenCV can read.
        # (It also compensates for frame_number == num_frames, one past the
        # last valid index.)
        while not rec:
            frame_number -= 1
            self.capture.set(int(cv2.CAP_PROP_POS_FRAMES), frame_number)
            rec, img = self.read_image_from_capture()
        self.frame_index = frame_number
        return img

    def read_image_from_capture(self):
        """
        Read the frame from the video capture and return the RGB image.

        :returns: a tuple that contains:

            * a flag that is True if the reading was successful
            * the video frame (converted from OpenCV's BGR to RGB on success)

        :rtype: tuple(bool, numpy.array)
        """
        rec, img = self.capture.read()
        if rec:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return rec, img

    def _resize_video(self, width, height, ratio=QtCore.Qt.KeepAspectRatio):
        """
        Resize video frame.

        :param int width: new width
        :param int height: new height
        :param ratio: Qt aspect-ratio policy used when rescaling
        """
        # Update attributes
        self.widget_width = width
        self.widget_height = height
        # Just scale image
        self.image_widget.set_image(self.video_img, self.widget_width, self.widget_height)

    @staticmethod
    def array_to_qimage(array):
        """
        Convert a numpy array to a QImage.

        :param numpy.array array: input array of shape (height, width, 3), RGB888
        :returns: output image.
        :rtype: ``QImage``
        """
        h, w, bpc = array.shape
        bpl = bpc * w  # bytes per line
        image = QtGui.QImage(array.data, w, h, bpl, QtGui.QImage.Format_RGB888)
        return image

    @staticmethod
    def _scale_image(image, width, height, ratio=QtCore.Qt.KeepAspectRatio):
        """Scale QImage.

        If either dimension is None (widget not laid out yet), the image is
        returned unscaled.

        :param QImage image: image to scale
        :param int width: desired width of the image
        :param int height: desired height of the image
        :param ratio: Qt aspect-ratio policy
        :returns: output image
        :rtype: ``QImage``
        """
        if (width is not None) and (height is not None):
            image = image.scaled(width, height, ratio)
        return image

    def serialize(self):
        # NOTE(review): the return value of super().serialize() is discarded -
        # presumably a bookkeeping/validation hook in Serializable; confirm.
        super().serialize()
        return {
            "filename": self.filename,
            "name": self.name,
            "widget_width": self.widget_width,
            "widget_height": self.widget_height,
            "id_number": self.id_number,
            "serial_version": self.serial_version,
            "offset": self.offset
        }

    @staticmethod
    def deserialize(dictionary, *args, **kwargs):
        """Rebuild a Video from a serialized dictionary (mutates the dict)."""
        # Backwards compatibility: v1.0 stored the path under "videoname"
        if dictionary["serial_version"] == '1.0':
            dictionary["filename"] = dictionary.pop("videoname")
        dictionary.pop("serial_version")
        # TODO: Can I remove that?
        dictionary.pop('widget_height')  # temporary until resizeEvent is fixed
        dictionary.pop('widget_width')  # temporary until resizeEvent is fixed
        return Video(**dictionary)


class _ImageWidget(QtWidgets.QWidget):
    """This class represents the image widget used to display a video."""

    # Now the widget needs to know about the parent video in case of a resizing event.
    # May be there is a better way, I'll think about it in the resizing story
    def __init__(self, parent_video=None, parent=None):
        super().__init__(parent)
        self.image = None  # currently displayed (scaled) QImage
        self.parent_video = parent_video
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)

    def set_image(self, image, width, height):
        """Scale ``image`` to (width, height) and trigger a repaint."""
        self.image = Video._scale_image(image, width, height, QtCore.Qt.KeepAspectRatio)
        sz = self.image.size()
        self.setMinimumSize(sz)
        self.update()

    def paintEvent(self, event):
        """Repaint the current video frame, rescaled to the event rectangle."""
        # change width of image widget as per video widget
        # and change height as per aspect ratio
        width = event.rect().size().width()
        # NOTE(review): width / aspect_ratio is a float; QImage.scaled expects
        # ints - confirm PyQt's coercion here.
        self.image = Video._scale_image(
            self.parent_video.video_img, width,
            width / self.parent_video.aspect_ratio)
        sz = self.image.size()
        self.setMinimumSize(sz)
        qp = QtGui.QPainter()
        qp.begin(self)
        if self.image:
            qp.drawImage(QtCore.QPoint(0, 0), self.image)
        qp.end()
/robot_analysis_interface-1.0rc5-py3-none-any.whl/RAI/video.py
0.799912
0.28573
video.py
pypi
from PyQt5 import QtGui, QtCore, QtWidgets

from .utils import _build_common_widget


class SelectionTable(QtWidgets.QTableWidget):
    """
    This class implements a TableWidget used to select streams.

    :param list(str) headers: Headers of the TableWidget
    :param dict data: Mapping of stream name to its current value; one row is
        shown per stream, sorted alphabetically.
    """

    # Emitted as (stream name, plot number, checkbox state) when the user
    # toggles a plot for a stream.
    update_plot_signal = QtCore.pyqtSignal(str, int, int)

    def __init__(self, headers, data, *args):
        super().__init__(*args)
        self.headers = headers
        self.data = data
        self.parent_window = self.parent()
        # Make cells read-only
        self.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        # Set up headers
        self.setHorizontalHeaderLabels(self.headers)
        self.verticalHeader().setVisible(False)
        # Set up stream names (one row per stream, alphabetical order)
        for row, stream in enumerate(sorted(self.data.keys())):
            name_item = QtWidgets.QTableWidgetItem(stream)
            self.setItem(row, 0, name_item)
        # Add stream data
        self._setup_data()
        # Connect signal
        self.itemClicked.connect(self.item_clicked)

    def getIdealWidth(self):
        """Return the width needed to show the full table without clipping."""
        width = self.verticalHeader().width()
        width += self.horizontalHeader().length()
        if self.verticalScrollBar().isVisible():
            width += self.verticalScrollBar().width()
        width += self.frameWidth() * 2
        return width

    def _setup_data(self):
        """Fill/refresh column 1 with the current value of each stream."""
        for row in range(self.rowCount()):
            # Find item to update
            stream = str(self.item(row, 0).text())
            # Remove old widget
            self.removeCellWidget(row, 1)
            # Add new item (scientific notation)
            val_str = "%6.6e" % self.data[stream]
            val_item = QtWidgets.QTableWidgetItem(str(val_str))
            self.setItem(row, 1, val_item)
        # Adjust table view (hide while resizing to avoid flicker)
        self.setVisible(False)
        self.resizeColumnsToContents()
        self.resizeRowsToContents()
        self.setVisible(True)

    def item_clicked(self, item):
        """Open a checkable list of plots for the clicked stream (column 0 only)."""
        # If not stream clicked (first column), return
        if self.column(item) != 0:
            return
        # Item clicked
        stream = str(item.text())
        # Build list window: pre-check the plots already showing this stream
        checked_plots = self.parent_window._get_plots_id_with_stream(stream)
        current_view = self.parent_window.get_current_view()
        init_states = [QtCore.Qt.Unchecked for _ in range(len(current_view.plots))]
        for i in checked_plots:
            init_states[i] = QtCore.Qt.Checked
        self.check_list = CheckableListWindowWithUpdate(init_states, stream, "Plot", self)
        # Connect signals
        self.check_list.item_changed_signal.connect(self._update_plot)
        # Show window
        self.check_list.show()

    def update_data(self, new_table):
        """
        Update displayed value for all streams.

        :param dict new_table: dictionary containing the current value for all streams.
        """
        self.data = new_table
        self._setup_data()

    def _update_plot(self, stream, plot_number, state):
        """
        Send signal to update plot.

        :param str stream: stream to add or remove
        :param int plot_number: plot number to update
        :param int state: state of the checkbox
        """
        # Send signal to the Main Window
        self.update_plot_signal.emit(stream, plot_number, state)

    def _select_streams_to_show(self, rexp):
        """
        Update table according to a regular expression.

        :param QtCore.QRegExp rexp: regular expression to be matched
        """
        for row in range(self.rowCount()):
            name = self.item(row, 0).text()  # Stream name
            if rexp.isEmpty() or rexp.exactMatch(name):
                self.showRow(row)
            else:
                self.hideRow(row)


class CheckableListWindow(QtWidgets.QMainWindow):
    """
    This class implements a window showing a checkable list of the plots to clear.

    :param list(int) init_states: initial state of the checkboxes
    :param str title: title of the window
    :param str item_prefix: prefix to add to all items
    """

    # x, y, width, height of the window
    DEFAULT_GEOMETRY = (500, 300, 300, 200)

    # Emitted on OK with the list of checked item indices.
    ok_signal = QtCore.pyqtSignal(list)

    def __init__(self, init_states, title='', item_prefix='', *args):
        super().__init__(*args)
        self.init_states = init_states
        self.item_prefix = item_prefix
        self.title = title
        self.n_states = len(init_states)
        self.setGeometry(*self.DEFAULT_GEOMETRY)
        # NOTE(review): setWindowModality expects a Qt.WindowModality enum;
        # True coerces to 1 (Qt.WindowModal) - confirm that this is the intent.
        self.setWindowModality(True)
        self.setWindowTitle(self.title)
        # Disable X to force the user to use the Cancel or OK buttons.
        flags = self.windowFlags()
        if QtCore.Qt.WindowCloseButtonHint == (flags & QtCore.Qt.WindowCloseButtonHint):
            flags = flags ^ QtCore.Qt.WindowCloseButtonHint
        self.setWindowFlags(flags)
        # Create widget
        self.widget = QtWidgets.QWidget(self)
        self.layout = QtWidgets.QGridLayout(self.widget)
        self.layout.setHorizontalSpacing(1)
        self.layout.setVerticalSpacing(1)
        # Create list of user-checkable items
        self.list_widget = QtWidgets.QListWidget(self)
        for i, state in enumerate(self.init_states):
            item = QtWidgets.QListWidgetItem(self.item_prefix + ' ' + str(i))
            item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
            item.setCheckState(state)
            self.list_widget.addItem(item)
        # Connect
        self.list_widget.itemChanged.connect(self.item_changed)
        # Add list widget
        self.layout.addWidget(self.list_widget, 0, 0)
        # Add buttons (OK disabled until something actually changes)
        self.cancel_btn = QtWidgets.QPushButton("Cancel", self)
        self.ok_btn = QtWidgets.QPushButton("OK", self)
        self.ok_btn.setEnabled(False)
        self.buttons_widget = _build_common_widget(self, 1, 2, self.cancel_btn, self.ok_btn)
        # Connect
        self.cancel_btn.clicked.connect(self.cancel_clicked)
        self.ok_btn.clicked.connect(self.ok_clicked)
        # Add buttons widget
        self.layout.addWidget(self.buttons_widget, 1, 0)
        # Set to central widget and write message
        self.setCentralWidget(self.widget)

    def item_changed(self, _item):
        """Track the checkbox states; enable OK only when they differ from the initial ones."""
        # Check if we should enable the OK button
        self.current_states = [self.list_widget.item(i).checkState()
                               for i in range(self.n_states)]
        if self.current_states != self.init_states:
            self.ok_btn.setEnabled(True)
        else:
            self.ok_btn.setEnabled(False)

    def ok_clicked(self):
        """Emit the checked item indices and close.

        ``self.current_states`` exists because OK is only enabled after at
        least one ``item_changed``.
        """
        plots_to_update = []
        for i, state in enumerate(self.current_states):
            if state == QtCore.Qt.Checked:
                plots_to_update.append(i)
        # Send signal to the window to update plots
        self.ok_signal.emit(plots_to_update)
        # Close
        self.close()

    def cancel_clicked(self):
        """Close without emitting anything."""
        self.close()


class CheckableListWindowWithUpdate(CheckableListWindow):
    """
    This class inherits from CheckableListWindow and send a signal when an
    item state is changed (live updates instead of waiting for OK).
    """

    # Emitted as (stream name, plot number, checkbox state) on every toggle.
    item_changed_signal = QtCore.pyqtSignal(str, int, int)

    def item_changed(self, item):
        """Forward each toggle immediately, on top of the base bookkeeping."""
        super().item_changed(item)
        # Send signal to update plot right away
        # Get plot number from the item label (strip the prefix)
        plot_str = item.text().replace(self.item_prefix + ' ', '', 1)
        plot_number = int(plot_str)
        # Send signal
        # The title is the stream name so we use this trick to send it to the Selection window
        self.item_changed_signal.emit(self.title, plot_number, item.checkState())

    def ok_clicked(self):
        """Just close: updates were already sent live by item_changed."""
        self.close()

    def cancel_clicked(self):
        """Undo the live updates by re-emitting the initial states, then close."""
        # Roll back to initial state
        for i, state in enumerate(self.init_states):
            item = self.list_widget.item(i)
            if item.checkState() != state:
                # Send signal
                # The title is the stream name so we use this trick to send it to the Selection window
                self.item_changed_signal.emit(self.title, i, state)
        super().cancel_clicked()


class LabelWithAdaptiveText(QtWidgets.QLabel):
    """A Label with text adapting to its size (elided on the left)."""

    def paintEvent(self, *args, **kwargs):
        painter = QtGui.QPainter(self)
        metrics = QtGui.QFontMetrics(self.font())
        # Elide on the left so the end of long texts (e.g. file paths) stays visible.
        elided = metrics.elidedText(self.text(), QtCore.Qt.ElideLeft, self.width())
        painter.drawText(self.rect(), self.alignment(), elided)
/robot_analysis_interface-1.0rc5-py3-none-any.whl/RAI/table.py
0.755276
0.230822
table.py
pypi
import struct
import os.path
from copy import deepcopy
import pickle
from sys import maxsize

import numpy as np

try:
    import pinocchio as se3
    from pinocchio.rpy import matrixToRpy
    USE_PINOCCHIO = True
except ImportError:
    USE_PINOCCHIO = False


class DataCollector(object):
    """
    This class manages data. It can read and dump data from a file.
    It stores data on the fly.

    Internal layout: ``fields`` maps stream name -> column index into
    ``data`` (a list of lists of floats); ``units`` maps stream name ->
    unit string; ``metadata`` is free-form.
    """

    def __init__(self):
        # a list of list representing the data
        self.data = []
        # the list of the fields names
        self.fields = {}
        # the units of each field
        self.units = {}
        # a bunch of data that one may save (date, controller name, ...)
        self.metadata = {}
        # some metadata from the d-file loading
        self.arg_max = 0
        self.reduce_x_axis = True

    def get_streams(self, fields):
        """
        Return a list containing the data from the elements in `fields`.

        :param fields: Desired stream fields.
        :type fields: str or list(str)
        :returns: Data from the elements in `fields`
        :rtype: list(list(``float``))
        """
        if isinstance(fields, str):
            return self._get_one_stream(fields)
        else:
            return [self._get_one_stream(field) for field in fields]

    def _get_one_stream(self, field):
        """
        Return the list containing the information corresponding to the
        string `field`.

        :param field: Desired stream field.
        :type field: str
        :raises TypeError: if `field` is not a string
        :raises KeyError: if `field` is not a known stream
        """
        if not isinstance(field, str):
            raise TypeError(
                "field should be a string, " +
                "instead got {} of type {}.".format(field, type(field)))
        if field in self.fields:
            return self.data[self.fields[field]]
        raise KeyError(field + ' is not a valid sensor input.')

    def get_all_streams(self):
        """
        Return a list of list array containing the data from the all
        elements in the sensors file.

        :return: the list of all the fields
        """
        return self.data

    def get_units(self, fields):
        """
        Return a list of numpy array containing the units all elements in `fields`.

        :param fields: Profiles for which the units are required.
        :type fields: str or list(str)
        :return: Units from the elements in `fields`
        :rtype: list(``numpy.array``)
        """
        if isinstance(fields, str):
            return self._get_one_unit(fields)
        else:
            return [self._get_one_unit(field) for field in fields]

    def _get_one_unit(self, field):
        """
        Return the units of `field`.

        :param field: Profile for which the units are required.
        :type field: str
        :return: one unit
        :raises TypeError: if `field` is not a string
        :raises KeyError: if `field` is not a known stream
        """
        if not isinstance(field, str):
            raise TypeError(
                "data should be a string, " +
                "instead got {} of type {}.".format(field, type(field)))
        if field in self.fields:
            return self.units[field]
        raise KeyError(field + ' is not a valid sensor input.')

    def get_all_units(self):
        """
        Return a list of numpy array containing the units all elements
        in the sensors file (sorted by field name).

        :return: the list of units stored
        """
        return self.get_units(sorted(self.fields.keys()))

    def add_variable(self, data, field, unit):
        """
        Append a single float or int to a stream of data.

        :param data: the float or int to add (bool and size-1 ndarray accepted)
        :param field: the name of the stream (created on first use)
        :param unit: the unit associated to the data
        """
        if not isinstance(data, (float, int, bool, np.ndarray)):
            raise TypeError(
                "data should be a float, an int, or a bool, " +
                ", instead got {} of type {}.".format(data, type(data)))
        if not isinstance(field, str):
            raise TypeError(
                "field should be a string, instead got {} of type {}."
                .format(field, type(field)))
        if not isinstance(unit, str):
            raise TypeError(
                "unit should be a string, instead got {} of type {}."
                .format(unit, type(unit)))
        if isinstance(data, (bool, int, np.ndarray)):
            data = float(data)
        if field not in self.fields:
            # New stream: register a fresh column.
            self.data.append([])
            self.fields[field] = len(self.data) - 1
            self.units[field] = unit
        self.data[self.fields[field]].append(float(data))
        assert len(self.data) == len(self.fields) == len(self.units)

    def add_vector(self, data_vec, field_vec, unit_vec):
        """
        Append each element of the vector to a different stream of data.

        :param data_vec: the vector to add (list or ndarray)
        :param field_vec: the names of the streams to add the data
        :param unit_vec: the units of all the vector elements
        """
        if not (self._vector_size(data_vec) == len(field_vec) == len(unit_vec)):
            raise ValueError("All input lists must have the same size. " +
                             "Instead got sized {}, {}, and {}".format(
                                 self._vector_size(data_vec),
                                 len(field_vec), len(unit_vec)))
        if isinstance(data_vec, np.ndarray):
            # Normalize to a column vector so data_col_vec[i] is a size-1 array.
            data_col_vec = data_vec.reshape(data_vec.size, 1)
        else:
            data_col_vec = data_vec
        for i in range(len(field_vec)):
            if not isinstance(field_vec[i], str):
                raise TypeError(
                    "fields should be a string, instead got {} of type {}."
                    .format(field_vec[i], type(field_vec[i])))
            if not isinstance(unit_vec[i], str):
                raise TypeError(
                    "units should be a string, instead got {} of type {}."
                    .format(unit_vec[i], type(unit_vec[i])))
            self.add_variable(data_col_vec[i], field_vec[i], unit_vec[i])

    def add_vector_3d(self, data_vec, field, unit):
        """
        Append a 3d vector to 3 streams of data with the names append by [x,y,z]

        :param data_vec: the 3d vector
        :param field: the basename.
        :param unit: the unit of all 3 streams
        """
        if not self._vector_size(data_vec) == 3:
            raise ValueError("data_vec should be a 3D vector, instead got {}"
                             .format(data_vec))
        if not isinstance(field, str):
            raise TypeError(
                "field should be a string, instead got {} of type {}."
                .format(field, type(field)))
        if not isinstance(unit, str):
            raise TypeError(
                "unit should be a string, instead got {} of type {}."
                .format(unit, type(unit)))
        field_vec = [field + "_x", field + "_y", field + "_z"]
        unit_vec = 3 * [unit]
        self.add_vector(data_vec, field_vec, unit_vec)

    def add_quaternion(self, quaternion, field):
        """
        Append a quaternion to 4 streams of data with the names [qx, qy, qz, qw]

        :param quaternion: the quaternion to add, ordered (x, y, z, w)
        :param field: the name of the basename for the quaternion
        """
        field_vec = [field + "_qx", field + "_qy", field + "_qz", field + "_qw"]
        unit_vec = ["-", "-", "-", "-"]
        if USE_PINOCCHIO:
            if isinstance(quaternion, (np.ndarray, list, se3.Quaternion)):
                self.add_vector(quaternion, field_vec, unit_vec)
            else:
                raise TypeError("wrong type, expected a Quaternion," +
                                " a list or a ndarray. Instead got type {}"
                                .format(quaternion))
        else:
            if isinstance(quaternion, (np.ndarray, list)):
                self.add_vector(quaternion, field_vec, unit_vec)
            else:
                raise TypeError("wrong type, expected a list or a ndarray" +
                                ". Instead got type {}".format(quaternion))

    def add_rpy(self, rot, field):
        """
        Append a roll, pitch, yaw vector based on quaternion, rotation
        matrix or rpy vector.

        :param rot: the rotation to be converted to rpy
        :param field: the basename of the rotation
        """
        if isinstance(rot, (np.ndarray, list)) and self._vector_size(rot) == 3:
            # Already an rpy vector: store it directly.
            self.add_vector(
                rot, [field + "_roll", field + "_pitch", field + "_yaw"],
                ["rad", "rad", "rad"])
        elif USE_PINOCCHIO:
            # Convert recursively: quaternion -> matrix -> rpy.
            if isinstance(rot, se3.Quaternion):
                self.add_rpy(rot.matrix(), field)
            elif isinstance(rot, np.ndarray) and self._vector_size(rot) == 4:
                # (x, y, z, w) storage order -> Quaternion(w, x, y, z) ctor.
                self.add_rpy(se3.Quaternion(float(rot[3]), float(rot[0]),
                                            float(rot[1]), float(rot[2])),
                             field)
            elif isinstance(rot, np.ndarray) and rot.shape == (3, 3):
                self.add_rpy(matrixToRpy(rot), field)
            else:
                if isinstance(rot, np.ndarray):
                    shape = rot.shape
                else:
                    shape = None
                raise TypeError(
                    'Cannot convert vector of shape \'' + str(shape) + '\' ' +
                    'to rpy. Type has to be either\n' +
                    '  - se3.Quaternion\n' +
                    '  - numpy.ndarray of size (4, 1): [qx, qy, qz, qw]\n' +
                    '  - numpy.ndarray of size (3, 3): a rotation matrix\n' +
                    '  - se3.Quaternion\n' +
                    'Instead got type {}'.format(rot))
        else:
            raise ValueError('The given vector must be a roll pitch yaw ' +
                             'vector')

    def add_se3(self, m, field):
        """
        If Pinocchio is installed, append a SE3 to the streams.

        :param m: an se3.SE3 object
        :param field: the basename of the SE3 to add
        :raises ImportError: if Pinocchio is not available
        """
        if USE_PINOCCHIO:
            self.add_vector_3d(m.translation, field, "m")
            self.add_quaternion(se3.Quaternion(m.rotation), field)
        else:
            raise ImportError('Without the Pinocchio library this method ' +
                              'is not implemented')

    def add_matrix(self, mat, field, unit):
        """
        Append a matrix to the streams, one stream per (i, j) entry named
        ``<field>_<i>_<j>``.

        :param mat: the matrix (numpy.matrix)
        :param field: the basename of the matrix
        :param unit: the unit of the matrix elements
        """
        if not isinstance(field, str):
            raise TypeError(
                "field should be a string, instead got {} of type {}."
                .format(field, type(field)))
        if not isinstance(unit, str):
            raise TypeError(
                "unit should be a string, instead got {} of type {}."
                .format(unit, type(unit)))
        if not isinstance(mat, np.matrix):
            raise TypeError(
                "field should be a string, instead got {} of type {}."
                .format(mat, type(mat)))
        for i in range(mat.shape[0]):
            for j in range(mat.shape[1]):
                self.add_variable(mat[i, j],
                                  field + "_" + str(i) + "_" + str(j), unit)

    def to_dictionary(self):
        """
        Return a dictionary containing the data; it is used to dump the data.

        :return: a dict
        """
        return {
            'fields': self.fields,
            'units': self.units,
            'data': self.data,
            'metadata': self.metadata
        }

    def from_dictionary(self, dictionary):
        """
        Parse the loaded dictionary from a data file.

        :param dictionary: the dictionary to parse
        """
        self._check_data_dictionary(dictionary)
        self.metadata = deepcopy(dictionary['metadata'])
        self.fields = deepcopy(dictionary['fields'])
        self.units = deepcopy(dictionary['units'])
        self.data = deepcopy(dictionary['data'])

    def dump(self, filename):
        """
        Dump the data as a dictionary in the filename using pickle.

        :param str filename: the path to the file (".pkl" appended if missing)
        """
        if not len(self.data):
            raise ValueError("No data to dump, the file would be empty")
        # check the size of each column, they have to be equal.
        try:
            for i in range(1, len(self.data)):
                assert len(self.data[i - 1]) == len(self.data[i])
        except AssertionError:
            raise ValueError("Data ill formed, " +
                             "all stream must have the same length")
        _, ext = os.path.splitext(filename)
        if not ext == ".pkl":
            filename = filename + ".pkl"
        with open(filename, "wb") as f:
            pickle.dump(self.to_dictionary(), f)

    def dump_compressed(self, filename):
        """
        Dump the data in a compressed format using numpy.savez_compressed.

        :param filename: the filename to dump the data
        """
        DataCollector.dump_npz(filename, [self], ['data'])

    @staticmethod
    def dump_npz(filename, list_data_collector, list_zip_names=None):
        """
        Dump a list of DataCollector in a zip folder with numpy.savez_compressed.

        :param filename: the name of the file to dump the data
            (".npz" appended if missing)
        :param list_data_collector: the list of the DataCollector objects
        :param list_zip_names: the list of names in the zipped folder;
            defaults to "data0", "data1", ...
        """
        if not isinstance(filename, str):
            raise TypeError("filename should be of type string or basestring" +
                            ". Instead got {} of type {}"
                            .format(filename, type(filename)))
        if not all(isinstance(data_collector, DataCollector) for
                   data_collector in list_data_collector):
            raise TypeError("list_data_collector must be a list of " +
                            "DataCollector. Instead got types {}"
                            .format([type(data) for data
                                     in list_data_collector]))
        if list_zip_names is not None:
            assert all(isinstance(zip_name, str)
                       for zip_name in list_zip_names)
            assert len(list_zip_names) == len(list_data_collector)
        else:
            # Generate zero-padded default names wide enough for the count.
            list_zip_names = []
            max_nb_file = str(len(str(len(list_data_collector))))
            for i, data_collector in enumerate(list_data_collector):
                list_zip_names.append(
                    str("data{:" + max_nb_file + "d}").format(i))
        path, ext = os.path.splitext(filename)
        if not ext == ".npz":
            filename = filename + ".npz"
        data_to_dump = {}
        for data_collector, zip_name in zip(list_data_collector,
                                            list_zip_names):
            data_to_dump[zip_name] = data_collector.to_dictionary()
        np.savez_compressed(filename, **data_to_dump)

    def load(self, filename, reduce_dfile_x_axis=True):
        """
        Load the file from filename.

        The format is chosen from the extension: .pkl, .npy, .npz, a SL
        d-file for any other extension, or a dynamic-graph folder when
        `filename` is a directory.

        :param filename: the root path to the file
        :param bool reduce_dfile_x_axis: for SL d-files, crop trailing
            zero-padded samples after the time vector's maximum
        :raises RuntimeError: if the file content cannot be parsed
        :raises ValueError: if the path is neither a file nor a directory
        """
        load_functions = {
            '.pkl': lambda data_file:
                self.from_dictionary(pickle.load(data_file)),
            '.npy': lambda data_file:
                self.from_dictionary(self._parse_npy(data_file)),
            '.npz': lambda data_file:
                self.from_dictionary(self._parse_npz_file(data_file)),
            '': lambda data_file:  # default is d-file
                self.from_dictionary(
                    self._parse_d_file(data_file, reduce_dfile_x_axis))
        }
        if os.path.isfile(filename):
            _, fileext = os.path.splitext(filename)
            if fileext not in ['.pkl', '.npy', '.npz']:
                fileext = ''
            error_message = ('Cannot load file ' + filename +
                             ' as SensorsData\n' +
                             'The parser manages files from SL and with the ' +
                             'extension .pkl, .npy, .npz')
            try:
                with open(filename, 'rb') as data_file:
                    load_functions[fileext](data_file)
            except ValueError as e:
                # Bug fix: ValueError has no `.message` attribute in
                # Python 3 (the old code raised AttributeError here
                # instead of the intended RuntimeError); use str(e).
                error_message = ("ValueError: an error has been detected "
                                 "while loading the data file, '" + str(e) +
                                 "'\n" + error_message)
                raise RuntimeError(error_message)
        elif os.path.isdir(filename):
            self.from_dictionary(self._parse_dg_folder(filename))
        else:
            raise ValueError('The path is not a file or directory.')
        self.sanity_check()

    @staticmethod
    def _parse_dg_folder(data_folder):
        """
        Parse from a dynamic graph folder: one text file per signal, first
        column ignored, remaining columns become streams named
        ``<file>/<col>``. All streams are truncated to the shortest file.

        :param data_folder: the root path to the last folder in dynamic
            graph folder
        :type data_folder: str
        """
        data = {'metadata': {}, 'fields': {}, 'units': {}, 'data': []}
        # get the time limits
        file_data = {}
        stop_time = maxsize
        for file in os.listdir(data_folder):
            name, _ = os.path.splitext(file)
            file_data[name] = np.loadtxt(os.path.join(data_folder, file))
            stop_time = min(stop_time, len(file_data[name]))
        # fill data
        index = 0
        for file in os.listdir(data_folder):
            name, _ = os.path.splitext(file)
            if len(file_data[name][:]) == 0:
                continue
            for i in range(1, len(file_data[name][0])):
                data['fields'][name + '/' + str(i)] = index
                index += 1
                data['units'][name + '/' + str(i)] = '_'
                data['data'].append(file_data[name][:stop_time, i])
        # Synthesize a time stream (one tick per sample).
        data['fields']["time"] = index
        # NOTE(review): every other unit is a plain string; this one is a
        # list - confirm downstream consumers before normalizing.
        data['units']["time"] = ["millisec"]
        data['data'].append(np.arange(0.0, stop_time, 1.0).tolist())
        index += 1
        return data

    @staticmethod
    def _parse_d_file(data_file, reduce_x_axis=True):
        """
        Parse a SL d-file: a small text header (sizes, frequency, field
        names/units) followed by big-endian float32 binary data.

        :param data_file: binary file object opened for reading
        :param bool reduce_x_axis: crop trailing samples after time.argmax()
        """
        # Header
        data = {'metadata': {}, 'fields': {}, 'units': {}, 'data': []}
        temp = data_file.readline().split()
        cols = int(temp[1])
        rows = int(temp[2])
        data['metadata']['frequency'] = float(temp[3])
        # Fields and units (alternating name/unit tokens)
        temp = data_file.readline().decode().split()
        for i in range(cols):
            data['fields'][temp[2 * i]] = i
            data['units'][temp[2 * i]] = temp[2 * i + 1]
        # full data: '>' big-endian, 'f' float32, row-major then transposed
        full_data = np.array(struct.unpack(
            '>' + 'f' * cols * rows,
            data_file.read(4 * cols * rows))
        ).reshape(rows, cols).transpose()
        # There might be a bunch of 0's at the end of the array `time`.
        # So we crop the data here by default
        time = full_data[data['fields']['time']]
        data['metadata']['arg_max'] = time.argmax()
        data['metadata']['reduce_x_axis'] = reduce_x_axis
        if reduce_x_axis:
            for i in range(len(data['fields'])):
                data['data'].append(
                    full_data[i, :data['metadata']['arg_max'] + 1].tolist())
        else:
            for i in range(len(data['fields'])):
                data['data'].append(full_data[i].tolist())
        return data

    @staticmethod
    def _parse_npy(data_file):
        """Load a .npy file holding a pickled dictionary (0-d object array)."""
        try:
            data = np.load(data_file, allow_pickle=False).all()
        except ValueError:
            # NOTE(review): allow_pickle=True on untrusted files is unsafe;
            # only load files from trusted sources.
            data = np.load(data_file, allow_pickle=True).all()
        return data

    @staticmethod
    def _parse_npz_file(data_file):
        """
        Parse the zip folder loaded via numpy.load from a npz file.

        Streams are renamed ``<zipname>/<field>`` and a shared "time"
        stream (copied from the first archive's time) is appended.

        :param data_file: object return by "open(filename)"
        """
        def _parse_data_dictionary(file_dictionary):
            # deal with all the data from the different files
            idn = 0
            for filezip in file_dictionary:
                data_dictionary = file_dictionary[filezip].all()
                # copy the metadata separately for each file
                data['metadata'][filezip] = {}
                data['metadata'][filezip] = deepcopy(
                    data_dictionary['metadata'])
                # accumulate the data changing the field of the fields
                for i, field in enumerate(data_dictionary['fields']):
                    data['fields'][str(filezip + "/" + field)] = idn + i
                    data['units'][str(filezip + "/" + field)] = (
                        data_dictionary['units'][field])
                    data['data'] += [data_dictionary["data"][
                        data_dictionary["fields"][field]]]
                idn += len(data_dictionary['fields'])
            # get the time field (copy of the first archive's time stream)
            assert "time" not in data['fields']
            data['fields']["time"] = idn
            # NOTE(review): list-valued unit, inconsistent with the rest.
            data['units']["time"] = ["s"]
            time_field_name = file_dictionary.files[0] + "/time"
            data['data'] += [data['data'][data['fields'][time_field_name]]]
            return data

        data = {'metadata': {}, 'fields': {}, 'units': {}, 'data': []}
        try:
            with np.load(data_file, allow_pickle=False) as file_dictionary:
                data = _parse_data_dictionary(file_dictionary)
        except ValueError:
            with np.load(data_file, allow_pickle=True) as file_dictionary:
                data = _parse_data_dictionary(file_dictionary)
        return data

    def _check_data_dictionary(self, dictionary):
        """
        Check if the dictionary from the loaded file has the correct fields.

        :param dictionary: the dictionary to test
        :raises ValueError: if a mandatory key is missing
        """
        if 'fields' not in dictionary:
            raise ValueError('fields has to be in the read dictionary')
        if 'units' not in dictionary:
            raise ValueError('units has to be in the read dictionary')
        if 'data' not in dictionary:
            raise ValueError('data has to be in the read dictionary')
        if 'metadata' not in dictionary:
            raise ValueError('metadata has to be in the file read')

    def _vector_size(self, vec):
        """
        Simple utility that returns either the size of a numpy.ndarray or
        the length of a list accordingly.

        :param vec: input vector, could be a list or a vector
        """
        if USE_PINOCCHIO:
            assert isinstance(vec, (list, dict, np.ndarray, se3.Quaternion))
        else:
            assert isinstance(vec, (list, dict, np.ndarray))
        if isinstance(vec, np.ndarray):
            return vec.size
        else:
            return len(vec)

    def sanity_check(self):
        """
        Do a couple of assertions on the object to verify its sanity:
        every stored value is a float and the field indices form a
        contiguous 0..n-1 range.
        """
        assert isinstance(self.fields, dict)
        for stream in self.data:
            for value in stream:
                assert isinstance(value, float)
        n = len(self.fields)
        all_ids = self.fields.values()
        assert len(all_ids) == n
        assert set(all_ids) == set(range(n))
/robot_analysis_interface-1.0rc5-py3-none-any.whl/RAI/data_collector.py
0.606732
0.585812
data_collector.py
pypi
import copy import numpy as np from robot_analysis.display import print_matrix def idx_max(lst: list, start=0) -> int: """ Search the index of the bigger absolute value in the given list.\n Code by Côte Geoffrey - 2023 :param list lst: the list to search into :param int start: the index at which the search must begin :return: the index of the max value in the list """ k = start m = np.fabs(lst[start]) for i in range(start, len(lst), 1): if np.fabs(lst[i]) >= m: m = np.fabs(lst[i]) k = i return k def ech_red(matrix: np.ndarray, show_step=False) -> np.ndarray: """ Compute the reduced row echelon form of the given matrix.\n Code by Côte Geoffrey - 2023 :param matrix: the matrix from which the reduced row echelon form should be made :param show_step: whether we should display the intermediate step :return: the reduced row echelon form of the given matrix """ # Make a copy of the matrix to work on out = copy.deepcopy(matrix) n = np.shape(out) r = 0 if show_step: print_matrix(out, name=f"I") # We are iterating over the column for j in range(n[1]): # Finding the pivot value k = idx_max(out[:, j], r) if out[k, j] != 0: r += 1 # Get the pivot to one out[k, :] = out[k, :] / out[k, j] if show_step: print_matrix(out, f"Pk{k}j{j}") # If the pivot is not on the right line, move it if k != r - 1: temp = copy.deepcopy(out[r - 1, :]) out[r - 1, :] = out[k, :] out[k, :] = temp if show_step: print_matrix(out, f"Er{r - 1}k{k}") # Reduced the matrix by removing other values on the same column for i in range(0, n[0]): if i != r - 1: out[i, :] = np.round(out[i, :] - out[r - 1, :] * out[i, j], 6) if show_step: print_matrix(out, f"Ri{i}j{j}") return out
/robot_analysis-0.0.3-py3-none-any.whl/robot_analysis/math.py
0.722527
0.663996
math.py
pypi
import dataclasses import time from enum import Enum from typing import Callable, Dict, List, Optional import numpy as np from robot_arm_controller.control.arm_kinematics import ( ArmKinematics, ArmParameters, ArmPose, ) from robot_arm_controller.control.controller_servers import ( ControllerServer, FIFOLock, WebsocketServer, ) from robot_arm_controller.utils.messages import Message, MessageOp from robot_arm_controller.utils.prints import console class Settings(Enum): HOMING_DIRECTION = 1 SPEED_RAD_PER_S = 5 STEPS_PER_REV_MOTOR_AXIS = 9 CONVERSION_RATE_AXIS_JOINTS = 13 HOMING_OFFSET_RADS = 17 @dataclasses.dataclass class Setting: value: float code_set: int code_get: int last_updated: float = -1 class ArmController: def __init__(self) -> None: self._current_angles: List[float] = [0, 0, 0, 0, 0, 0] self.num_joints: int = len(self._current_angles) self.move_queue_size: int = 0 self.is_homed: bool = False self.last_health_check: float = 0 self.current_angles_lock: FIFOLock = FIFOLock() self.controller_server: ControllerServer = ControllerServer(self, 8500) self.websocket_server: Optional[WebsocketServer] = WebsocketServer(65433, self) # register new handlers here self.message_op_handlers: Dict[MessageOp, Callable[[Message], None]] = { MessageOp.MOVE: self.handle_move_message, MessageOp.STATUS: self.handle_status_message, MessageOp.CONFIG: self.handle_config_message, } # Arm parameters self.arm_params: ArmParameters = ArmParameters() self.arm_params.a2x = 0 self.arm_params.a2z = 172.48 self.arm_params.a3z = 173.5 self.arm_params.a4z = 0 self.arm_params.a4x = 126.2 self.arm_params.a5x = 64.1 self.arm_params.a6x = 169 self.arm_params.j1.set_bounds(-np.pi / 2, np.pi / 2) self.arm_params.j2.set_bounds(-1.39626, 1.57) self.arm_params.j3.set_bounds(-np.pi / 2, np.pi / 2) self.arm_params.j5.set_bounds(-np.pi / 2, np.pi / 2) self.kinematics: ArmKinematics = ArmKinematics(self.arm_params) self.command_cooldown: float = 0.01 self.connection_controller_timeout: int = 15 
self.joint_settings: List[Dict[Settings, Setting]] = [] self.joint_settings_response_code: List[Dict[int, Setting]] = [] for _ in range(self.num_joints): current_joint_settings = { Settings.HOMING_DIRECTION: Setting(value=1, code_set=1, code_get=3), Settings.SPEED_RAD_PER_S: Setting(value=1, code_set=5, code_get=7), Settings.STEPS_PER_REV_MOTOR_AXIS: Setting(value=200, code_set=9, code_get=11), Settings.CONVERSION_RATE_AXIS_JOINTS: Setting(value=1, code_set=13, code_get=15), Settings.HOMING_OFFSET_RADS: Setting(value=np.pi / 4, code_set=17, code_get=19), } self.joint_settings.append(current_joint_settings) self.joint_settings_response_code.append( {setting.code_get + 1: setting for setting in current_joint_settings.values()} ) """ ---------------------------------------- General Methods ---------------------------------------- """ def start(self, wait: bool = True, websocket_server: bool = True) -> None: console.print("Starting controller!", style="setup") if websocket_server and self.websocket_server is not None: self.websocket_server.start() else: self.websocket_server = None self.controller_server.start() start_time = time.time() if wait: while not self.is_ready: time.sleep(0.1) if time.time() - start_time > self.connection_controller_timeout: raise TimeoutError("Controller took too long to start, check arm client") console.print("\nController Started!", style="setup") def stop(self) -> None: console.print("Stopping controller...", style="setup", end="\n") if self.websocket_server is not None: self.websocket_server.stop() self.controller_server.stop() console.print("Controller Stopped", style="setup") @property def current_angles(self) -> List[float]: with self.current_angles_lock: return self._current_angles @current_angles.setter def current_angles(self, angles: List[float]) -> None: if len(angles) != self.num_joints: raise ValueError(f"Angles must be a list of length {self.num_joints}") with self.current_angles_lock: self._current_angles = angles @property 
def is_ready(self) -> bool: if self.websocket_server is None: websocket_server_up = True else: websocket_server_up = self.websocket_server.is_ready controller_server_up = self.controller_server.is_ready return websocket_server_up and controller_server_up """ ---------------------------------------- Handlers ---------------------------------------- """ def handle_move_message(self, message: Message) -> None: pass def handle_status_message(self, message: Message) -> None: code = message.code if code == 1: angles = message.args[: self.num_joints] self.current_angles = angles self.move_queue_size = int(message.args[self.num_joints]) self.is_homed = message.args[self.num_joints + 1] == 1 if code == 4: self.last_health_check = time.time() def handle_config_message(self, message: Message) -> None: code = message.code joint_idx = int(message.args[0]) if code in self.joint_settings_response_code[joint_idx].keys(): setting = self.joint_settings_response_code[joint_idx][code] setting.value = message.args[1] setting.last_updated = time.time() """ ---------------------------------------- API Methods -- MOVE ---------------------------------------- """ def move_to_angles(self, angles: List[float]) -> None: if not self.is_homed: console.print("Arm is not homed", style="error") return message = Message(MessageOp.MOVE, 1, angles) self.controller_server.send_message(message, mutex=True) self.move_queue_size += 1 def move_to(self, pose: ArmPose) -> None: target_angles = self.kinematics.pose_to_angles(pose, self.current_angles) if target_angles is None: console.print("Target pose is not reachable", style="error") return self.move_to_angles(target_angles) def home(self, wait: bool = True) -> None: message = Message(MessageOp.MOVE, 3) self.controller_server.send_message(message, mutex=True) time.sleep(self.command_cooldown) start_time = time.time() self.is_homed = False if wait: while not self.is_homed: time.sleep(0.1) if time.time() - start_time > 60: raise TimeoutError("Arm took too 
long to home") # wait for angles to get to 0 start_time = time.time() while not all([abs(angle) < 0.1 for angle in self.current_angles]): time.sleep(0.1) if time.time() - start_time > 60: raise TimeoutError("Arm took too long to home") def home_joint(self, joint_idx: int) -> None: message = Message(MessageOp.MOVE, 5, [joint_idx]) self.controller_server.send_message(message, mutex=True) time.sleep(self.command_cooldown) def wait_until_angles_at_target(self, target_angles: List[float], epsilon: float = 0.01) -> None: while not np.allclose(self.current_angles, target_angles, atol=epsilon): time.sleep(0.1) def wait_done_moving(self) -> None: while self.move_queue_size > 0: time.sleep(0.1) """ ---------------------------------------- API Methods -- STATUS ---------------------------------------- """ def health_check(self) -> bool: if not self.is_ready: console.print("Not connected", style="error") return False message = Message(MessageOp.STATUS, 3) current_time = time.time() with self.controller_server.connection_mutex: self.controller_server.send_message(message) while self.last_health_check < current_time and time.time() - current_time < 5: time.sleep(0.01) return self.last_health_check > current_time """ ---------------------------------------- API Methods -- CONFIG ---------------------------------------- """ def set_setting_joint(self, setting_key: Settings, value: float, joint_idx: int) -> None: if setting_key not in self.joint_settings[joint_idx].keys(): raise ValueError(f"Invalid setting key for joint setting: {setting_key}") setting = self.joint_settings[joint_idx][setting_key] code = setting.code_set message = Message(MessageOp.CONFIG, code, [float(joint_idx), value]) self.controller_server.send_message(message, mutex=True) setting.last_updated = -1 def set_setting_joints(self, setting_key: Settings, value: float) -> None: for joint_idx in range(self.num_joints): self.set_setting_joint(setting_key, value, joint_idx) def get_setting_joint(self, setting_key: 
Settings, joint_idx: int) -> float: if setting_key not in self.joint_settings[joint_idx].keys(): raise ValueError(f"Invalid setting key for joint setting: {setting_key}") setting = self.joint_settings[joint_idx][setting_key] code = setting.code_get if setting.last_updated < 0: # valid value message = Message(MessageOp.CONFIG, code, [float(joint_idx)]) self.controller_server.send_message(message, mutex=True) while setting.last_updated < 0: time.sleep(0.01) return setting.value def get_setting_joints(self, setting_key: Settings) -> List[float]: return [self.get_setting_joint(setting_key, joint_idx) for joint_idx in range(self.num_joints)]
/robot_arm_controller-0.1.9-py3-none-any.whl/robot_arm_controller/controller.py
0.840161
0.197793
controller.py
pypi
from __future__ import annotations import dataclasses from typing import List, Optional, Tuple import numpy as np from robot_arm_controller.utils.algebra import ( create_rotation_matrix_from_euler_angles, degree2rad, extract_euler_angles, nearest_by_2pi_ref, transformation_matrix, x_rotation_matrix, y_rotation_matrix, z_rotation_matrix, ) from robot_arm_controller.utils.prints import console """ ---------------------------------------- Helper Classes ---------------------------------------- """ class Joint: def __init__(self, min_val: float = -2 * np.pi, max_val: float = 2 * np.pi) -> None: self.min_val = min_val self.max_val = max_val def in_bounds(self, angle: float) -> bool: return angle >= self.min_val and angle <= self.max_val def set_bounds(self, min_val: float, max_val: float) -> None: self.min_val = min_val self.max_val = max_val class ArmParameters: def __init__(self) -> None: # J1 self.a1x: float = 0 self.a1y: float = 0 self.a1z: float = 0 # J2 self.a2x: float = 0 self.a2y: float = 0 self.a2z: float = 0 # J3 self.a3x: float = 0 self.a3y: float = 0 self.a3z: float = 0 # J4 self.a4x: float = 0 self.a4y: float = 0 self.a4z: float = 0 # J5 self.a5x: float = 0 self.a5y: float = 0 self.a5z: float = 0 # J6 self.a6x: float = 0 self.a6z: float = 0 self.a6y: float = 0 self.joint_ratios: List[float] = [] self.j1: Joint = Joint() self.j2: Joint = Joint() self.j3: Joint = Joint() self.j4: Joint = Joint() self.j5: Joint = Joint() self.j6: Joint = Joint() self.joints: List[Joint] = [self.j1, self.j2, self.j3, self.j4, self.j5, self.j6] @dataclasses.dataclass class ArmPose: x: float y: float z: float roll: float pitch: float yaw: float tool_pos: float = 90 def __init__( self, x: float, y: float, z: float, roll: float, pitch: float, yaw: float, degree: bool = False, ) -> None: self.x = x self.y = y self.z = z if degree: self.roll = degree2rad(roll) self.pitch = degree2rad(pitch) self.yaw = degree2rad(yaw) else: self.roll = roll self.pitch = pitch self.yaw = yaw @property 
def as_tuple(self) -> Tuple[float, float, float, float, float, float]: return (self.x, self.y, self.z, self.roll, self.pitch, self.yaw) @property def as_list(self) -> List[float]: return list(self.as_tuple) """ ---------------------------------------- ArmKinematics ---------------------------------------- """ class ArmKinematics: def __init__(self, arm_parameters: ArmParameters) -> None: self.arm_params: ArmParameters = arm_parameters class NotReachableError(Exception): def __init__(self, message: str, angles: List[float]) -> None: super().__init__(message) self.angles: List[float] = angles self.message: str = message def angles_to_pose(self, angles: List[float]) -> ArmPose: if len(angles) != len(self.arm_params.joints): raise ValueError("angles must be the same length as joints") J1, J2, J3, J4, J5, J6 = angles R1 = z_rotation_matrix(J1) D1 = np.array([self.arm_params.a1x, self.arm_params.a1y, self.arm_params.a1z]) T1 = transformation_matrix(R1, D1) # J1 --> J2 R2 = y_rotation_matrix(J2) D2 = np.array([self.arm_params.a2x, self.arm_params.a2y, self.arm_params.a2z]) T2 = transformation_matrix(R2, D2) # J2 --> J3 R3 = y_rotation_matrix(J3) D3 = np.array([self.arm_params.a3x, self.arm_params.a3y, self.arm_params.a3z]) T3 = transformation_matrix(R3, D3) # J3 --> J4 R4 = x_rotation_matrix(J4) D4 = np.array([self.arm_params.a4x, self.arm_params.a4y, self.arm_params.a4z]) T4 = transformation_matrix(R4, D4) # J4 --> J5 R5 = y_rotation_matrix(J5) D5 = np.array([self.arm_params.a5x, self.arm_params.a5y, self.arm_params.a5z]) T5 = transformation_matrix(R5, D5) # J5 --> J6 R6 = x_rotation_matrix(J6) D6 = np.array([self.arm_params.a6x, self.arm_params.a6y, self.arm_params.a6z]) T6 = transformation_matrix(R6, D6) # Base--> TCP position = T1 @ T2 @ T3 @ T4 @ T5 @ T6 @ np.array([[0], [0], [0], [1]]) rotation = R1 @ R2 @ R3 @ R4 @ R5 @ R6 euler_angles = extract_euler_angles(rotation) pos = list(position[:3, 0]) return ArmPose(*pos, *euler_angles) def pose_to_angles(self, 
target_pose: ArmPose, current_angles: List[float]) -> Optional[List[float]]: try: found_angles = self._pose_to_angles(target_pose, current_angles) return found_angles except self.NotReachableError as exception: console.print(f"NotReachableError in pose_to_angles: {exception}", style="error") return None def _pose_to_angles( self, target_pose: ArmPose, current_angles: Optional[List[float]] = None ) -> List[float]: if current_angles is None: current_angles = [0 for _ in range(len(self.arm_params.joints))] prev_angles = current_angles J1_prev = prev_angles[0] J4_prev = prev_angles[3] x, y, z, roll, pitch, yaw = target_pose.as_tuple TCP = np.array([[x], [y], [z]]) xdirection = create_rotation_matrix_from_euler_angles(roll, pitch, yaw) @ np.array([[1], [0], [0]]) WP = TCP - self.arm_params.a6x * xdirection # Finding J1,J2,J3 J1 = np.arctan2(WP[1, 0], WP[0, 0]) if WP[0, 0] == 0 and WP[1, 0] == 0: J1 = J1_prev WPxy = np.sqrt(WP[0, 0] ** 2 + WP[1, 0] ** 2) L = WPxy - self.arm_params.a2x H = WP[2, 0] - self.arm_params.a1z - self.arm_params.a2z P = np.sqrt(H**2 + L**2) b4x = np.sqrt(self.arm_params.a4z**2 + (self.arm_params.a4x + self.arm_params.a5x) ** 2) if (P <= self.arm_params.a3z + b4x) and abs(self.arm_params.a3z - b4x) < P: alfa = np.arctan2(H, L) cosbeta = (P**2 + self.arm_params.a3z**2 - b4x**2) / (2 * P * self.arm_params.a3z) beta = np.arctan2(np.sqrt(1 - cosbeta**2), cosbeta) cosgamma = (self.arm_params.a3z**2 + b4x**2 - P**2) / (2 * self.arm_params.a3z * b4x) gamma = np.arctan2(np.sqrt(1 - cosgamma**2), cosgamma) delta = np.arctan2(self.arm_params.a4x + self.arm_params.a5x, self.arm_params.a4z) J2 = np.pi / 2.0 - alfa - beta J3 = np.pi - gamma - delta # Finding Wrist Orientation R1 = z_rotation_matrix(J1) R2 = y_rotation_matrix(J2) R3 = y_rotation_matrix(J3) Rarm = R1 @ R2 @ R3 Rarmt = Rarm.transpose() R = create_rotation_matrix_from_euler_angles(roll, pitch, yaw) Rwrist = Rarmt @ R # Finding J4 J5 = np.arctan2(np.sqrt(1 - Rwrist[0, 0] ** 2), Rwrist[0, 0]) if J5 
== 0: # Singularity J4 = J4_prev # keep the current angle of J4. J6 = np.arctan2(Rwrist[2, 1], Rwrist[2, 2]) - J4 else: J4_1 = np.arctan2(Rwrist[1, 0], -Rwrist[2, 0]) J4_2 = -np.arctan2(Rwrist[1, 0], Rwrist[2, 0]) J6_1 = np.arctan2(Rwrist[0, 1], Rwrist[0, 2]) J6_2 = -np.arctan2(Rwrist[0, 1], -Rwrist[0, 2]) if abs(J4_prev - J4_1) > abs(J4_prev - J4_2): J4 = J4_2 J6 = J6_2 J5 = np.arctan2(-np.sqrt(1 - Rwrist[0, 0] ** 2), Rwrist[0, 0]) else: J4 = J4_1 J6 = J6_1 found_angles = [J1, J2, J3, J4, J5, J6] return [nearest_by_2pi_ref(angle, ref) for angle, ref in zip(found_angles, prev_angles)] raise self.NotReachableError("Target pose is not reachable", found_angles)
/robot_arm_controller-0.1.9-py3-none-any.whl/robot_arm_controller/control/arm_kinematics.py
0.948191
0.520862
arm_kinematics.py
pypi
import numpy as np __author__ = "Alberto Abarzua" def rad2degree(angle: float) -> float: return (180 / np.pi) * angle def degree2rad(angle: float) -> float: return (np.pi / 180) * angle def x_rotation_matrix(angle: float) -> np.ndarray: """Creates a rotation matrix using angle about the X axis. Args: angle (float): angle that must be in radians Returns: np.array: 3x3 transformation matrix using angle """ r = np.array([[1, 0, 0], [0, np.cos(angle), -np.sin(angle)], [0, np.sin(angle), np.cos(angle)]]) return r def y_rotation_matrix(angle: float) -> np.ndarray: """Creates a rotation matrix using angle about the Y axis. Args: angle (float): angle that must be in radians Returns: np.array: 3x3 transformation matrix using angle """ r = np.array([[np.cos(angle), 0, np.sin(angle)], [0, 1, 0], [-np.sin(angle), 0, np.cos(angle)]]) return r def z_rotation_matrix(angle: float) -> np.ndarray: """Creates a rotation matrix using angle about the Z axis. Args: angle (float): angle that must be in radians Returns: np.array: 3x3 transformation matrix using angle """ r = np.array([[np.cos(angle), -np.sin(angle), 0], [np.sin(angle), np.cos(angle), 0], [0, 0, 1]]) return r def transformation_matrix(R: np.ndarray, D: np.ndarray) -> np.ndarray: """Create a 4x4 homogeneous transformation matrix from a 3x3 rotation matrix and a 3D translation vector. Args: R (np.ndarray): A 3x3 rotation matrix. D (np.ndarray): A 1D array of size 3 representing the translation along the x, y, and z axes. Returns: np.ndarray: A 4x4 homogeneous transformation matrix. Raises: ValueError: If `R` is not a 3x3 matrix, or `D` is not a 1D array of size 3. """ if R.shape != (3, 3) or D.shape != (3,): raise ValueError("R must be a 3x3 matrix and D must be a 1D array of size 3.") return np.block([[R, D.reshape(-1, 1)], [0, 0, 0, 1]]) # reshape the D vector to column vector def extract_euler_angles(R: np.ndarray) -> list[float]: """Extract Euler angles (roll, pitch, yaw) from a given rotation matrix. 
Args: R (np.ndarray): A 3x3 rotation matrix. Returns: list[float]: The three Euler angles (roll, pitch, yaw) obtained from the rotation matrix in radians. Raises: ValueError: If `R` is not a 3x3 matrix. """ if R.shape != (3, 3): raise ValueError("R must be a 3x3 matrix.") if R[2, 0] != 1 and R[2, 0] != -1: pitch = -np.arcsin(R[2, 0]) roll = np.arctan2(R[2, 1] / np.cos(pitch), R[2, 2] / np.cos(pitch)) yaw = np.arctan2(R[1, 0] / np.cos(pitch), R[0, 0] / np.cos(pitch)) return [float(roll), float(pitch), float(yaw)] else: yaw = 0 if R[2, 0] == -1: pitch = np.pi / 2.0 roll = yaw + np.arctan2(R[0, 1], R[0, 2]) else: pitch = -np.pi / 2.0 roll = -yaw + np.arctan2(-R[0, 1], -R[0, 2]) return [float(roll), float(pitch), float(yaw)] def create_rotation_matrix_from_euler_angles(roll: float, pitch: float, yaw: float) -> np.ndarray: """Create a 3x3 rotation matrix from three Euler angles (roll, pitch, yaw). Args: roll (float): Roll (x-Euler angle) in radians. pitch (float): Pitch (y-Euler angle) in radians. yaw (float): Yaw (z-Euler angle) in radians. Returns: np.ndarray: 3x3 rotation matrix. """ return z_rotation_matrix(yaw) @ y_rotation_matrix(pitch) @ x_rotation_matrix(roll) def nearest_by_2pi_ref(angle: float, ref: float) -> float: """Find the nearest angle to 'ref' that is a 2π away from 'angle' in any direction. Args: angle (float): The starting angle in radians. ref (float): The reference angle in radians. Returns: float: The nearest angle to 'ref' that is a 2π away from 'angle' in any direction. """ two_pi = 2 * np.pi n = round((ref - angle) / two_pi) return angle + n * two_pi
/robot_arm_controller-0.1.9-py3-none-any.whl/robot_arm_controller/utils/algebra.py
0.967132
0.957477
algebra.py
pypi
from __future__ import annotations import dataclasses import struct from enum import Enum __author__ = "Alberto Abarzua" class MessageOp(Enum): MOVE = "M" STATUS = "S" CONFIG = "C" @dataclasses.dataclass class Message: op: MessageOp code: int num_args: int args: list[float] LENGTH_HEADERS = 1 + 4 * 2 # op + code + num_args def __init__(self, op: MessageOp, code: int, args: list[float] = []) -> None: self.op = op self.code = code self.num_args = len(args) self.args = args def __post_init__(self) -> None: error_msg_num_args = f"Number of arguments ({self.num_args}) does not match" error_msg_num_args += f" the length of the args list ({len(self.args)})" assert len(self.args) == self.num_args, error_msg_num_args assert isinstance(self.op, MessageOp), f"op must be a MessageOp, not {type(self.op)}" assert isinstance(self.code, int), f"code must be an int, not {type(self.code)}" assert isinstance(self.num_args, int), f"num_args must be an int, not {type(self.num_args)}" assert isinstance(self.args, list), f"args must be a list, not {type(self.args)}" for arg in self.args: assert isinstance(arg, float), f"args must be a list of floats, not {type(arg)}" def encode(self) -> bytes: return struct.pack( "<cii" + "f" * len(self.args), self.op.value.encode(), self.code, self.num_args, *self.args, ) @staticmethod def decode_headers(bytes: bytes) -> tuple[MessageOp, int, int]: op, code, num_args = struct.unpack_from("<cii", bytes, offset=0) op = op.decode() return MessageOp(op), code, num_args @staticmethod def decode(bytes: bytes) -> Message: op, code, num_args = struct.unpack_from("<cii", bytes, offset=0) args = struct.unpack_from("<" + "f" * num_args, bytes, offset=9) op = op.decode() return Message(MessageOp(op), code, args) # type: ignore def __str__(self) -> str: first_args = self.args[: self.num_args // 2] second_args = self.args[self.num_args // 2 :] first_args_str = ", ".join([f"{arg:.3f}" for arg in first_args]) second_args_str = ", ".join([f"{arg:.3f}" for arg in 
second_args]) args_str = f"\n{first_args_str}\n{second_args_str}" return f"op: {self.op}, code: {self.code}, num_args: {self.num_args}, args: {args_str}"
/robot_arm_controller-0.1.9-py3-none-any.whl/robot_arm_controller/utils/messages.py
0.846594
0.391639
messages.py
pypi
import time from multiprocessing.managers import SharedMemoryManager import click import cv2 import numpy as np import scipy.spatial.transform as st from diffusion_policy.real_world.real_env import RealEnv from diffusion_policy.real_world.spacemouse_shared_memory import Spacemouse from diffusion_policy.common.precise_sleep import precise_wait from diffusion_policy.real_world.keystroke_counter import KeystrokeCounter, Key, KeyCode @click.command() @click.option( "--output", "-o", required=True, help="Directory to save demonstration dataset." ) @click.option( "--robot_ip", "-ri", required=True, help="UR5's IP address e.g. 192.168.0.204" ) @click.option( "--vis_camera_idx", default=0, type=int, help="Which RealSense camera to visualize." ) @click.option( "--init_joints", "-j", is_flag=True, default=False, help="Whether to initialize robot joint configuration in the beginning.", ) @click.option( "--frequency", "-f", default=10, type=float, help="Control frequency in Hz." ) @click.option( "--command_latency", "-cl", default=0.01, type=float, help="Latency between receiving SapceMouse command to executing on Robot in Sec.", ) def main(output, robot_ip, vis_camera_idx, init_joints, frequency, command_latency): dt = 1 / frequency with SharedMemoryManager() as shm_manager: with KeystrokeCounter() as key_counter, Spacemouse( shm_manager=shm_manager ) as sm, RealEnv( output_dir=output, robot_ip=robot_ip, # recording resolution obs_image_resolution=(1280, 720), frequency=frequency, init_joints=init_joints, enable_multi_cam_vis=True, record_raw_video=True, # number of threads per camera view for video recording (H.264) thread_per_video=3, # video recording quality, lower is better (but slower). 
video_crf=21, shm_manager=shm_manager, ) as env: cv2.setNumThreads(1) # realsense exposure env.realsense.set_exposure(exposure=120, gain=0) # realsense white balance env.realsense.set_white_balance(white_balance=5900) time.sleep(1.0) print("Ready!") state = env.get_robot_state() target_pose = state["TargetTCPPose"] t_start = time.monotonic() iter_idx = 0 stop = False is_recording = False while not stop: # calculate timing t_cycle_end = t_start + (iter_idx + 1) * dt t_sample = t_cycle_end - command_latency t_command_target = t_cycle_end + dt # pump obs obs = env.get_obs() # handle key presses press_events = key_counter.get_press_events() for key_stroke in press_events: if key_stroke == KeyCode(char="q"): # Exit program stop = True elif key_stroke == KeyCode(char="c"): # Start recording env.start_episode( t_start + (iter_idx + 2) * dt - time.monotonic() + time.time() ) key_counter.clear() is_recording = True print("Recording!") elif key_stroke == KeyCode(char="s"): # Stop recording env.end_episode() key_counter.clear() is_recording = False print("Stopped.") elif key_stroke == Key.backspace: # Delete the most recent recorded episode if click.confirm("Are you sure to drop an episode?"): env.drop_episode() key_counter.clear() is_recording = False # delete stage = key_counter[Key.space] # visualize vis_img = obs[f"camera_{vis_camera_idx}"][-1, :, :, ::-1].copy() episode_id = env.replay_buffer.n_episodes text = f"Episode: {episode_id}, Stage: {stage}" if is_recording: text += ", Recording!" 
cv2.putText( vis_img, text, (10, 30), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2, color=(255, 255, 255), ) cv2.imshow("default", vis_img) cv2.pollKey() precise_wait(t_sample) # get teleop command sm_state = sm.get_motion_state_transformed() # print(sm_state) dpos = sm_state[:3] * (env.max_pos_speed / frequency) drot_xyz = sm_state[3:] * (env.max_rot_speed / frequency) if not sm.is_button_pressed(0): # translation mode drot_xyz[:] = 0 else: dpos[:] = 0 if not sm.is_button_pressed(1): # 2D translation mode dpos[2] = 0 drot = st.Rotation.from_euler("xyz", drot_xyz) target_pose[:3] += dpos target_pose[3:] = ( drot * st.Rotation.from_rotvec(target_pose[3:]) ).as_rotvec() # execute teleop command env.exec_actions( actions=[target_pose], timestamps=[t_command_target - time.monotonic() + time.time()], stages=[stage], ) precise_wait(t_cycle_end) iter_idx += 1 # %% if __name__ == "__main__": main()
/robot_awe-0.1.tar.gz/robot_awe-0.1/diffusion_policy/demo_real_robot.py
0.422386
0.168036
demo_real_robot.py
pypi
import numpy as np import torch import os import h5py from torch.utils.data import TensorDataset, DataLoader import IPython e = IPython.embed def relabel_waypoints(arr, waypoint_indices): start_idx = 0 for key_idx in waypoint_indices: # Replace the items between the start index and the key index with the key item arr[start_idx:key_idx] = arr[key_idx] start_idx = key_idx return arr class EpisodicDataset(torch.utils.data.Dataset): def __init__( self, episode_ids, dataset_dir, camera_names, norm_stats, use_waypoint=False, constant_waypoint=None, ): super(EpisodicDataset).__init__() self.episode_ids = episode_ids self.dataset_dir = dataset_dir self.camera_names = camera_names self.norm_stats = norm_stats self.is_sim = None self.use_waypoint = use_waypoint self.constant_waypoint = constant_waypoint self.__getitem__(0) # initialize self.is_sim def __len__(self): return len(self.episode_ids) def __getitem__(self, index): sample_full_episode = False # hardcode episode_id = self.episode_ids[index] dataset_path = os.path.join(self.dataset_dir, f"episode_{episode_id}.hdf5") with h5py.File(dataset_path, "r") as root: is_sim = root.attrs["sim"] original_action_shape = root["/action"].shape episode_len = original_action_shape[0] if sample_full_episode: start_ts = 0 else: start_ts = np.random.choice(episode_len) # get observation at start_ts only qpos = root["/observations/qpos"][start_ts] qvel = root["/observations/qvel"][start_ts] image_dict = dict() for cam_name in self.camera_names: image_dict[cam_name] = root[f"/observations/images/{cam_name}"][ start_ts ] # get all actions after and including start_ts if is_sim: action = root["/action"][start_ts:] action_len = episode_len - start_ts else: action = root["/action"][ max(0, start_ts - 1) : ] # hack, to make timesteps more aligned action_len = episode_len - max( 0, start_ts - 1 ) # hack, to make timesteps more aligned if self.use_waypoint and self.constant_waypoint is None: waypoints = root["/waypoints"][()] if 
self.use_waypoint: # constant waypoints if self.constant_waypoint is not None: assert self.constant_waypoint > 0 waypoints = np.arange(1, action_len, self.constant_waypoint) if len(waypoints) == 0: waypoints = np.array([action_len - 1]) elif waypoints[-1] != action_len - 1: waypoints = np.append(waypoints, action_len - 1) # auto waypoints else: waypoints = waypoints - start_ts waypoints = waypoints[waypoints >= 0] waypoints = waypoints[waypoints < action_len] waypoints = np.append(waypoints, action_len - 1) waypoints = np.unique(waypoints) waypoints = waypoints.astype(np.int32) action = relabel_waypoints(action, waypoints) self.is_sim = is_sim padded_action = np.zeros(original_action_shape, dtype=np.float32) padded_action[:action_len] = action is_pad = np.zeros(episode_len) is_pad[action_len:] = 1 # new axis for different cameras all_cam_images = [] for cam_name in self.camera_names: all_cam_images.append(image_dict[cam_name]) all_cam_images = np.stack(all_cam_images, axis=0) # construct observations image_data = torch.from_numpy(all_cam_images) qpos_data = torch.from_numpy(qpos).float() action_data = torch.from_numpy(padded_action).float() is_pad = torch.from_numpy(is_pad).bool() # channel last image_data = torch.einsum("k h w c -> k c h w", image_data) # normalize image and change dtype to float image_data = image_data / 255.0 action_data = (action_data - self.norm_stats["action_mean"]) / self.norm_stats[ "action_std" ] qpos_data = (qpos_data - self.norm_stats["qpos_mean"]) / self.norm_stats[ "qpos_std" ] return image_data, qpos_data, action_data, is_pad def get_norm_stats(dataset_dir, num_episodes): all_qpos_data = [] all_action_data = [] for episode_idx in range(num_episodes): dataset_path = os.path.join(dataset_dir, f"episode_{episode_idx}.hdf5") with h5py.File(dataset_path, "r") as root: qpos = root["/observations/qpos"][()] qvel = root["/observations/qvel"][()] action = root["/action"][()] all_qpos_data.append(torch.from_numpy(qpos)) 
all_action_data.append(torch.from_numpy(action)) all_qpos_data = torch.stack(all_qpos_data) all_action_data = torch.stack(all_action_data) all_action_data = all_action_data # normalize action data action_mean = all_action_data.mean(dim=[0, 1], keepdim=True) action_std = all_action_data.std(dim=[0, 1], keepdim=True) action_std = torch.clip(action_std, 1e-2, 10) # clipping # normalize qpos data qpos_mean = all_qpos_data.mean(dim=[0, 1], keepdim=True) qpos_std = all_qpos_data.std(dim=[0, 1], keepdim=True) qpos_std = torch.clip(qpos_std, 1e-2, 10) # clipping stats = { "action_mean": action_mean.numpy().squeeze(), "action_std": action_std.numpy().squeeze(), "qpos_mean": qpos_mean.numpy().squeeze(), "qpos_std": qpos_std.numpy().squeeze(), "example_qpos": qpos, } return stats def load_data( dataset_dir, num_episodes, camera_names, batch_size_train, batch_size_val, use_waypoint=False, constant_waypoint=None, ): print(f"\nData from: {dataset_dir}\n") # obtain train test split train_ratio = 0.8 shuffled_indices = np.random.permutation(num_episodes) train_indices = shuffled_indices[: int(train_ratio * num_episodes)] val_indices = shuffled_indices[int(train_ratio * num_episodes) :] # obtain normalization stats for qpos and action norm_stats = get_norm_stats(dataset_dir, num_episodes) # construct dataset and dataloader train_dataset = EpisodicDataset( train_indices, dataset_dir, camera_names, norm_stats, use_waypoint=use_waypoint, constant_waypoint=constant_waypoint, ) val_dataset = EpisodicDataset( val_indices, dataset_dir, camera_names, norm_stats, use_waypoint=use_waypoint, constant_waypoint=constant_waypoint, ) train_dataloader = DataLoader( train_dataset, batch_size=batch_size_train, shuffle=True, pin_memory=True, num_workers=1, prefetch_factor=1, ) val_dataloader = DataLoader( val_dataset, batch_size=batch_size_val, shuffle=True, pin_memory=True, num_workers=1, prefetch_factor=1, ) return train_dataloader, val_dataloader, norm_stats, train_dataset.is_sim ### env utils def 
sample_box_pose(): x_range = [0.0, 0.2] y_range = [0.4, 0.6] z_range = [0.05, 0.05] ranges = np.vstack([x_range, y_range, z_range]) cube_position = np.random.uniform(ranges[:, 0], ranges[:, 1]) cube_quat = np.array([1, 0, 0, 0]) return np.concatenate([cube_position, cube_quat]) def sample_insertion_pose(): # Peg x_range = [0.1, 0.2] y_range = [0.4, 0.6] z_range = [0.05, 0.05] ranges = np.vstack([x_range, y_range, z_range]) peg_position = np.random.uniform(ranges[:, 0], ranges[:, 1]) peg_quat = np.array([1, 0, 0, 0]) peg_pose = np.concatenate([peg_position, peg_quat]) # Socket x_range = [-0.2, -0.1] y_range = [0.4, 0.6] z_range = [0.05, 0.05] ranges = np.vstack([x_range, y_range, z_range]) socket_position = np.random.uniform(ranges[:, 0], ranges[:, 1]) socket_quat = np.array([1, 0, 0, 0]) socket_pose = np.concatenate([socket_position, socket_quat]) return peg_pose, socket_pose ### helper functions def compute_dict_mean(epoch_dicts): result = {k: None for k in epoch_dicts[0]} num_items = len(epoch_dicts) for k in result: value_sum = 0 for epoch_dict in epoch_dicts: value_sum += epoch_dict[k] result[k] = value_sum / num_items return result def detach_dict(d): new_d = dict() for k, v in d.items(): new_d[k] = v.detach() return new_d def set_seed(seed): torch.manual_seed(seed) np.random.seed(seed)
/robot_awe-0.1.tar.gz/robot_awe-0.1/act/act_utils.py
0.697094
0.403332
act_utils.py
pypi
import time import os import numpy as np import argparse import matplotlib.pyplot as plt import h5py from constants import PUPPET_GRIPPER_POSITION_NORMALIZE_FN, SIM_TASK_CONFIGS from ee_sim_env import make_ee_sim_env from sim_env import make_sim_env, BOX_POSE from scripted_policy import PickAndTransferPolicy, InsertionPolicy import IPython e = IPython.embed def main(args): """ Generate demonstration data in simulation. First rollout the policy (defined in ee space) in ee_sim_env. Obtain the joint trajectory. Replace the gripper joint positions with the commanded joint position. Replay this joint trajectory (as action sequence) in sim_env, and record all observations. Save this episode of data, and continue to next episode of data collection. """ task_name = args["task_name"] dataset_dir = args["dataset_dir"] num_episodes = args["num_episodes"] onscreen_render = args["onscreen_render"] inject_noise = False render_cam_name = "angle" if not os.path.isdir(dataset_dir): os.makedirs(dataset_dir, exist_ok=True) episode_len = SIM_TASK_CONFIGS[task_name]["episode_len"] camera_names = SIM_TASK_CONFIGS[task_name]["camera_names"] if task_name == "sim_transfer_cube_scripted": policy_cls = PickAndTransferPolicy elif task_name == "sim_insertion_scripted": policy_cls = InsertionPolicy else: raise NotImplementedError success = [] for episode_idx in range(num_episodes): print(f"{episode_idx=}") print("Rollout out EE space scripted policy") # setup the environment env = make_ee_sim_env(task_name) ts = env.reset() episode = [ts] policy = policy_cls(inject_noise) # setup plotting if onscreen_render: ax = plt.subplot() plt_img = ax.imshow(ts.observation["images"][render_cam_name]) plt.ion() for step in range(episode_len): action = policy(ts) ts = env.step(action) episode.append(ts) if onscreen_render: plt_img.set_data(ts.observation["images"][render_cam_name]) plt.pause(0.002) plt.close() episode_return = np.sum([ts.reward for ts in episode[1:]]) episode_max_reward = np.max([ts.reward 
for ts in episode[1:]]) if episode_max_reward == env.task.max_reward: print(f"{episode_idx=} Successful, {episode_return=}") else: print(f"{episode_idx=} Failed") joint_traj = [ts.observation["qpos"] for ts in episode] # replace gripper pose with gripper control gripper_ctrl_traj = [ts.observation["gripper_ctrl"] for ts in episode] for joint, ctrl in zip(joint_traj, gripper_ctrl_traj): left_ctrl = PUPPET_GRIPPER_POSITION_NORMALIZE_FN(ctrl[0]) right_ctrl = PUPPET_GRIPPER_POSITION_NORMALIZE_FN(ctrl[2]) joint[6] = left_ctrl joint[6 + 7] = right_ctrl subtask_info = episode[0].observation["env_state"].copy() # box pose at step 0 # clear unused variables del env del episode del policy # setup the environment print("Replaying joint commands") env = make_sim_env(task_name) BOX_POSE[ 0 ] = subtask_info # make sure the sim_env has the same object configurations as ee_sim_env ts = env.reset() episode_replay = [ts] # setup plotting if onscreen_render: ax = plt.subplot() plt_img = ax.imshow(ts.observation["images"][render_cam_name]) plt.ion() for t in range(len(joint_traj)): # note: this will increase episode length by 1 action = joint_traj[t] ts = env.step(action) episode_replay.append(ts) if onscreen_render: plt_img.set_data(ts.observation["images"][render_cam_name]) plt.pause(0.02) episode_return = np.sum([ts.reward for ts in episode_replay[1:]]) episode_max_reward = np.max([ts.reward for ts in episode_replay[1:]]) if episode_max_reward == env.task.max_reward: success.append(1) print(f"{episode_idx=} Successful, {episode_return=}") else: success.append(0) print(f"{episode_idx=} Failed") plt.close() """ For each timestep: observations - images - each_cam_name (480, 640, 3) 'uint8' - qpos (14,) 'float64' - qvel (14,) 'float64' action (14,) 'float64' """ data_dict = { "/observations/qpos": [], "/observations/qvel": [], "/action": [], } for cam_name in camera_names: data_dict[f"/observations/images/{cam_name}"] = [] # because the replaying, there will be eps_len + 1 actions and 
eps_len + 2 timesteps # truncate here to be consistent joint_traj = joint_traj[:-1] episode_replay = episode_replay[:-1] # len(joint_traj) i.e. actions: max_timesteps # len(episode_replay) i.e. time steps: max_timesteps + 1 max_timesteps = len(joint_traj) while joint_traj: action = joint_traj.pop(0) ts = episode_replay.pop(0) data_dict["/observations/qpos"].append(ts.observation["qpos"]) data_dict["/observations/qvel"].append(ts.observation["qvel"]) data_dict["/action"].append(action) for cam_name in camera_names: data_dict[f"/observations/images/{cam_name}"].append( ts.observation["images"][cam_name] ) # HDF5 t0 = time.time() dataset_path = os.path.join(dataset_dir, f"episode_{episode_idx}") with h5py.File(dataset_path + ".hdf5", "w", rdcc_nbytes=1024**2 * 2) as root: root.attrs["sim"] = True obs = root.create_group("observations") image = obs.create_group("images") for cam_name in camera_names: _ = image.create_dataset( cam_name, (max_timesteps, 480, 640, 3), dtype="uint8", chunks=(1, 480, 640, 3), ) # compression='gzip',compression_opts=2,) # compression=32001, compression_opts=(0, 0, 0, 0, 9, 1, 1), shuffle=False) qpos = obs.create_dataset("qpos", (max_timesteps, 14)) qvel = obs.create_dataset("qvel", (max_timesteps, 14)) action = root.create_dataset("action", (max_timesteps, 14)) for name, array in data_dict.items(): root[name][...] = array print(f"Saving: {time.time() - t0:.1f} secs\n") print(f"Saved to {dataset_dir}") print(f"Success: {np.sum(success)} / {len(success)}") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--task_name", action="store", type=str, help="task_name", required=True ) parser.add_argument( "--dataset_dir", action="store", type=str, help="dataset saving dir", required=True, ) parser.add_argument( "--num_episodes", action="store", type=int, help="num_episodes", required=False ) parser.add_argument("--onscreen_render", action="store_true") main(vars(parser.parse_args()))
/robot_awe-0.1.tar.gz/robot_awe-0.1/act/record_sim_episodes.py
0.444806
0.32548
record_sim_episodes.py
pypi
import torch.nn as nn from torch.nn import functional as F import torchvision.transforms as transforms from act.detr.main import ( build_ACT_model_and_optimizer, build_CNNMLP_model_and_optimizer, ) import IPython e = IPython.embed class ACTPolicy(nn.Module): def __init__(self, args_override): super().__init__() model, optimizer = build_ACT_model_and_optimizer(args_override) self.model = model # CVAE decoder self.optimizer = optimizer self.kl_weight = args_override["kl_weight"] print(f"KL Weight {self.kl_weight}") def __call__(self, qpos, image, actions=None, is_pad=None): env_state = None normalize = transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ) image = normalize(image) if actions is not None: # training time actions = actions[:, : self.model.num_queries] is_pad = is_pad[:, : self.model.num_queries] a_hat, is_pad_hat, (mu, logvar) = self.model( qpos, image, env_state, actions, is_pad ) total_kld, dim_wise_kld, mean_kld = kl_divergence(mu, logvar) loss_dict = dict() all_l1 = F.l1_loss(actions, a_hat, reduction="none") l1 = (all_l1 * ~is_pad.unsqueeze(-1)).mean() loss_dict["l1"] = l1 loss_dict["kl"] = total_kld[0] loss_dict["loss"] = loss_dict["l1"] + loss_dict["kl"] * self.kl_weight return loss_dict else: # inference time a_hat, _, (_, _) = self.model( qpos, image, env_state ) # no action, sample from prior return a_hat def configure_optimizers(self): return self.optimizer class CNNMLPPolicy(nn.Module): def __init__(self, args_override): super().__init__() model, optimizer = build_CNNMLP_model_and_optimizer(args_override) self.model = model # decoder self.optimizer = optimizer def __call__(self, qpos, image, actions=None, is_pad=None): env_state = None # TODO normalize = transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ) image = normalize(image) if actions is not None: # training time actions = actions[:, 0] a_hat = self.model(qpos, image, env_state, actions) mse = F.mse_loss(actions, a_hat) loss_dict = 
dict() loss_dict["mse"] = mse loss_dict["loss"] = loss_dict["mse"] return loss_dict else: # inference time a_hat = self.model(qpos, image, env_state) # no action, sample from prior return a_hat def configure_optimizers(self): return self.optimizer def kl_divergence(mu, logvar): batch_size = mu.size(0) assert batch_size != 0 if mu.data.ndimension() == 4: mu = mu.view(mu.size(0), mu.size(1)) if logvar.data.ndimension() == 4: logvar = logvar.view(logvar.size(0), logvar.size(1)) klds = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()) total_kld = klds.sum(1).mean(0, True) dimension_wise_kld = klds.mean(0) mean_kld = klds.mean(1).mean(0, True) return total_kld, dimension_wise_kld, mean_kld
/robot_awe-0.1.tar.gz/robot_awe-0.1/act/policy.py
0.847195
0.405508
policy.py
pypi
import numpy as np import matplotlib.pyplot as plt from pyquaternion import Quaternion from constants import SIM_TASK_CONFIGS from ee_sim_env import make_ee_sim_env import IPython e = IPython.embed class BasePolicy: def __init__(self, inject_noise=False): self.inject_noise = inject_noise self.step_count = 0 self.left_trajectory = None self.right_trajectory = None def generate_trajectory(self, ts_first): raise NotImplementedError @staticmethod def interpolate(curr_waypoint, next_waypoint, t): t_frac = (t - curr_waypoint["t"]) / (next_waypoint["t"] - curr_waypoint["t"]) curr_xyz = curr_waypoint["xyz"] curr_quat = curr_waypoint["quat"] curr_grip = curr_waypoint["gripper"] next_xyz = next_waypoint["xyz"] next_quat = next_waypoint["quat"] next_grip = next_waypoint["gripper"] xyz = curr_xyz + (next_xyz - curr_xyz) * t_frac quat = curr_quat + (next_quat - curr_quat) * t_frac gripper = curr_grip + (next_grip - curr_grip) * t_frac return xyz, quat, gripper def __call__(self, ts): # generate trajectory at first timestep, then open-loop execution if self.step_count == 0: self.generate_trajectory(ts) # obtain left and right waypoints if self.left_trajectory[0]["t"] == self.step_count: self.curr_left_waypoint = self.left_trajectory.pop(0) next_left_waypoint = self.left_trajectory[0] if self.right_trajectory[0]["t"] == self.step_count: self.curr_right_waypoint = self.right_trajectory.pop(0) next_right_waypoint = self.right_trajectory[0] # interpolate between waypoints to obtain current pose and gripper command left_xyz, left_quat, left_gripper = self.interpolate( self.curr_left_waypoint, next_left_waypoint, self.step_count ) right_xyz, right_quat, right_gripper = self.interpolate( self.curr_right_waypoint, next_right_waypoint, self.step_count ) # Inject noise if self.inject_noise: scale = 0.01 left_xyz = left_xyz + np.random.uniform(-scale, scale, left_xyz.shape) right_xyz = right_xyz + np.random.uniform(-scale, scale, right_xyz.shape) action_left = np.concatenate([left_xyz, 
left_quat, [left_gripper]]) action_right = np.concatenate([right_xyz, right_quat, [right_gripper]]) self.step_count += 1 return np.concatenate([action_left, action_right]) class PickAndTransferPolicy(BasePolicy): def generate_trajectory(self, ts_first): init_mocap_pose_right = ts_first.observation["mocap_pose_right"] init_mocap_pose_left = ts_first.observation["mocap_pose_left"] box_info = np.array(ts_first.observation["env_state"]) box_xyz = box_info[:3] box_quat = box_info[3:] # print(f"Generate trajectory for {box_xyz=}") gripper_pick_quat = Quaternion(init_mocap_pose_right[3:]) gripper_pick_quat = gripper_pick_quat * Quaternion( axis=[0.0, 1.0, 0.0], degrees=-60 ) meet_left_quat = Quaternion(axis=[1.0, 0.0, 0.0], degrees=90) meet_xyz = np.array([0, 0.5, 0.25]) self.left_trajectory = [ { "t": 0, "xyz": init_mocap_pose_left[:3], "quat": init_mocap_pose_left[3:], "gripper": 0, }, # sleep { "t": 100, "xyz": meet_xyz + np.array([-0.1, 0, -0.02]), "quat": meet_left_quat.elements, "gripper": 1, }, # approach meet position { "t": 260, "xyz": meet_xyz + np.array([0.02, 0, -0.02]), "quat": meet_left_quat.elements, "gripper": 1, }, # move to meet position { "t": 310, "xyz": meet_xyz + np.array([0.02, 0, -0.02]), "quat": meet_left_quat.elements, "gripper": 0, }, # close gripper { "t": 360, "xyz": meet_xyz + np.array([-0.1, 0, -0.02]), "quat": np.array([1, 0, 0, 0]), "gripper": 0, }, # move left { "t": 400, "xyz": meet_xyz + np.array([-0.1, 0, -0.02]), "quat": np.array([1, 0, 0, 0]), "gripper": 0, }, # stay ] self.right_trajectory = [ { "t": 0, "xyz": init_mocap_pose_right[:3], "quat": init_mocap_pose_right[3:], "gripper": 0, }, # sleep { "t": 90, "xyz": box_xyz + np.array([0, 0, 0.08]), "quat": gripper_pick_quat.elements, "gripper": 1, }, # approach the cube { "t": 130, "xyz": box_xyz + np.array([0, 0, -0.015]), "quat": gripper_pick_quat.elements, "gripper": 1, }, # go down { "t": 170, "xyz": box_xyz + np.array([0, 0, -0.015]), "quat": gripper_pick_quat.elements, 
"gripper": 0, }, # close gripper { "t": 200, "xyz": meet_xyz + np.array([0.05, 0, 0]), "quat": gripper_pick_quat.elements, "gripper": 0, }, # approach meet position { "t": 220, "xyz": meet_xyz, "quat": gripper_pick_quat.elements, "gripper": 0, }, # move to meet position { "t": 310, "xyz": meet_xyz, "quat": gripper_pick_quat.elements, "gripper": 1, }, # open gripper { "t": 360, "xyz": meet_xyz + np.array([0.1, 0, 0]), "quat": gripper_pick_quat.elements, "gripper": 1, }, # move to right { "t": 400, "xyz": meet_xyz + np.array([0.1, 0, 0]), "quat": gripper_pick_quat.elements, "gripper": 1, }, # stay ] class InsertionPolicy(BasePolicy): def generate_trajectory(self, ts_first): init_mocap_pose_right = ts_first.observation["mocap_pose_right"] init_mocap_pose_left = ts_first.observation["mocap_pose_left"] peg_info = np.array(ts_first.observation["env_state"])[:7] peg_xyz = peg_info[:3] peg_quat = peg_info[3:] socket_info = np.array(ts_first.observation["env_state"])[7:] socket_xyz = socket_info[:3] socket_quat = socket_info[3:] gripper_pick_quat_right = Quaternion(init_mocap_pose_right[3:]) gripper_pick_quat_right = gripper_pick_quat_right * Quaternion( axis=[0.0, 1.0, 0.0], degrees=-60 ) gripper_pick_quat_left = Quaternion(init_mocap_pose_right[3:]) gripper_pick_quat_left = gripper_pick_quat_left * Quaternion( axis=[0.0, 1.0, 0.0], degrees=60 ) meet_xyz = np.array([0, 0.5, 0.15]) lift_right = 0.00715 self.left_trajectory = [ { "t": 0, "xyz": init_mocap_pose_left[:3], "quat": init_mocap_pose_left[3:], "gripper": 0, }, # sleep { "t": 120, "xyz": socket_xyz + np.array([0, 0, 0.08]), "quat": gripper_pick_quat_left.elements, "gripper": 1, }, # approach the cube { "t": 170, "xyz": socket_xyz + np.array([0, 0, -0.03]), "quat": gripper_pick_quat_left.elements, "gripper": 1, }, # go down { "t": 220, "xyz": socket_xyz + np.array([0, 0, -0.03]), "quat": gripper_pick_quat_left.elements, "gripper": 0, }, # close gripper { "t": 285, "xyz": meet_xyz + np.array([-0.1, 0, 0]), "quat": 
gripper_pick_quat_left.elements, "gripper": 0, }, # approach meet position { "t": 340, "xyz": meet_xyz + np.array([-0.05, 0, 0]), "quat": gripper_pick_quat_left.elements, "gripper": 0, }, # insertion { "t": 400, "xyz": meet_xyz + np.array([-0.05, 0, 0]), "quat": gripper_pick_quat_left.elements, "gripper": 0, }, # insertion ] self.right_trajectory = [ { "t": 0, "xyz": init_mocap_pose_right[:3], "quat": init_mocap_pose_right[3:], "gripper": 0, }, # sleep { "t": 120, "xyz": peg_xyz + np.array([0, 0, 0.08]), "quat": gripper_pick_quat_right.elements, "gripper": 1, }, # approach the cube { "t": 170, "xyz": peg_xyz + np.array([0, 0, -0.03]), "quat": gripper_pick_quat_right.elements, "gripper": 1, }, # go down { "t": 220, "xyz": peg_xyz + np.array([0, 0, -0.03]), "quat": gripper_pick_quat_right.elements, "gripper": 0, }, # close gripper { "t": 285, "xyz": meet_xyz + np.array([0.1, 0, lift_right]), "quat": gripper_pick_quat_right.elements, "gripper": 0, }, # approach meet position { "t": 340, "xyz": meet_xyz + np.array([0.05, 0, lift_right]), "quat": gripper_pick_quat_right.elements, "gripper": 0, }, # insertion { "t": 400, "xyz": meet_xyz + np.array([0.05, 0, lift_right]), "quat": gripper_pick_quat_right.elements, "gripper": 0, }, # insertion ] def test_policy(task_name): # example rolling out pick_and_transfer policy onscreen_render = True inject_noise = False # setup the environment episode_len = SIM_TASK_CONFIGS[task_name]["episode_len"] if "sim_transfer_cube" in task_name: env = make_ee_sim_env("sim_transfer_cube") elif "sim_insertion" in task_name: env = make_ee_sim_env("sim_insertion") else: raise NotImplementedError for episode_idx in range(2): ts = env.reset() episode = [ts] if onscreen_render: ax = plt.subplot() plt_img = ax.imshow(ts.observation["images"]["angle"]) plt.ion() policy = PickAndTransferPolicy(inject_noise) for step in range(episode_len): action = policy(ts) ts = env.step(action) episode.append(ts) if onscreen_render: 
plt_img.set_data(ts.observation["images"]["angle"]) plt.pause(0.02) plt.close() episode_return = np.sum([ts.reward for ts in episode[1:]]) if episode_return > 0: print(f"{episode_idx=} Successful, {episode_return=}") else: print(f"{episode_idx=} Failed") if __name__ == "__main__": test_task_name = "sim_transfer_cube_scripted" test_policy(test_task_name)
/robot_awe-0.1.tar.gz/robot_awe-0.1/act/scripted_policy.py
0.591133
0.405331
scripted_policy.py
pypi
import h5py import argparse import numpy as np from tqdm import tqdm from scipy.spatial.transform import Rotation import robomimic import robomimic.utils.obs_utils as ObsUtils import robomimic.utils.env_utils as EnvUtils import robomimic.utils.file_utils as FileUtils def main(args): # create environment (delta control) dummy_spec = dict( obs=dict( low_dim=["robot0_eef_pos"], rgb=[], ), ) ObsUtils.initialize_obs_utils_with_obs_specs(obs_modality_specs=dummy_spec) env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=args.dataset) env = EnvUtils.create_env_from_metadata(env_meta=env_meta, render_offscreen=True) # load the dataset f = h5py.File(args.dataset, "r+") demos = list(f["data"].keys()) inds = np.argsort([int(elem[5:]) for elem in demos]) demos = [demos[i] for i in inds] assert args.start_idx >= 0 and args.end_idx < len(demos) for idx in tqdm(range(args.start_idx, args.end_idx + 1), desc="Converting actions"): ep = demos[idx] states = f[f"data/{ep}/states"][()] traj_len = states.shape[0] # generate abs actions delta_actions = f[f"data/{ep}/actions"][()] action_pos = np.zeros((traj_len, 3), dtype=delta_actions.dtype) action_ori = np.zeros((traj_len, 3), dtype=delta_actions.dtype) action_gripper = delta_actions[:, -1:] # record low dim states obs = env.reset_to({"states": states[0]}) # convert to list for k in obs.keys(): obs[k] = [obs[k]] robot = env.env.robots[0] controller = robot.controller first = True for i in range(len(states)): env.reset_to({"states": states[i]}) # run controller robot.control(delta_actions[i], policy_step=True) if first: initial_state = env.get_state()["states"] first = False # read pos and ori from robots action_pos[i] = controller.ee_pos action_ori[i] = Rotation.from_matrix(controller.ee_ori_mat).as_rotvec() # record low dim states new_obs = env.get_observation() for k in obs.keys(): obs[k].append(new_obs[k]) actions = np.concatenate([action_pos, action_ori, action_gripper], axis=-1) # stack obs for k in obs.keys(): obs[k] = 
np.stack(obs[k], axis=0) # dump into a file of abs_actions in the original dataset f[f"data/{ep}/abs_actions"] = actions # dump into a file of abs_obs in the original dataset for k in obs.keys(): f[f"data/{ep}/abs_obs/{k}"] = obs[k] f[f"data/{ep}/initial_state"] = initial_state f.close() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--dataset", type=str, help="path to hdf5 dataset", ) # index of the trajectory to playback. If omitted, playback trajectory 0. parser.add_argument( "--start_idx", type=int, default=0, help="(optional) start index of the trajectory to playback", ) parser.add_argument( "--end_idx", type=int, default=199, help="(optional) end index of the trajectory to playback", ) args = parser.parse_args() main(args)
/robot_awe-0.1.tar.gz/robot_awe-0.1/utils/robomimic_convert_action.py
0.489015
0.396039
robomimic_convert_action.py
pypi
import h5py
import argparse
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt

import robomimic
import robomimic.utils.obs_utils as ObsUtils
import robomimic.utils.env_utils as EnvUtils
import robomimic.utils.file_utils as FileUtils

from robot_awe.extract_waypoints import (
    greedy_waypoint_selection,
    dp_waypoint_selection,
    backtrack_waypoint_selection,
)
from utils import plot_3d_trajectory

# accumulated across episodes (module-level so they survive the loop)
num_waypoints = []
num_frames = []


def main(args):
    """Run automatic waypoint selection on recorded trajectories and save a
    3D plot per episode comparing ground-truth EEF paths to the selected
    waypoints, one subplot per error threshold."""
    # create two environments for delta and absolute control, respectively
    dummy_spec = dict(
        obs=dict(
            low_dim=["robot0_eef_pos"],
            rgb=[],
        ),
    )
    ObsUtils.initialize_obs_utils_with_obs_specs(obs_modality_specs=dummy_spec)

    env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=args.dataset)
    # add linear interpolators for pos and ori
    env_meta["env_kwargs"]["controller_configs"]["interpolation"] = "linear"
    # absolute control
    env_meta["env_kwargs"]["controller_configs"]["control_delta"] = False
    env_meta["env_kwargs"]["controller_configs"]["multiplier"] = args.multiplier
    env = EnvUtils.create_env_from_metadata(env_meta=env_meta, render_offscreen=True)

    # fix: resolve the selection method up front and fail fast; the original
    # if/elif chain left `waypoint_selection` unbound for an unknown method,
    # producing a confusing NameError deep inside the episode loop
    selectors = {
        "greedy": greedy_waypoint_selection,
        "dp": dp_waypoint_selection,
        "backtrack": backtrack_waypoint_selection,
    }
    try:
        waypoint_selection = selectors[args.method]
    except KeyError:
        raise ValueError(f"unknown waypoint selection method: {args.method}")

    # load the dataset; close it even if plotting raises
    f = h5py.File(args.dataset, "r+")
    try:
        demos = list(f["data"].keys())
        inds = np.argsort([int(elem[5:]) for elem in demos])
        demos = [demos[i] for i in inds]

        # the third term in args.dataset is the task name
        task = args.dataset.split("/")[2]

        assert args.start_idx >= 0 and args.end_idx < len(demos)
        for idx in tqdm(
            range(args.start_idx, args.end_idx + 1), desc="Visualizing waypoints"
        ):
            ep = demos[idx]

            # prepare initial states to reload from
            states = f[f"data/{ep}/states"][()]
            initial_states = []
            for i in range(len(states)):
                initial_states.append(dict(states=states[i]))
                initial_states[i]["model"] = f[f"data/{ep}"].attrs["model_file"]
            traj_len = states.shape[0]

            # load the ground truth eef pos and rot, joint pos, and gripper qpos
            eef_pos = f[f"data/{ep}/obs/robot0_eef_pos"][()]
            eef_quat = f[f"data/{ep}/obs/robot0_eef_quat"][()]
            joint_pos = f[f"data/{ep}/obs/robot0_joint_pos"][()]
            gt_states = [
                dict(
                    robot0_eef_pos=eef_pos[i],
                    robot0_eef_quat=eef_quat[i],
                    robot0_joint_pos=joint_pos[i],
                )
                for i in range(traj_len)
            ]

            # load absolute actions; fix: narrow the bare except so unrelated
            # errors (I/O, typos) are not reported as "no absolute actions"
            try:
                actions = f[f"data/{ep}/abs_actions"][()]
            except KeyError:
                print("No absolute actions found, need to convert first.")
                raise NotImplementedError

            fig = plt.figure(
                figsize=(10 * len(args.err_threshold), 10)
            )  # adjusted size based on the number of thresholds
            for i, err_thresh in enumerate(args.err_threshold):
                ax = fig.add_subplot(1, len(args.err_threshold), i + 1, projection="3d")
                waypoints = waypoint_selection(
                    env=env,
                    actions=actions,
                    gt_states=gt_states,
                    err_threshold=err_thresh,
                    initial_states=initial_states,
                    remove_obj=True,
                )
                num_waypoints.append(len(waypoints))
                num_frames.append(traj_len)

                # remove the tick labels
                ax.set_xticklabels([])
                ax.set_yticklabels([])
                ax.set_zticklabels([])
                ax.set_title(f"Error budget = {err_thresh}", fontsize=26)

                gt_pos = [s["robot0_eef_pos"] for s in gt_states]
                plot_3d_trajectory(ax, gt_pos, label="ground truth", legend=False)
                # waypoint_states is the slice of gt_pos that corresponds to the waypoints
                # prepend 0 to waypoints to include the initial state
                waypoints = [0] + waypoints
                waypoint_states = [gt_pos[i] for i in waypoints]
                plot_3d_trajectory(ax, waypoint_states, label="waypoints", legend=False)

            fig.suptitle(
                f"Task: {task}", fontsize=30
            )  # set a common title for all subplots
            handles, labels = ax.get_legend_handles_labels()
            fig.legend(
                handles, labels, loc="lower center", ncol=2, fontsize=26
            )  # larger font size for legend, adjusted position
            fig.tight_layout(rect=[0, 0.03, 1, 0.95])  # adjust the subplot margins
            # reduce the margin between the subplots
            fig.subplots_adjust(wspace=0.01, hspace=0.01)
            # reduce left and right margins
            fig.subplots_adjust(left=0.05, right=0.95)
            fig.savefig(f"plot/epsilon/waypoint_{task}.png")
            plt.close(fig)
    finally:
        f.close()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset",
        type=str,
        default="robomimic/datasets/can/ph/low_dim.hdf5",
        help="path to hdf5 dataset",
    )
    # index of the trajectory to playback. If omitted, playback trajectory 0.
    parser.add_argument(
        "--start_idx",
        type=int,
        default=1,
        help="(optional) start index of the trajectory to playback",
    )
    parser.add_argument(
        "--end_idx",
        type=int,
        default=1,
        help="(optional) end index of the trajectory to playback",
    )
    # method (possible values: greedy, dp, backtrack)
    parser.add_argument(
        "--method",
        type=str,
        default="dp",
        help="(optional) method for waypoint selection",
    )
    # error threshold for reconstructing the trajectory
    parser.add_argument(
        "--err_threshold",
        type=float,
        nargs="+",  # updated to accept list of floats
        default=[0.01, 0.005],  # default is list of thresholds
        help="(optional) error thresholds for reconstructing the trajectory",
    )
    # multiplier for the simulation steps (may need more steps to ensure the robot reaches the goal pose)
    parser.add_argument(
        "--multiplier",
        type=int,
        default=10,
        help="(optional) multiplier for the simulation steps",
    )
    args = parser.parse_args()
    main(args)
/robot_awe-0.1.tar.gz/robot_awe-0.1/utils/robomimic_visualize_waypoint.py
0.553505
0.318744
robomimic_visualize_waypoint.py
pypi
import json
from robot.libraries.BuiltIn import BuiltIn
from robot.api.deco import keyword, library
from axe_selenium_python import Axe
from selenium import webdriver
from robot.api import logger
from robot.utils.asserts import fail
from .version import VERSION


@library
class RobotAxeLibrary:
    """Robot Framework keyword library wrapping axe-selenium-python for
    accessibility testing of the page currently open in SeleniumLibrary."""

    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    ROBOT_LIBRARY_VERSION = VERSION

    def __init__(self):
        # all state is populated by `check_for_accessibility_issues`
        self.axe = None
        self.result = None
        self.violations = None
        self.count_violations = None
        self.count_issues = None
        self.ignore_htmls = None
        self.ignore_targets = None

    def helloA11y(self):
        print("Hello from A11y")

    @keyword
    def check_for_accessibility_issues(self, type_issues=None, specific_issues=None, rules_config=None,
                                       ignore_targets=None, ignore_htmls=None,
                                       report_file='report_accessibility.json'):
        """ Executes accessibility tests in the current page and write the issues into the file pass in report_file variable.
        Return report, results and self.result

        With this keyword you can pass the type of issues, rules and wcag levels you want to search, also you can filter out specific elements from the page
        | = Attribute =    | = Description = |
        | type_issues      | Pass the type of issues you want to check for, for example: tags or rule |
        | specific_issues  | Pass the specific issues you want to check, for example: for tags use wcag2a, wcag2aa, for rules use area-alt, color-contrast |
        | rules_config     | Pass the rules to check or to not check, using a dict with the rules follow by the key enabled, for example like: {"rules": {"color-contrast": { "enabled": 0 }, "heading-order": { "enabled": 1 }}} |
        | ignore_targets   | Pass the elements you dont want to check, using the target information |
        | ignore_htmls     | Pass the elements you dont want to check, using the html information |
        | report_file      | File to store accessibility issues result for example: report_accessibility.json |
        """
        seleniumlib = BuiltIn().get_library_instance('SeleniumLibrary')
        # fix: local name used to shadow the `selenium.webdriver` module import
        driver = seleniumlib.driver
        # fix: identity comparison with None (PEP 8) instead of `== None`
        if ignore_htmls is None:
            ignore_htmls = []
        else:
            logger.info('Ignoring elements html: {}'.format(ignore_htmls))
        if ignore_targets is None:
            ignore_targets = []
        else:
            logger.info('Ignoring elements target: {}'.format(ignore_targets))
        self.ignore_htmls = ignore_htmls
        self.ignore_targets = ignore_targets

        self.axe = Axe(driver)
        self.axe.inject()
        if type_issues is not None and specific_issues is not None:
            logger.info('Searching for accesibility issues with options: {}'.format(specific_issues))
            self.result = self.axe.run(
                options={
                    "runOnly": {
                        "type": type_issues,
                        "values": specific_issues
                    }
                }
            )
        elif rules_config is not None:
            self.result = self.axe.run(options=rules_config)
        else:
            self.result = self.axe.run()

        self.axe.write_results(self.result, report_file)
        report = self.axe.report(self.result["violations"])
        results = {"violations": self.result["violations"],
                   "count_violations": len(self.result["violations"])}
        self.violations = self.result["violations"]
        self.count_violations = len(self.result["violations"])
        # an "issue" is a single offending node not covered by the ignore lists
        self.count_issues = 0
        for violation in self.violations:
            for node in violation['nodes']:
                if node['html'] not in ignore_htmls and node['target'][0] not in ignore_targets:
                    self.count_issues += 1
        return report, results, self.result

    @keyword
    def get_json_accessibility_result(self):
        """ Return accessibility test self.result in Json format.
        Need to be used after `Check for accessibility issues` keyword
        """
        result = json.dumps(self.result, indent=3)
        logger.info(result)
        return result

    @keyword
    def should_not_exceed_maximum_violations(self, maxAcceptableViolations):
        """ Return an error if count_violations > maxAcceptableViolations """
        logger.info('You have {} violations'.format(self.count_violations))
        self.log_accessibility_result()
        if self.count_violations > int(maxAcceptableViolations):
            return fail("You have {} violations and you just accept {}".format(self.count_violations, maxAcceptableViolations))

    @keyword
    def should_not_exceed_maximum_issues(self, maxAcceptableIssues):
        """ Return an error if count_issues > maxAcceptableIssues """
        logger.info('You have {} issues'.format(self.count_issues))
        self.log_accessibility_result()
        if self.count_issues > int(maxAcceptableIssues):
            return fail("You have {} issues and you just accept {}".format(self.count_issues, maxAcceptableIssues))

    @keyword
    def log_accessibility_result(self):
        """ Inserts accessibility result into the `log.html` file.
        Need to be used after `Check for accessibility issues` keyword
        """
        # first pass: one summary table per violation that has at least one
        # node not filtered out by the ignore lists
        for violation in self.violations:
            # fix: flag is a boolean throughout (was initialised to 0)
            nodes_violation = False
            for node in violation['nodes']:
                if node['html'] not in self.ignore_htmls and node['target'][0] not in self.ignore_targets:
                    nodes_violation = True
            if nodes_violation:
                style = """
                    <style>
                        #demo table, #demo th, #demo td{
                            border: 1px dotted black;
                            border-collapse: collapse;
                            table-layout: auto;
                        }
                    </style>
                """
                table_issues = """
                    <table id="demo" style="width:100%%">
                        <tr>
                            <th style="width:10%%">Violation id</th>
                            <th style="width:50%%">Violation</th>
                            <th style="width:5%%">How to fix</th>
                            <th style="width:7%%">Impact</th>
                            <th>Tags</th>
                        </tr>
                        <tr>
                            <td style="text-align:center">%s</td>
                            <td>%s</td>
                            <td style="text-align:center"><a href="%s">Link</a></td>
                            <td style="text-align:center">%s</td>
                            <td style="text-align:center">%s</td>
                        </tr>
                    </table>
                """ % (str(violation['id']), str(violation['description']), str(violation['helpUrl']), str(violation['impact']), str(violation['tags']))
                html_text = style + table_issues
                logger.info(html_text, html=True)

        # second pass: one detail table per individual offending node
        for violation in self.violations:
            for node in violation['nodes']:
                if node['html'] not in self.ignore_htmls and node['target'][0] not in self.ignore_targets:
                    style = """
                        <style>
                            #demo table, #demo th, #demo td{
                                border: 1px dotted black;
                                border-collapse: collapse;
                                table-layout: auto;
                            }
                        </style>
                    """
                    table_issues = """
                        <table id="demo" style="width:100%%">
                            <tr>
                                <th style="width:10%%">Violation id</th>
                                <th>Issue</th>
                                <th style="width:20%%">html</th>
                                <th style="width:20%%">target</th>
                            </tr>
                            <tr>
                                <td style="text-align:center">%s</td>
                                <td>%s</td>
                                <td>%s</td>
                                <td>%s</td>
                            </tr>
                        </table>
                    """ % (violation['id'], node['any'][0]['message'], node['html'].replace('<', '&lt;').replace('>', '&gt;'), node['target'][0])
                    new_html = style + table_issues
                    logger.info(new_html, html=True)
/robot-axelibrary-0.1.8.tar.gz/robot-axelibrary-0.1.8/src/RobotAxeLibrary/RobotAxeLibrary.py
0.589244
0.181535
RobotAxeLibrary.py
pypi
import threading
import time
import random


class Obstruction(Exception):
    """Raised when the robot cannot find a clear path within the evasion time limit."""
    pass


class RobotMovement(threading.Thread):
    """
    Lowest level robot movement object
    """
    def __init__(self, robot, *args, **kwargs):
        self.robot = robot
        super(RobotMovement, self).__init__(*args, **kwargs)


class ObstacleAvoidance(RobotMovement):
    """
    Movement class for an autonomous, obstacle avoidance robot
    """
    _running = True
    max_evasion_time = 5  # seconds to keep turning before declaring an Obstruction

    def __init__(self, robot, *args, **kwargs):
        """
        Updates the class to tell the robot to brake on receiving
        an ON_OBSTACLE_[[position]] signal
        """
        super(ObstacleAvoidance, self).__init__(robot, *args, **kwargs)
        for sensor in self.robot.sensors:
            if 'FRONT' in sensor.position:
                self.robot.add_callback('ON_OBSTACLE_%s' % sensor.position, self.robot.brake)

    @property
    def clear_ahead(self):
        return self.robot.clear_ahead

    @property
    def clear_left(self):
        return self.robot.clear_left

    @property
    def clear_right(self):
        return self.robot.clear_right

    def pick_direction(self):
        """
        Checks obstacle sensors to see if there is a clear path on either side
        to turn to. If Robot cannot establish that one side is clearer than the
        other, a direction is picked at random

        :return: `self.robot.turn_left` or `self.robot.turn_right`
        """
        if self.clear_left == self.clear_right:
            return self.pick_random_direction()
        if self.clear_left > self.clear_right:
            return self.robot.turn_left
        return self.robot.turn_right

    def pick_random_direction(self):
        """
        Chooses a random direction in which to turn while evading if robot
        cannot establish a preferable direction in which to turn
        """
        return random.choice([self.robot.turn_left, self.robot.turn_right])

    def evade(self):
        """
        This method is called if an obstacle is detected - attempts to move
        the robot away from the obstacle

        :raises Obstruction: if no clear path is found within max_evasion_time
        """
        self.robot.brake()
        turn = self.pick_direction()
        timer = 0
        while not self.clear_ahead:
            turn()
            time.sleep(0.1)
            timer += 0.1
            if timer > self.max_evasion_time:
                raise Obstruction('Unable to find a clear path to proceed')

    def run(self):
        # fix: only evade when the path ahead is blocked; the original called
        # evade() on every iteration, which brakes the robot even when clear
        while self._running:
            if self.robot.clear_ahead:
                self.robot.move_forwards()
            else:
                self.evade()

    def stop(self):
        self._running = False
        self.robot.brake()
        self.join()
/robot-blocks-0.1.0a8.tar.gz/robot-blocks-0.1.0a8/robotblocks/movement.py
0.830319
0.33805
movement.py
pypi
import threading
import time
from blinker import signal
import logging


class BaseObstacleSensor(threading.Thread):
    """
    Lowest level sensor object
    """
    threshold = 0      # readings above this count as "clear"
    constant = False
    output = 0         # last reading taken by run()
    _reading = True    # cleared by stop() to end the run() loop
    delay = 0          # seconds to sleep between readings

    def __init__(self, gpio, position, **kwargs):
        super(BaseObstacleSensor, self).__init__()
        self.position = position
        self.input_pin = kwargs.pop('input_pin', None)
        self.output_pin = kwargs.pop('output_pin', None)
        self.gpio = gpio
        self.gpio_setup()

    def gpio_setup(self):
        """
        sets up the GPIO pins prior to use
        """
        self.gpio.setup(self.output_pin, self.gpio.OUT)
        self.gpio.setup(self.input_pin, self.gpio.IN)
        self.gpio.output(self.output_pin, False)

    def test(self):
        """
        Must be implemented by subclasses and return a number
        (e.g. 0 for obstruction, 1 for clear)
        """
        raise NotImplementedError()

    @property
    def clear(self):
        return self.output > self.threshold

    def read(self):
        # emit an obstacle signal whenever the reading falls below threshold
        output = self.test()
        if output < self.threshold:
            signal('ON_OBSTACLE_%s' % self.position).send()
        return output

    def run(self):
        while True:
            self.output = self.read()
            time.sleep(self.delay)
            if not self._reading:
                break

    def stop(self):
        self._reading = False
        self.join()


class InfraredSensor(BaseObstacleSensor):
    """
    Infra red sensor
    """
    threshold = 0.1
    # self.test() will return 0 if not clear or 1 if clear

    def test(self):
        return self.gpio.input(self.input_pin) == 0


class UltrasoundSensor(BaseObstacleSensor):
    """
    Ultrasound sensor
    """
    threshold = 30  # when self.test() returns less than this value, self.clear will return False
    delay = 0.1
    pulse_timeout = 0.1  # max seconds to wait for an echo edge

    def test(self):
        """
        Takes a reading from the ultrasound sensor and returns the distance in cm
        """
        # trigger a 10us pulse on the output pin
        self.gpio.output(self.output_pin, True)
        time.sleep(0.00001)
        self.gpio.output(self.output_pin, False)

        # fix: bound both edge-detection loops with a deadline; the original
        # while-loops spin forever if the echo pin never changes state (e.g. a
        # disconnected or faulty sensor), hanging the sensor thread
        pulse_start = pulse_end = time.time()
        deadline = pulse_start + self.pulse_timeout
        while self.gpio.input(self.input_pin) == 0 and time.time() < deadline:
            pulse_start = time.time()
        while self.gpio.input(self.input_pin) == 1 and time.time() < deadline:
            pulse_end = time.time()

        pulse_duration = pulse_end - pulse_start
        # 17150 cm/s is half the speed of sound: pulse travels there and back
        distance = round(pulse_duration * 17150, 2)
        logging.info('Ultrasound sensor at position %s returned a reading of %s cm' % (self.position, distance))
        return distance
/robot-blocks-0.1.0a8.tar.gz/robot-blocks-0.1.0a8/robotblocks/hardware/sensors.py
0.744471
0.25731
sensors.py
pypi
import sys
import os.path
import operator
from functools import partial
from io import StringIO

from robot_brains import symtable
from robot_brains.scanner import Token, syntax_error

Num_label_bits = 1  # running bit
Max_bits_used = 0


def add_hook_args_to_todo(cls, hook_name, fn, todo_list):
    """Register a post hook that queues `fn` (with the hook's args bound) onto
    `todo_list` for later execution by run_todo_lists."""
    cls.add_post_hook(hook_name, lambda *args: todo_list.append(partial(fn, *args)))


def todo(todo_list):
    """Decorator: append the decorated function to `todo_list` unchanged."""
    def decorator(fn):
        todo_list.append(fn)
        return fn
    return decorator


def todo_with_args(cls, hook_name, todo_list):
    """Decorator variant of add_hook_args_to_todo."""
    def decorator(fn):
        add_hook_args_to_todo(cls, hook_name, fn, todo_list)
        return fn
    return decorator


def run_todo_lists():
    """Run every queued function in the target language's Todo_lists, in order."""
    for list_name, todo_list in Target_language.Todo_lists:
        print("Running", list_name)
        for fn in todo_list:
            fn()


def init_code_generator(target_language, rootdir):
    """Install the target language and precompute its operator precedence table."""
    global Target_language, Precedence_lookup, Rootdir
    Target_language = target_language
    Rootdir = rootdir

    # {operator: (precedence_level, assoc)}
    Precedence_lookup = parse_precedence(Target_language.Precedence)
    target_language.Precedence_lookup = Precedence_lookup


def relative_path(filename):
    """Strip the root directory (and its trailing separator) from `filename`."""
    return filename[len(Rootdir) + 1:]


def generate(opmode):
    """Drive code generation: run the opmode's setup, then all queued todos."""
    Target_language.Opmode = opmode
    print("generate: running opmode.setup")
    opmode.setup()
    print()
    print("generate: running Todo_lists")
    run_todo_lists()
    print("generate: done")


def parse_precedence(precedence):
    """Turn a precedence table [(assoc, op, op, ...), ...] (lowest first) into
    {op: (level, assoc)} with higher levels binding tighter."""
    return {op: (i, assoc)
            for i, (assoc, *opers) in enumerate(reversed(precedence), 1)
            for op in opers}


def wrap(expr, precedence, side):
    """Return expr.code, parenthesized unless precedence/associativity make the
    parens redundant on the given side ('left' or 'right')."""
    if expr.precedence > precedence or \
       expr.precedence == precedence and expr.assoc == side:
        return expr.code
    return f"({expr.code})"


def compile_unary(op, expr):
    return Target_language.Unary_exprs[op.upper()](expr)


def compile_binary(expr1, op, expr2):
    return Target_language.Binary_exprs[op.upper()](expr1, expr2)


def unary(op, format, expr):
    """Build a (precedence, assoc, code) triple for a unary expression."""
    precedence, assoc = Precedence_lookup[op]
    return (precedence, assoc, format.format(wrap(expr, precedence, 'right')))


def binary(op, format, left_expr, right_expr, *, result_op=None):
    """Build a (precedence, assoc, code) triple for a binary expression.

    `result_op`, when given, names the operator whose precedence the *result*
    carries (e.g. when the generated code uses a different operator).
    """
    precedence, assoc = Precedence_lookup[op]
    if result_op is None:
        result_prec, result_assoc = precedence, assoc
    else:
        # fix: Precedence_lookup is a dict; the original *called* it
        # (Precedence_lookup(result_op)), raising TypeError whenever a
        # result_op was supplied
        result_prec, result_assoc = Precedence_lookup[result_op]
    return (result_prec, result_assoc,
            format.format(wrap(left_expr, precedence, 'left'),
                          wrap(right_expr, precedence, 'right')))


def subscript(left, right):
    # range checking must be done somewhere else...
    precedence, assoc = Precedence_lookup['[']
    return (precedence, assoc,
            f"{wrap(left, precedence, 'left')}[{right.code}]")


def translate_name(name):
    """Map a source identifier to a legal, non-reserved target identifier."""
    if isinstance(name, Token):
        name = name.value
    if not name[-1].isdecimal() and not name[-1].isalpha():
        return name[:-1].lower() + '_'
    if name.lower() in Target_language.Reserved_words:
        return name.lower() + '_'
    return name.lower()


def translate_type(type):
    """Resolve a symtable type to the target language's type spelling."""
    type = type.get_type()
    if isinstance(type, symtable.Builtin_type):
        return Target_language.Types[type.name.lower()]
    if isinstance(type, symtable.Label_type):
        return Target_language.translate_label_type(type)
    assert isinstance(type, symtable.Typename_type)
    return translate_type(type.typedef.type)
/robot_brains-0.1.0-py3-none-any.whl/robot_brains/code_generator.py
0.4206
0.217421
code_generator.py
pypi
_tabversion = '3.10' _lr_method = 'LALR' _lr_signature = "filenonassocNOTnonassocEQNEQnonassoc<LEQLAEQ>GEQGAEQAEQNAEQleft+-right/left*%INTEGER_DIVIDErightUMINUSABSright^left.ABS AEQ AS AUTONOMOUS BOOLEAN BOOLEAN_LIT CONTINUE DIM DLT_DELIMITER DLT_MAP DLT_MASK DONE EQ FLOAT FLOAT_LIT FROM FUNCTION GAEQ GEQ GOT GOTO IDENT INTEGER INTEGER_DIVIDE INTEGER_LIT IS KEYWORD LABEL LAEQ LEQ MODULE NAEQ NATIVE_STRING_LIT NEQ NEWLINE NOT OPEQ OPMODE OPT_KEYWORD RETURN RETURNING RETURNING_TO RETURN_LABEL SET STRING STRING_LIT SUBROUTINE TAKING TELEOP TO TYPE USE VAR WITH\n pos_arguments :\n kw_arguments :\n parameter_types_list :\n steps :\n statements :\n action_statements :\n dotted_prefix :\n \n const_expr : STRING_LIT\n | FLOAT_LIT\n | INTEGER_LIT\n | BOOLEAN_LIT\n expr : primary\n primary : simple_primary\n native_element : primary\n | NATIVE_STRING_LIT\n type : simple_type\n statement : simple_statement newlines\n | dlt\n actions : action\n parameter_types_list : parameter_types_list1\n opmode_type : AUTONOMOUS\n | TELEOP\n \n simple_primary : '(' expr ')'\n simple_type : '(' type ')'\n const_expr : '(' const_expr ')'\n returning_opt : RETURNING parameter_types\n taking_opt : TAKING parameter_types\n from_opt : FROM primary\n label_decl : FUNCTION fn_name parameters set_returning_opt newlines\n | SUBROUTINE sub_name parameters newlines\n | LABEL label_name parameters newlines\n \n newlines_opt :\n newlines_opt : newlines_opt NEWLINE\n newlines : NEWLINE\n newlines : newlines NEWLINE\n opmode : opmode_type OPMODE make_opmode newlines uses typedefs\n typedefs :\n | typedefs typedef\n vartypes :\n | vartypes vartype\n parameters : pos_parameters kw_parameters\n pos_parameters : required_parameters\n pos_parameters : required_parameters '?' 
optional_parameters\n required_parameters :\n required_parameters : required_parameters required_parameter\n optional_parameters : optional_parameter\n optional_parameters : optional_parameters optional_parameter\n kw_parameters :\n | kw_parameters keyword pos_parameters\n returning_opt :\n uses :\n | uses use\n taking_opt :\n from_opt :\n \n conditions : condition\n parameter_types_list1 : simple_type\n kw_parameter_types : kw_parameter_type\n dimensions : dimension\n subscripts : expr\n native_elements : NATIVE_STRING_LIT\n \n kw_arguments : kw_arguments kw_argument\n statements : statements statement\n conditions : conditions condition\n pos_arguments : pos_arguments primary\n kw_parameter_types : kw_parameter_types kw_parameter_type\n parameter_types_list1 : parameter_types_list1 simple_type\n action_statements : action_statements simple_statement newlines\n action_statements : action_statements continue newlines\n native_elements : native_elements native_element\n dotted_prefix : dotted_prefix IDENT '.'\n \n dimensions : dimensions ',' dimension\n subscripts : subscripts ',' expr\n actions : actions action\n step : label_decl typedefs vartypes statements\n \n steps : steps step\n \n arguments : pos_arguments kw_arguments\n kw_argument : KEYWORD pos_arguments\n parameter_types : pos_parameter_types kw_parameter_types\n \n kw_parameter_type : KEYWORD pos_parameter_types\n kw_parameter_type : OPT_KEYWORD pos_parameter_types\n \n module : MODULE make_module parameters newlines uses typedefs vartypes steps\n \n type : SUBROUTINE taking_opt\n | LABEL taking_opt\n \n type : FUNCTION taking_opt returning_opt\n \n simple_type : INTEGER\n | FLOAT\n | BOOLEAN\n | STRING\n | MODULE\n | TYPE\n simple_type : dotted_prefix IDENT\n dimension : const_expr\n \n primary : STRING_LIT\n | FLOAT_LIT\n | INTEGER_LIT\n | BOOLEAN_LIT\n \n simple_primary : RETURN_LABEL\n \n simple_primary : simple_primary '.' 
RETURN_LABEL\n \n simple_primary : IDENT\n lvalue : IDENT\n \n simple_primary : simple_primary '.' IDENT\n lvalue : simple_primary '.' IDENT\n \n simple_primary : simple_primary '[' subscripts ']'\n lvalue : simple_primary '[' subscripts ']'\n primary : GOT KEYWORDprimary : GOT IDENT '.' KEYWORDprimary : GOT MODULE '.' KEYWORDprimary : GOT IDENTprimary : GOT IDENT '.' IDENTprimary : GOT MODULE '.' IDENT\n expr : ABS expr\n | NOT expr\n | '-' expr %prec UMINUS\n \n expr : expr '^' expr\n | expr '*' expr\n | expr '/' expr\n | expr INTEGER_DIVIDE expr\n | expr '%' expr\n | expr '+' expr\n | expr '-' expr\n | expr '<' expr\n | expr LEQ expr\n | expr LAEQ expr\n | expr '>' expr\n | expr GEQ expr\n | expr GAEQ expr\n | expr EQ expr\n | expr AEQ expr\n | expr NEQ expr\n | expr NAEQ expr\n \n expr : native_elements\n \n parameter_types : pos_parameter_types1\n pos_parameter_types1 : parameter_types_list1\n pos_parameter_types : parameter_types_list\n \n pos_parameter_types1 : parameter_types_list '?' parameter_types_list1\n pos_parameter_types : parameter_types_list '?' 
parameter_types_list1\n \n const_expr : '-' const_expr %prec UMINUS\n \n const_expr : const_expr '^' const_expr\n | const_expr '*' const_expr\n | const_expr '/' const_expr\n | const_expr '%' const_expr\n | const_expr '+' const_expr\n | const_expr '-' const_expr\n \n const_expr : NOT const_expr\n \n continue : CONTINUE\n \n simple_statement : SET extended_kws lvalue TO normal_kws primary\n \n simple_statement : GOTO primary arguments\n \n simple_statement : extended_kws RETURN arguments from_opt normal_kws\n \n simple_statement : extended_kws RETURN arguments from_opt TO primary normal_kws\n extended_kws : normal_kws : \n simple_statement : primary arguments\n \n simple_statement : primary arguments RETURNING_TO primary\n \n simple_statement : lvalue OPEQ primary\n \n simple_statement : extended_kws DONE normal_kws\n \n simple_statement : extended_kws DONE WITH IDENT normal_kws\n \n simple_statement : native_elements\n \n file : newlines_opt opmode\n | newlines_opt module\n \n simple_primary : '{' primary arguments '}'\n make_opmode :make_module :\n required_parameter : IDENT\n \n optional_parameter : IDENT\n keyword : KEYWORDkeyword : OPT_KEYWORD\n use : USE IDENT arguments newlines\n \n use : USE IDENT AS IDENT arguments newlines\n \n typedef : TYPE IDENT IS type newlines\n set_returning_opt : returning_opt\n vartype : VAR IDENT IS type newlines\n \n vartype : DIM IDENT '[' dimensions ']' newlines\n fn_name : IDENTsub_name : IDENTlabel_name : IDENT\n dlt : DLT_DELIMITER NEWLINE dlt_conditions DLT_DELIMITER NEWLINE dlt_actions DLT_DELIMITER newlines\n \n condition : DLT_MASK expr newlines\n \n action : DLT_MAP action_statements\n \n action : DLT_MAP newlines action_statements\n dlt_conditions : conditionsdlt_actions : actions" _lr_action_items = 
{'NEWLINE':([0,2,5,7,10,11,12,13,14,15,16,17,18,19,21,22,24,26,27,28,29,30,31,36,37,40,43,45,51,52,53,54,55,56,57,58,59,61,63,72,73,74,75,76,77,79,80,81,82,83,85,86,87,90,91,94,98,99,102,103,104,105,106,107,110,111,113,114,116,117,118,119,120,125,143,144,145,146,147,148,151,152,153,154,165,167,169,170,171,173,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,200,201,202,203,204,205,216,217,218,219,221,222,225,230,232,233,234,235,236,237,246,247,248,249,250,251,252,254,255,257,258,265,266,268,270,275,277,279,281,282,283,285,286,288,290,291,293,294,295,299,300,301,304,305,306,307,308,310,311,],[-32,5,-33,-162,-161,-44,17,17,-48,-42,24,-34,24,-41,-45,-163,-35,-44,-165,-166,-43,-46,-164,-49,-47,-1,17,-2,24,-1,-76,-64,-13,-93,-94,-95,-96,-99,-97,-90,17,-16,-53,-53,-53,-85,-86,-87,-88,-89,17,-61,-1,-105,-108,-12,-131,-60,-44,-173,-44,-174,-44,-175,24,-82,-83,-50,-91,24,-77,-98,-101,-23,-111,-112,-113,-69,-14,-15,-50,17,17,17,-27,-132,-133,-56,-84,-24,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,-129,-130,-160,17,-170,24,24,24,17,-78,-57,-3,-3,-66,-26,17,-1,-99,-157,259,-13,24,24,-65,-79,-134,-20,-80,-135,24,-1,-151,-152,-1,-54,-155,-154,-147,-101,-136,-151,-151,-153,292,17,-103,-148,-28,-156,24,-146,-151,17,-149,17,24,24,17,17,-145,24,24,]),'MODULE':([0,2,5,50,60,72,78,79,80,81,82,83,108,112,116,169,170,172,173,218,219,220,221,249,251,262,277,],[-32,7,-33,83,92,-90,83,-85,-86,-87,-88,-89,83,83,-91,83,-56,83,-24,83,83,83,-66,83,83,83,83,]),'AUTONOMOUS':([0,2,5,],[-32,8,-33,]),'TELEOP':([0,2,5,],[-32,9,-33,]),'$end':([1,3,4,16,17,18,23,24,25,32,33,35,38,41,46,47,51,65,66,101,110,117,150,199,202,203,204,224,226,236,237,252,305,],[0,-158,-159,-51,-34,-51,-37,-35,-37,-36,-52,-39,-38,-4,-81,-40,-167,-75,-37,-39,-169,-168,-5,-74,-30,-31,-171,-62,-18,-29,-172,-17,-176,]),'OPMODE':([6,8,9,],[10,-21,-22,]),'KEYWORD':([7,11,14,15,19,21,22,26,27,28,29,30,31,36,37,40,45,52,53,54,55,56,57,5
8,59,60,61,63,72,79,80,81,82,83,86,87,90,91,100,102,103,104,105,106,107,112,116,118,119,120,123,124,125,166,168,169,170,172,173,175,177,178,179,180,198,216,217,218,219,221,230,232,235,246,247,248,249,250,251,254,258,275,277,286,],[-162,-44,-48,-42,27,-45,-163,-44,-165,-166,-43,-46,-164,-49,-47,-1,-2,-1,87,-64,-13,-93,-94,-95,-96,90,-99,-97,-90,-85,-86,-87,-88,-89,-61,-1,-105,-108,-1,-44,-173,-44,-174,-44,-175,-3,-91,-77,-98,-101,178,179,-23,218,-134,-20,-56,-3,-24,-103,-109,-106,-107,-110,-160,218,-57,-3,-3,-66,-1,-99,-13,-65,-79,-134,-20,-80,-136,-1,-1,-101,-136,-103,]),'OPT_KEYWORD':([7,11,14,15,19,21,22,26,27,28,29,30,31,36,37,72,79,80,81,82,83,102,103,104,105,106,107,112,116,166,168,169,170,172,173,216,217,218,219,221,246,247,248,249,250,251,277,],[-162,-44,-48,-42,28,-45,-163,-44,-165,-166,-43,-46,-164,-49,-47,-90,-85,-86,-87,-88,-89,-44,-173,-44,-174,-44,-175,-3,-91,219,-134,-20,-56,-3,-24,219,-57,-3,-3,-66,-65,-79,-134,-20,-80,-136,-136,]),'?':([7,11,15,21,22,26,27,28,72,79,80,81,82,83,102,103,104,105,106,107,112,116,168,169,170,172,173,218,219,221,248,249,],[-162,-44,20,-45,-163,-44,-165,-166,-90,-85,-86,-87,-88,-89,-44,-173,-44,-174,-44,-175,-3,-91,220,-20,-56,-3,-24,-3,-3,-66,262,-20,]),'IDENT':([7,11,15,17,20,21,22,24,26,27,28,29,30,31,34,37,38,39,40,44,45,47,48,49,50,52,54,55,56,57,58,59,60,61,62,63,64,66,67,68,69,72,78,79,80,81,82,83,84,87,88,89,90,91,95,96,97,98,99,100,101,102,103,104,105,106,107,108,110,112,116,118,119,120,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,146,147,148,150,169,170,172,173,174,175,176,177,178,179,180,198,199,202,203,204,218,219,220,221,224,226,227,230,231,232,233,235,236,237,249,251,252,253,254,256,258,260,261,262,267,269,274,275,277,278,280,286,287,289,299,303,304,305,309,310,311,],[-162,-44,22,-34,31,-45,-163,-35,-44,-165,-166,31,-46,-164,40,-47,-38,42,-1,52,61,-40,70,71,-7,-1,-64,-13,-93,-94,-95,-96,91,-99,61,-97,61,-37,103,105,107,-90,-7,-85,-86,-87,-88,-89,116,-1,120,61,-105,-108,61,61,
61,61,-60,-1,-39,-44,-173,-44,-174,-44,-175,-7,-169,-7,-91,61,-98,-101,177,180,-23,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,-69,-14,-15,-5,-7,-56,-7,-24,-70,-103,61,-109,-106,-107,-110,-160,232,-30,-31,-171,-7,-7,-7,-66,-62,-18,-150,-1,61,-99,61,-13,-29,-172,-7,-7,-17,232,-1,61,-1,275,61,-7,281,61,61,-101,-7,-151,61,-103,61,61,-6,232,-6,-176,232,-67,-68,]),'RETURNING':([14,15,19,21,22,26,27,28,29,30,31,36,37,72,77,79,80,81,82,83,102,103,114,116,151,165,167,169,170,173,216,217,218,219,221,246,247,248,249,250,251,277,],[-48,-42,-41,-45,-163,-44,-165,-166,-43,-46,-164,-49,-47,-90,-53,-85,-86,-87,-88,-89,-44,-173,172,-91,172,-27,-132,-133,-56,-24,-78,-57,-3,-3,-66,-65,-79,-134,-20,-80,-135,-136,]),'USE':([16,17,18,23,24,25,33,51,117,],[-51,-34,-51,34,-35,34,-52,-167,-168,]),'TYPE':([16,17,18,23,24,25,32,33,35,38,50,51,66,72,78,79,80,81,82,83,101,108,110,112,116,117,169,170,172,173,202,203,218,219,220,221,236,249,251,262,277,],[-51,-34,-51,-37,-35,-37,39,-52,39,-38,72,-167,-37,-90,72,-85,-86,-87,-88,-89,39,72,-169,72,-91,-168,72,-56,72,-24,-30,-31,72,72,72,-66,-29,72,72,72,72,]),'VAR':([17,18,24,25,33,35,38,41,47,51,66,101,110,117,150,202,203,204,236,237,],[-34,-51,-35,-37,-52,-39,-38,48,-40,-167,-37,-39,-169,-168,48,-30,-31,-171,-29,-172,]),'DIM':([17,18,24,25,33,35,38,41,47,51,66,101,110,117,150,202,203,204,236,237,],[-34,-51,-35,-37,-52,-39,-38,49,-40,-167,-37,-39,-169,-168,49,-30,-31,-171,-29,-172,]),'FUNCTION':([17,18,24,25,33,35,38,41,46,47,50,51,65,66,78,101,108,110,117,150,199,202,203,204,224,226,236,237,252,305,],[-34,-51,-35,-37,-52,-39,-38,-4,67,-40,77,-167,-75,-37,77,-39,77,-169,-168,-5,-74,-30,-31,-171,-62,-18,-29,-172,-17,-176,]),'SUBROUTINE':([17,18,24,25,33,35,38,41,46,47,50,51,65,66,78,101,108,110,117,150,199,202,203,204,224,226,236,237,252,305,],[-34,-51,-35,-37,-52,-39,-38,-4,68,-40,75,-167,-75,-37,75,-39,75,-169,-168,-5,-74,-30,-31,-171,-62,-18,-29,-172,-17,-176,]),'LABEL':([17,18,24,25,33,35,38,41,46,47,50,51,65,66,78,101,108,110,117,150,19
9,202,203,204,224,226,236,237,252,305,],[-34,-51,-35,-37,-52,-39,-38,-4,69,-40,76,-167,-75,-37,76,-39,76,-169,-168,-5,-74,-30,-31,-171,-62,-18,-29,-172,-17,-176,]),'SET':([17,24,38,47,66,101,110,150,199,202,203,204,224,226,236,237,252,299,303,304,305,309,310,311,],[-34,-35,-38,-40,-37,-39,-169,-5,227,-30,-31,-171,-62,-18,-29,-172,-17,-6,227,-6,-176,227,-67,-68,]),'GOTO':([17,24,38,47,66,101,110,150,199,202,203,204,224,226,236,237,252,299,303,304,305,309,310,311,],[-34,-35,-38,-40,-37,-39,-169,-5,231,-30,-31,-171,-62,-18,-29,-172,-17,-6,231,-6,-176,231,-67,-68,]),'DLT_DELIMITER':([17,24,38,47,66,101,110,150,199,202,203,204,224,226,236,237,252,271,272,273,284,293,296,297,298,299,302,303,304,305,309,310,311,],[-34,-35,-38,-40,-37,-39,-169,-5,234,-30,-31,-171,-62,-18,-29,-172,-17,283,-180,-55,-63,-177,301,-181,-19,-6,-73,-178,-6,-176,-179,-67,-68,]),'STRING_LIT':([17,24,38,40,45,47,52,54,55,56,57,58,59,61,62,63,64,66,87,89,90,91,95,96,97,98,99,100,101,109,110,118,119,120,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,146,147,148,150,162,163,164,175,176,177,178,179,180,198,199,202,203,204,206,207,208,209,210,211,212,224,226,230,231,232,233,235,236,237,252,254,256,258,261,269,274,275,278,280,286,287,289,299,303,304,305,309,310,311,],[-34,-35,-38,-1,56,-40,-1,-64,-13,-93,-94,-95,-96,-99,56,-97,56,-37,-1,56,-105,-108,56,56,56,56,-60,-1,-39,158,-169,56,-98,-101,-23,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,-69,-14,-15,-5,158,158,158,-103,56,-109,-106,-107,-110,-160,56,-30,-31,-171,158,158,158,158,158,158,158,-62,-18,-1,56,-99,56,-13,-29,-172,-17,-1,56,-1,56,56,56,-101,-151,56,-103,56,56,-6,56,-6,-176,56,-67,-68,]),'FLOAT_LIT':([17,24,38,40,45,47,52,54,55,56,57,58,59,61,62,63,64,66,87,89,90,91,95,96,97,98,99,100,101,109,110,118,119,120,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,146,147,148,150,162,163,164,175,176,177,178,179,180,198,199,202,203,204,206,207,208,209,210,211,212,224,226,230,231,232,233,235,236,237,25
2,254,256,258,261,269,274,275,278,280,286,287,289,299,303,304,305,309,310,311,],[-34,-35,-38,-1,57,-40,-1,-64,-13,-93,-94,-95,-96,-99,57,-97,57,-37,-1,57,-105,-108,57,57,57,57,-60,-1,-39,159,-169,57,-98,-101,-23,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,-69,-14,-15,-5,159,159,159,-103,57,-109,-106,-107,-110,-160,57,-30,-31,-171,159,159,159,159,159,159,159,-62,-18,-1,57,-99,57,-13,-29,-172,-17,-1,57,-1,57,57,57,-101,-151,57,-103,57,57,-6,57,-6,-176,57,-67,-68,]),'INTEGER_LIT':([17,24,38,40,45,47,52,54,55,56,57,58,59,61,62,63,64,66,87,89,90,91,95,96,97,98,99,100,101,109,110,118,119,120,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,146,147,148,150,162,163,164,175,176,177,178,179,180,198,199,202,203,204,206,207,208,209,210,211,212,224,226,230,231,232,233,235,236,237,252,254,256,258,261,269,274,275,278,280,286,287,289,299,303,304,305,309,310,311,],[-34,-35,-38,-1,58,-40,-1,-64,-13,-93,-94,-95,-96,-99,58,-97,58,-37,-1,58,-105,-108,58,58,58,58,-60,-1,-39,160,-169,58,-98,-101,-23,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,-69,-14,-15,-5,160,160,160,-103,58,-109,-106,-107,-110,-160,58,-30,-31,-171,160,160,160,160,160,160,160,-62,-18,-1,58,-99,58,-13,-29,-172,-17,-1,58,-1,58,58,58,-101,-151,58,-103,58,58,-6,58,-6,-176,58,-67,-68,]),'BOOLEAN_LIT':([17,24,38,40,45,47,52,54,55,56,57,58,59,61,62,63,64,66,87,89,90,91,95,96,97,98,99,100,101,109,110,118,119,120,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,146,147,148,150,162,163,164,175,176,177,178,179,180,198,199,202,203,204,206,207,208,209,210,211,212,224,226,230,231,232,233,235,236,237,252,254,256,258,261,269,274,275,278,280,286,287,289,299,303,304,305,309,310,311,],[-34,-35,-38,-1,59,-40,-1,-64,-13,-93,-94,-95,-96,-99,59,-97,59,-37,-1,59,-105,-108,59,59,59,59,-60,-1,-39,161,-169,59,-98,-101,-23,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,-69,-14,-15,-5,161,161,161,-103,59,-109,-106,-107,-110,-160,59,-30,-31,-171,161,161,161,161,161,161,161,-62,-18,-1,59
,-99,59,-13,-29,-172,-17,-1,59,-1,59,59,59,-101,-151,59,-103,59,59,-6,59,-6,-176,59,-67,-68,]),'GOT':([17,24,38,40,45,47,52,54,55,56,57,58,59,61,62,63,64,66,87,89,90,91,95,96,97,98,99,100,101,110,118,119,120,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,146,147,148,150,175,176,177,178,179,180,198,199,202,203,204,224,226,230,231,232,233,235,236,237,252,254,256,258,261,269,274,275,278,280,286,287,289,299,303,304,305,309,310,311,],[-34,-35,-38,-1,60,-40,-1,-64,-13,-93,-94,-95,-96,-99,60,-97,60,-37,-1,60,-105,-108,60,60,60,60,-60,-1,-39,-169,60,-98,-101,-23,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,-69,-14,-15,-5,-103,60,-109,-106,-107,-110,-160,60,-30,-31,-171,-62,-18,-1,60,-99,60,-13,-29,-172,-17,-1,60,-1,60,60,60,-101,-151,60,-103,60,60,-6,60,-6,-176,60,-67,-68,]),'NATIVE_STRING_LIT':([17,24,38,47,55,56,57,58,59,61,62,63,66,89,90,91,95,96,97,98,99,101,110,119,120,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,146,147,148,150,175,176,177,178,179,180,198,199,202,203,204,224,226,233,236,237,252,261,274,299,303,304,305,309,310,311,],[-34,-35,-38,-40,-13,-93,-94,-95,-96,-99,99,-97,-37,99,-105,-108,99,99,99,148,-60,-39,-169,-98,-101,-23,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,-69,-14,-15,-5,-103,99,-109,-106,-107,-110,-160,99,-30,-31,-171,-62,-18,148,-29,-172,-17,99,99,-6,99,-6,-176,99,-67,-68,]),'(':([17,24,38,40,45,47,50,52,54,55,56,57,58,59,61,62,63,64,66,72,78,79,80,81,82,83,87,89,90,91,95,96,97,98,99,100,101,108,109,110,112,116,118,119,120,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,146,147,148,150,162,163,164,169,170,172,173,175,176,177,178,179,180,198,199,202,203,204,206,207,208,209,210,211,212,218,219,220,221,224,226,227,230,231,232,233,235,236,237,249,251,252,253,254,256,258,261,262,269,274,275,277,278,280,286,287,289,299,303,304,305,309,310,311,],[-34,-35,-38,-1,62,-40,78,-1,-64,-13,-93,-94,-95,-96,-99,62,-97,62,-37,-90,78,-85,-86,-87,-88,-89,-1,62,-105,-108,62,62,6
2,62,-60,-1,-39,78,162,-169,78,-91,62,-98,-101,-23,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,-69,-14,-15,-5,162,162,162,78,-56,78,-24,-103,62,-109,-106,-107,-110,-160,62,-30,-31,-171,162,162,162,162,162,162,162,78,78,78,-66,-62,-18,-150,-1,62,-99,62,-13,-29,-172,78,78,-17,62,-1,62,-1,62,78,62,62,-101,78,-151,62,-103,62,62,-6,62,-6,-176,62,-67,-68,]),'RETURN_LABEL':([17,24,38,40,45,47,52,54,55,56,57,58,59,61,62,63,64,66,87,88,89,90,91,95,96,97,98,99,100,101,110,118,119,120,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,146,147,148,150,175,176,177,178,179,180,198,199,202,203,204,224,226,227,230,231,232,233,235,236,237,252,253,254,256,258,260,261,269,274,275,278,280,286,287,289,299,303,304,305,309,310,311,],[-34,-35,-38,-1,63,-40,-1,-64,-13,-93,-94,-95,-96,-99,63,-97,63,-37,-1,119,63,-105,-108,63,63,63,63,-60,-1,-39,-169,63,-98,-101,-23,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,-69,-14,-15,-5,-103,63,-109,-106,-107,-110,-160,63,-30,-31,-171,-62,-18,-150,-1,63,-99,63,-13,-29,-172,-17,63,-1,63,-1,119,63,63,63,-101,-151,63,-103,63,63,-6,63,-6,-176,63,-67,-68,]),'{':([17,24,38,40,45,47,52,54,55,56,57,58,59,61,62,63,64,66,87,89,90,91,95,96,97,98,99,100,101,110,118,119,120,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,146,147,148,150,175,176,177,178,179,180,198,199,202,203,204,224,226,227,230,231,232,233,235,236,237,252,253,254,256,258,261,269,274,275,278,280,286,287,289,299,303,304,305,309,310,311,],[-34,-35,-38,-1,64,-40,-1,-64,-13,-93,-94,-95,-96,-99,64,-97,64,-37,-1,64,-105,-108,64,64,64,64,-60,-1,-39,-169,64,-98,-101,-23,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,-69,-14,-15,-5,-103,64,-109,-106,-107,-110,-160,64,-30,-31,-171,-62,-18,-150,-1,64,-99,64,-13,-29,-172,-17,64,-1,64,-1,64,64,64,-101,-151,64,-103,64,64,-6,64,-6,-176,64,-67,-68,]),'RETURN':([17,24,38,47,66,101,110,150,199,202,203,204,224,226,228,236,237,252,299,303,304,305,309,310,311,],[-34,-35,-38,-40,-37,-39,-169,-5,-150,-30,-31,-17
1,-62,-18,254,-29,-172,-17,-6,-150,-6,-176,-150,-67,-68,]),'DONE':([17,24,38,47,66,101,110,150,199,202,203,204,224,226,228,236,237,252,299,303,304,305,309,310,311,],[-34,-35,-38,-40,-37,-39,-169,-5,-150,-30,-31,-171,-62,-18,255,-29,-172,-17,-6,-150,-6,-176,-150,-67,-68,]),'DLT_MASK':([17,24,259,272,273,284,293,],[-34,-35,274,274,-55,-63,-177,]),'CONTINUE':([17,24,299,303,304,309,310,311,],[-34,-35,-6,308,-6,308,-67,-68,]),'DLT_MAP':([17,24,292,297,298,299,302,303,304,309,310,311,],[-34,-35,299,299,-19,-6,-73,-178,-6,-179,-67,-68,]),'AS':([40,],[44,]),'IS':([42,70,],[50,108,]),'}':([45,53,54,55,56,57,58,59,61,63,86,87,90,91,100,118,119,120,125,149,175,177,178,179,180,198,],[-2,-76,-64,-13,-93,-94,-95,-96,-99,-97,-61,-1,-105,-108,-1,-77,-98,-101,-23,198,-103,-109,-106,-107,-110,-160,]),'RETURNING_TO':([45,53,54,55,56,57,58,59,61,63,86,87,90,91,118,119,120,125,175,177,178,179,180,198,230,232,235,257,275,286,],[-2,-76,-64,-13,-93,-94,-95,-96,-99,-97,-61,-1,-105,-108,-77,-98,-101,-23,-103,-109,-106,-107,-110,-160,-1,-99,-13,269,-101,-103,]),'FROM':([45,53,54,55,56,57,58,59,61,63,86,87,90,91,118,119,120,125,175,177,178,179,180,198,254,265,],[-2,-76,-64,-13,-93,-94,-95,-96,-99,-97,-61,-1,-105,-108,-77,-98,-101,-23,-103,-109,-106,-107,-110,-160,-1,280,]),'TO':([45,53,54,55,56,57,58,59,61,63,86,87,90,91,118,119,120,125,175,177,178,179,180,198,232,254,263,265,275,279,286,290,],[-2,-76,-64,-13,-93,-94,-95,-96,-99,-97,-61,-1,-105,-108,-77,-98,-101,-23,-103,-109,-106,-107,-110,-160,-100,-1,278,-54,-102,289,-104,-28,]),'INTEGER':([50,72,78,79,80,81,82,83,108,112,116,169,170,172,173,218,219,220,221,249,251,262,277,],[79,-90,79,-85,-86,-87,-88,-89,79,79,-91,79,-56,79,-24,79,79,79,-66,79,79,79,79,]),'FLOAT':([50,72,78,79,80,81,82,83,108,112,116,169,170,172,173,218,219,220,221,249,251,262,277,],[80,-90,80,-85,-86,-87,-88,-89,80,80,-91,80,-56,80,-24,80,80,80,-66,80,80,80,80,]),'BOOLEAN':([50,72,78,79,80,81,82,83,108,112,116,169,170,172,173,218,219,220,221,249,251,262,277,],[81,-90,81,
-85,-86,-87,-88,-89,81,81,-91,81,-56,81,-24,81,81,81,-66,81,81,81,81,]),'STRING':([50,72,78,79,80,81,82,83,108,112,116,169,170,172,173,218,219,220,221,249,251,262,277,],[82,-90,82,-85,-86,-87,-88,-89,82,82,-91,82,-56,82,-24,82,82,82,-66,82,82,82,82,]),')':([55,56,57,58,59,61,63,72,74,75,76,77,79,80,81,82,83,90,91,93,94,98,99,111,113,114,115,116,119,120,125,143,144,145,146,147,148,158,159,160,161,165,167,169,170,171,173,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,213,214,215,216,217,218,219,221,222,239,240,241,242,243,244,245,246,247,248,249,250,251,277,],[-13,-93,-94,-95,-96,-99,-97,-90,-16,-53,-53,-53,-85,-86,-87,-88,-89,-105,-108,125,-12,-131,-60,-82,-83,-50,173,-91,-98,-101,-23,-111,-112,-113,-69,-14,-15,-8,-9,-10,-11,-27,-132,-133,-56,-84,-24,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,-129,-130,-160,245,-137,-144,-78,-57,-3,-3,-66,-26,-138,-139,-140,-141,-142,-143,-25,-65,-79,-134,-20,-80,-135,-136,]),'^':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,157,158,159,160,161,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,213,214,215,223,239,240,241,242,243,244,245,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,126,-12,-131,-60,-98,-101,126,-23,126,126,126,-69,-14,-15,207,-8,-9,-10,-11,-103,-109,-106,-107,-110,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,-160,207,207,207,126,207,207,207,207,207,207,-25,126,]),'*':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,157,158,159,160,161,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,213,214,215,223,239,240,241,242,243,244,245,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,127,-12,-131,-60,-98,-101,127,-23,-111,127,-113,-69,-14,-15,208,-8,-9,-10,-11,-103,-109,-106,-107,-110,-114,-115,127,-117,-118,127,127,127,127,127,127,127,127,127,127,127,127,-160
,208,-137,208,127,-138,-139,208,-141,208,208,-25,127,]),'/':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,157,158,159,160,161,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,213,214,215,223,239,240,241,242,243,244,245,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,128,-12,-131,-60,-98,-101,128,-23,-111,128,-113,-69,-14,-15,209,-8,-9,-10,-11,-103,-109,-106,-107,-110,-114,-115,128,-117,-118,128,128,128,128,128,128,128,128,128,128,128,128,-160,209,-137,209,128,-138,-139,209,-141,209,209,-25,128,]),'INTEGER_DIVIDE':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,223,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,129,-12,-131,-60,-98,-101,129,-23,-111,129,-113,-69,-14,-15,-103,-109,-106,-107,-110,-114,-115,129,-117,-118,129,129,129,129,129,129,129,129,129,129,129,129,-160,129,129,]),'%':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,157,158,159,160,161,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,213,214,215,223,239,240,241,242,243,244,245,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,130,-12,-131,-60,-98,-101,130,-23,-111,130,-113,-69,-14,-15,210,-8,-9,-10,-11,-103,-109,-106,-107,-110,-114,-115,130,-117,-118,130,130,130,130,130,130,130,130,130,130,130,130,-160,210,-137,210,130,-138,-139,210,-141,210,210,-25,130,]),'+':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,157,158,159,160,161,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,213,214,215,223,239,240,241,242,243,244,245,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,131,-12,-131,-60,-98,-101,131,-23,-111,131,-113,-69,-14,-15,211,-8,-9,-10,-11,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,131,131,131,131,131,131,131,131,131,131,-160,211,-137,211,131,-138
,-139,-140,-141,-142,-143,-25,131,]),'-':([55,56,57,58,59,61,62,63,89,90,91,93,94,95,96,97,98,99,109,119,120,122,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,157,158,159,160,161,162,163,164,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,206,207,208,209,210,211,212,213,214,215,223,239,240,241,242,243,244,245,261,274,285,],[-13,-93,-94,-95,-96,-99,97,-97,97,-105,-108,132,-12,97,97,97,-131,-60,163,-98,-101,132,-23,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,-111,132,-113,-69,-14,-15,212,-8,-9,-10,-11,163,163,163,-103,97,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,132,132,132,132,132,132,132,132,132,132,-160,163,163,163,163,163,163,163,212,-137,212,132,-138,-139,-140,-141,-142,-143,-25,97,97,132,]),'<':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,223,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,133,-12,-131,-60,-98,-101,133,-23,-111,133,-113,-69,-14,-15,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,None,None,None,None,None,None,133,None,133,None,-160,133,133,]),'LEQ':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,223,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,134,-12,-131,-60,-98,-101,134,-23,-111,134,-113,-69,-14,-15,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,None,None,None,None,None,None,134,None,134,None,-160,134,134,]),'LAEQ':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,223,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,135,-12,-131,-60,-98,-101,135,-23,-111,135,-113,-69,-14,-15,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,None,No
ne,None,None,None,None,135,None,135,None,-160,135,135,]),'>':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,223,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,136,-12,-131,-60,-98,-101,136,-23,-111,136,-113,-69,-14,-15,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,None,None,None,None,None,None,136,None,136,None,-160,136,136,]),'GEQ':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,223,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,137,-12,-131,-60,-98,-101,137,-23,-111,137,-113,-69,-14,-15,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,None,None,None,None,None,None,137,None,137,None,-160,137,137,]),'GAEQ':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,223,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,138,-12,-131,-60,-98,-101,138,-23,-111,138,-113,-69,-14,-15,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,None,None,None,None,None,None,138,None,138,None,-160,138,138,]),'EQ':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,223,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,139,-12,-131,-60,-98,-101,139,-23,-111,139,-113,-69,-14,-15,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,None,-128,None,-130,-160,139,139,]),'AEQ':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,223,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,140,-12,-131,-60,-98,-101,140,-23,-111,140,-113,-69,-14,-15,-103,-1
09,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,None,None,None,None,None,None,140,None,140,None,-160,140,140,]),'NEQ':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,223,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,141,-12,-131,-60,-98,-101,141,-23,-111,141,-113,-69,-14,-15,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,None,-128,None,-130,-160,141,141,]),'NAEQ':([55,56,57,58,59,61,63,90,91,93,94,98,99,119,120,122,125,143,144,145,146,147,148,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,223,285,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,142,-12,-131,-60,-98,-101,142,-23,-111,142,-113,-69,-14,-15,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,None,None,None,None,None,None,142,None,142,None,-160,142,142,]),']':([55,56,57,58,59,61,63,90,91,94,98,99,119,120,121,122,125,143,144,145,146,147,148,155,156,157,158,159,160,161,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,214,215,223,238,239,240,241,242,243,244,245,276,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,-12,-131,-60,-98,-101,175,-59,-23,-111,-112,-113,-69,-14,-15,205,-58,-92,-8,-9,-10,-11,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,-129,-130,-160,-137,-144,-72,-71,-138,-139,-140,-141,-142,-143,-25,286,]),',':([55,56,57,58,59,61,63,90,91,94,98,99,119,120,121,122,125,143,144,145,146,147,148,155,156,157,158,159,160,161,175,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,214,215,223,238,239,240,241,242,243,244,245,276,],[-13,-93,-94,-95,-96,-99,-97,-105,-108,-12,-131,-60,-98,-101,176,-59,-23,-111,-112,-113,-69,-14,-15,206,-58,-92,-8,-9,-10,-11,-103,-109,-106,-107,-110,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,-129,-130,-160,-13
7,-144,-72,-71,-138,-139,-140,-141,-142,-143,-25,176,]),'.':([55,61,63,91,92,116,119,120,125,175,198,232,235,264,275,286,],[88,-99,-97,123,124,174,-98,-101,-23,-103,-160,-99,260,260,-101,-103,]),'[':([55,61,63,71,119,120,125,175,198,232,235,264,275,286,],[89,-99,-97,109,-98,-101,-23,-103,-160,-99,261,261,-101,-103,]),'ABS':([62,89,95,96,97,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,176,261,274,],[95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,]),'NOT':([62,89,95,96,97,109,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,162,163,164,176,206,207,208,209,210,211,212,261,274,],[96,96,96,96,96,164,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,164,164,164,96,164,164,164,164,164,164,164,96,96,]),'TAKING':([75,76,77,],[112,112,112,]),'OPEQ':([229,232,275,286,],[256,-100,-102,-104,]),'WITH':([255,],[267,]),} _lr_action = {} for _k, _v in _lr_action_items.items(): for _x,_y in zip(_v[0],_v[1]): if not _x in _lr_action: _lr_action[_x] = {} _lr_action[_x][_k] = _y del _lr_action_items _lr_goto_items = 
{'file':([0,],[1,]),'newlines_opt':([0,],[2,]),'opmode':([2,],[3,]),'module':([2,],[4,]),'opmode_type':([2,],[6,]),'make_module':([7,],[11,]),'make_opmode':([10,],[12,]),'parameters':([11,102,104,106,],[13,151,152,153,]),'pos_parameters':([11,26,102,104,106,],[14,36,14,14,14,]),'required_parameters':([11,26,102,104,106,],[15,15,15,15,15,]),'newlines':([12,13,43,73,85,152,153,154,200,205,225,285,299,301,306,307,],[16,18,51,110,117,202,203,204,236,237,252,293,304,305,310,311,]),'kw_parameters':([14,],[19,]),'required_parameter':([15,],[21,]),'uses':([16,18,],[23,25,]),'keyword':([19,],[26,]),'optional_parameters':([20,],[29,]),'optional_parameter':([20,29,],[30,37,]),'typedefs':([23,25,66,],[32,35,101,]),'use':([23,25,],[33,33,]),'typedef':([32,35,101,],[38,38,38,]),'vartypes':([35,101,],[41,150,]),'arguments':([40,52,100,230,254,258,],[43,85,149,257,265,270,]),'pos_arguments':([40,52,87,100,230,254,258,],[45,45,118,45,45,45,45,]),'steps':([41,],[46,]),'vartype':([41,150,],[47,47,]),'kw_arguments':([45,],[53,]),'primary':([45,62,64,89,95,96,97,98,118,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,176,199,231,233,256,261,269,274,280,287,289,303,309,],[54,94,100,94,94,94,94,147,54,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,230,258,147,268,94,282,94,290,294,295,230,230,]),'simple_primary':([45,62,64,89,95,96,97,98,118,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,176,199,231,233,253,256,261,269,274,280,287,289,303,309,],[55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,235,55,55,264,55,55,55,55,55,55,55,235,235,]),'step':([46,],[65,]),'label_decl':([46,],[66,]),'type':([50,78,108,],[73,115,154,]),'simple_type':([50,78,108,112,169,172,218,219,220,249,251,262,277,],[74,74,74,170,221,170,170,170,170,221,221,170,221,]),'dotted_prefix':([50,78,108,112,169,172,218,219,220,249,251,262,277,],[84,84,84,84,84,84,84,84,84,84,84,84,84,]),'kw_argument':([53,],[86,]),'expr':([62,89,95,96,97,126,127,1
28,129,130,131,132,133,134,135,136,137,138,139,140,141,142,176,261,274,],[93,122,143,144,145,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,223,122,285,]),'native_elements':([62,89,95,96,97,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,176,199,261,274,303,309,],[98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,233,98,98,233,233,]),'fn_name':([67,],[102,]),'sub_name':([68,],[104,]),'label_name':([69,],[106,]),'taking_opt':([75,76,77,],[111,113,114,]),'subscripts':([89,261,],[121,276,]),'native_element':([98,233,],[146,146,]),'dimensions':([109,],[155,]),'dimension':([109,206,],[156,238,]),'const_expr':([109,162,163,164,206,207,208,209,210,211,212,],[157,213,214,215,157,239,240,241,242,243,244,]),'parameter_types':([112,172,],[165,222,]),'pos_parameter_types':([112,172,218,219,],[166,166,247,250,]),'pos_parameter_types1':([112,172,],[167,167,]),'parameter_types_list':([112,172,218,219,],[168,168,248,248,]),'parameter_types_list1':([112,172,218,219,220,262,],[169,169,249,249,251,277,]),'returning_opt':([114,151,],[171,201,]),'statements':([150,],[199,]),'set_returning_opt':([151,],[200,]),'kw_parameter_types':([166,],[216,]),'kw_parameter_type':([166,216,],[217,246,]),'statement':([199,],[224,]),'simple_statement':([199,303,309,],[225,306,306,]),'dlt':([199,],[226,]),'extended_kws':([199,227,303,309,],[228,253,228,228,]),'lvalue':([199,253,303,309,],[229,263,229,229,]),'normal_kws':([255,278,279,281,295,],[266,287,288,291,300,]),'dlt_conditions':([259,],[271,]),'conditions':([259,],[272,]),'condition':([259,272,],[273,284,]),'from_opt':([265,],[279,]),'dlt_actions':([292,],[296,]),'actions':([292,],[297,]),'action':([292,297,],[298,302,]),'action_statements':([299,304,],[303,309,]),'continue':([303,309,],[307,307,]),} _lr_goto = {} for _k, _v in _lr_goto_items.items(): for _x, _y in zip(_v[0], _v[1]): if not _x in _lr_goto: _lr_goto[_x] = {} _lr_goto[_x][_k] = _y del _lr_goto_items _lr_productions = [ ("S' 
-> file","S'",1,None,None,None), ('pos_arguments -> <empty>','pos_arguments',0,'p_empty_tuple','parser.py',43), ('kw_arguments -> <empty>','kw_arguments',0,'p_empty_tuple','parser.py',44), ('parameter_types_list -> <empty>','parameter_types_list',0,'p_empty_tuple','parser.py',45), ('steps -> <empty>','steps',0,'p_empty_tuple','parser.py',46), ('statements -> <empty>','statements',0,'p_empty_tuple','parser.py',47), ('action_statements -> <empty>','action_statements',0,'p_empty_tuple','parser.py',48), ('dotted_prefix -> <empty>','dotted_prefix',0,'p_empty_tuple','parser.py',49), ('const_expr -> STRING_LIT','const_expr',1,'p_first','parser.py',56), ('const_expr -> FLOAT_LIT','const_expr',1,'p_first','parser.py',57), ('const_expr -> INTEGER_LIT','const_expr',1,'p_first','parser.py',58), ('const_expr -> BOOLEAN_LIT','const_expr',1,'p_first','parser.py',59), ('expr -> primary','expr',1,'p_first','parser.py',60), ('primary -> simple_primary','primary',1,'p_first','parser.py',61), ('native_element -> primary','native_element',1,'p_first','parser.py',62), ('native_element -> NATIVE_STRING_LIT','native_element',1,'p_first','parser.py',63), ('type -> simple_type','type',1,'p_first','parser.py',64), ('statement -> simple_statement newlines','statement',2,'p_first','parser.py',65), ('statement -> dlt','statement',1,'p_first','parser.py',66), ('actions -> action','actions',1,'p_first','parser.py',67), ('parameter_types_list -> parameter_types_list1','parameter_types_list',1,'p_first','parser.py',68), ('opmode_type -> AUTONOMOUS','opmode_type',1,'p_first','parser.py',69), ('opmode_type -> TELEOP','opmode_type',1,'p_first','parser.py',70), ('simple_primary -> ( expr )','simple_primary',3,'p_second','parser.py',77), ('simple_type -> ( type )','simple_type',3,'p_second','parser.py',78), ('const_expr -> ( const_expr )','const_expr',3,'p_second','parser.py',79), ('returning_opt -> RETURNING parameter_types','returning_opt',2,'p_second','parser.py',80), ('taking_opt -> TAKING 
parameter_types','taking_opt',2,'p_second','parser.py',81), ('from_opt -> FROM primary','from_opt',2,'p_second','parser.py',82), ('label_decl -> FUNCTION fn_name parameters set_returning_opt newlines','label_decl',5,'p_second','parser.py',83), ('label_decl -> SUBROUTINE sub_name parameters newlines','label_decl',4,'p_second','parser.py',84), ('label_decl -> LABEL label_name parameters newlines','label_decl',4,'p_second','parser.py',85), ('newlines_opt -> <empty>','newlines_opt',0,'p_none','parser.py',92), ('newlines_opt -> newlines_opt NEWLINE','newlines_opt',2,'p_none','parser.py',93), ('newlines -> NEWLINE','newlines',1,'p_none','parser.py',94), ('newlines -> newlines NEWLINE','newlines',2,'p_none','parser.py',95), ('opmode -> opmode_type OPMODE make_opmode newlines uses typedefs','opmode',6,'p_none','parser.py',96), ('typedefs -> <empty>','typedefs',0,'p_none','parser.py',97), ('typedefs -> typedefs typedef','typedefs',2,'p_none','parser.py',98), ('vartypes -> <empty>','vartypes',0,'p_none','parser.py',99), ('vartypes -> vartypes vartype','vartypes',2,'p_none','parser.py',100), ('parameters -> pos_parameters kw_parameters','parameters',2,'p_none','parser.py',101), ('pos_parameters -> required_parameters','pos_parameters',1,'p_none','parser.py',102), ('pos_parameters -> required_parameters ? 
optional_parameters','pos_parameters',3,'p_none','parser.py',103), ('required_parameters -> <empty>','required_parameters',0,'p_none','parser.py',104), ('required_parameters -> required_parameters required_parameter','required_parameters',2,'p_none','parser.py',105), ('optional_parameters -> optional_parameter','optional_parameters',1,'p_none','parser.py',106), ('optional_parameters -> optional_parameters optional_parameter','optional_parameters',2,'p_none','parser.py',107), ('kw_parameters -> <empty>','kw_parameters',0,'p_none','parser.py',108), ('kw_parameters -> kw_parameters keyword pos_parameters','kw_parameters',3,'p_none','parser.py',109), ('returning_opt -> <empty>','returning_opt',0,'p_none','parser.py',110), ('uses -> <empty>','uses',0,'p_none','parser.py',111), ('uses -> uses use','uses',2,'p_none','parser.py',112), ('taking_opt -> <empty>','taking_opt',0,'p_none','parser.py',113), ('from_opt -> <empty>','from_opt',0,'p_none','parser.py',114), ('conditions -> condition','conditions',1,'p_1tuple','parser.py',121), ('parameter_types_list1 -> simple_type','parameter_types_list1',1,'p_1tuple','parser.py',122), ('kw_parameter_types -> kw_parameter_type','kw_parameter_types',1,'p_1tuple','parser.py',123), ('dimensions -> dimension','dimensions',1,'p_1tuple','parser.py',124), ('subscripts -> expr','subscripts',1,'p_1tuple','parser.py',125), ('native_elements -> NATIVE_STRING_LIT','native_elements',1,'p_1tuple','parser.py',126), ('kw_arguments -> kw_arguments kw_argument','kw_arguments',2,'p_append','parser.py',133), ('statements -> statements statement','statements',2,'p_append','parser.py',134), ('conditions -> conditions condition','conditions',2,'p_append','parser.py',135), ('pos_arguments -> pos_arguments primary','pos_arguments',2,'p_append','parser.py',136), ('kw_parameter_types -> kw_parameter_types kw_parameter_type','kw_parameter_types',2,'p_append','parser.py',137), ('parameter_types_list1 -> parameter_types_list1 
simple_type','parameter_types_list1',2,'p_append','parser.py',138), ('action_statements -> action_statements simple_statement newlines','action_statements',3,'p_append','parser.py',139), ('action_statements -> action_statements continue newlines','action_statements',3,'p_append','parser.py',140), ('native_elements -> native_elements native_element','native_elements',2,'p_append','parser.py',141), ('dotted_prefix -> dotted_prefix IDENT .','dotted_prefix',3,'p_append','parser.py',142), ('dimensions -> dimensions , dimension','dimensions',3,'p_dimensions','parser.py',149), ('subscripts -> subscripts , expr','subscripts',3,'p_dimensions','parser.py',150), ('actions -> actions action','actions',2,'p_actions','parser.py',156), ('step -> label_decl typedefs vartypes statements','step',4,'p_step1','parser.py',162), ('steps -> steps step','steps',2,'p_paste','parser.py',169), ('arguments -> pos_arguments kw_arguments','arguments',2,'p_all','parser.py',176), ('kw_argument -> KEYWORD pos_arguments','kw_argument',2,'p_all','parser.py',177), ('parameter_types -> pos_parameter_types kw_parameter_types','parameter_types',2,'p_all','parser.py',178), ('kw_parameter_type -> KEYWORD pos_parameter_types','kw_parameter_type',2,'p_kw_parameter_type','parser.py',185), ('kw_parameter_type -> OPT_KEYWORD pos_parameter_types','kw_parameter_type',2,'p_kw_parameter_type','parser.py',186), ('module -> MODULE make_module parameters newlines uses typedefs vartypes steps','module',8,'p_module','parser.py',193), ('type -> SUBROUTINE taking_opt','type',2,'p_label_type1','parser.py',201), ('type -> LABEL taking_opt','type',2,'p_label_type1','parser.py',202), ('type -> FUNCTION taking_opt returning_opt','type',3,'p_label_type2','parser.py',209), ('simple_type -> INTEGER','simple_type',1,'p_builtin_type','parser.py',216), ('simple_type -> FLOAT','simple_type',1,'p_builtin_type','parser.py',217), ('simple_type -> BOOLEAN','simple_type',1,'p_builtin_type','parser.py',218), ('simple_type -> 
STRING','simple_type',1,'p_builtin_type','parser.py',219), ('simple_type -> MODULE','simple_type',1,'p_builtin_type','parser.py',220), ('simple_type -> TYPE','simple_type',1,'p_builtin_type','parser.py',221), ('simple_type -> dotted_prefix IDENT','simple_type',2,'p_typename','parser.py',227), ('dimension -> const_expr','dimension',1,'p_dimension','parser.py',233), ('primary -> STRING_LIT','primary',1,'p_primary_literal','parser.py',242), ('primary -> FLOAT_LIT','primary',1,'p_primary_literal','parser.py',243), ('primary -> INTEGER_LIT','primary',1,'p_primary_literal','parser.py',244), ('primary -> BOOLEAN_LIT','primary',1,'p_primary_literal','parser.py',245), ('simple_primary -> RETURN_LABEL','simple_primary',1,'p_return_label1','parser.py',252), ('simple_primary -> simple_primary . RETURN_LABEL','simple_primary',3,'p_return_label2','parser.py',259), ('simple_primary -> IDENT','simple_primary',1,'p_primary_ident','parser.py',266), ('lvalue -> IDENT','lvalue',1,'p_primary_ident','parser.py',267), ('simple_primary -> simple_primary . IDENT','simple_primary',3,'p_primary_dot','parser.py',274), ('lvalue -> simple_primary . IDENT','lvalue',3,'p_primary_dot','parser.py',275), ('simple_primary -> simple_primary [ subscripts ]','simple_primary',4,'p_primary_subscript','parser.py',282), ('lvalue -> simple_primary [ subscripts ]','lvalue',4,'p_primary_subscript','parser.py',283), ('primary -> GOT KEYWORD','primary',2,'p_primary_got_keyword1','parser.py',289), ('primary -> GOT IDENT . KEYWORD','primary',4,'p_primary_got_keyword2','parser.py',294), ('primary -> GOT MODULE . KEYWORD','primary',4,'p_primary_got_keyword3','parser.py',299), ('primary -> GOT IDENT','primary',2,'p_primary_got_param1','parser.py',304), ('primary -> GOT IDENT . IDENT','primary',4,'p_primary_got_param2','parser.py',309), ('primary -> GOT MODULE . 
IDENT','primary',4,'p_primary_got_param3','parser.py',314), ('expr -> ABS expr','expr',2,'p_unary_expr','parser.py',320), ('expr -> NOT expr','expr',2,'p_unary_expr','parser.py',321), ('expr -> - expr','expr',2,'p_unary_expr','parser.py',322), ('expr -> expr ^ expr','expr',3,'p_binary_expr','parser.py',329), ('expr -> expr * expr','expr',3,'p_binary_expr','parser.py',330), ('expr -> expr / expr','expr',3,'p_binary_expr','parser.py',331), ('expr -> expr INTEGER_DIVIDE expr','expr',3,'p_binary_expr','parser.py',332), ('expr -> expr % expr','expr',3,'p_binary_expr','parser.py',333), ('expr -> expr + expr','expr',3,'p_binary_expr','parser.py',334), ('expr -> expr - expr','expr',3,'p_binary_expr','parser.py',335), ('expr -> expr < expr','expr',3,'p_binary_expr','parser.py',336), ('expr -> expr LEQ expr','expr',3,'p_binary_expr','parser.py',337), ('expr -> expr LAEQ expr','expr',3,'p_binary_expr','parser.py',338), ('expr -> expr > expr','expr',3,'p_binary_expr','parser.py',339), ('expr -> expr GEQ expr','expr',3,'p_binary_expr','parser.py',340), ('expr -> expr GAEQ expr','expr',3,'p_binary_expr','parser.py',341), ('expr -> expr EQ expr','expr',3,'p_binary_expr','parser.py',342), ('expr -> expr AEQ expr','expr',3,'p_binary_expr','parser.py',343), ('expr -> expr NEQ expr','expr',3,'p_binary_expr','parser.py',344), ('expr -> expr NAEQ expr','expr',3,'p_binary_expr','parser.py',345), ('expr -> native_elements','expr',1,'p_native_expr','parser.py',352), ('parameter_types -> pos_parameter_types1','parameter_types',1,'p_pos_parameter_types1','parser.py',359), ('pos_parameter_types1 -> parameter_types_list1','pos_parameter_types1',1,'p_pos_parameter_types1','parser.py',360), ('pos_parameter_types -> parameter_types_list','pos_parameter_types',1,'p_pos_parameter_types1','parser.py',361), ('pos_parameter_types1 -> parameter_types_list ? parameter_types_list1','pos_parameter_types1',3,'p_pos_parameter_types2','parser.py',368), ('pos_parameter_types -> parameter_types_list ? 
parameter_types_list1','pos_parameter_types',3,'p_pos_parameter_types2','parser.py',369), ('const_expr -> - const_expr','const_expr',2,'p_const_numeric_expr_uminus','parser.py',376), ('const_expr -> const_expr ^ const_expr','const_expr',3,'p_const_numeric_expr_binary','parser.py',386), ('const_expr -> const_expr * const_expr','const_expr',3,'p_const_numeric_expr_binary','parser.py',387), ('const_expr -> const_expr / const_expr','const_expr',3,'p_const_numeric_expr_binary','parser.py',388), ('const_expr -> const_expr % const_expr','const_expr',3,'p_const_numeric_expr_binary','parser.py',389), ('const_expr -> const_expr + const_expr','const_expr',3,'p_const_numeric_expr_binary','parser.py',390), ('const_expr -> const_expr - const_expr','const_expr',3,'p_const_numeric_expr_binary','parser.py',391), ('const_expr -> NOT const_expr','const_expr',2,'p_const_bool_expr_uminus','parser.py',413), ('continue -> CONTINUE','continue',1,'p_simple_statement1','parser.py',423), ('simple_statement -> SET extended_kws lvalue TO normal_kws primary','simple_statement',6,'p_simple_statement2','parser.py',430), ('simple_statement -> GOTO primary arguments','simple_statement',3,'p_simple_statement3','parser.py',437), ('simple_statement -> extended_kws RETURN arguments from_opt normal_kws','simple_statement',5,'p_simple_statement4','parser.py',444), ('simple_statement -> extended_kws RETURN arguments from_opt TO primary normal_kws','simple_statement',7,'p_simple_statement5','parser.py',451), ('extended_kws -> <empty>','extended_kws',0,'p_extended_kws','parser.py',458), ('normal_kws -> <empty>','normal_kws',0,'p_normal_kws','parser.py',463), ('simple_statement -> primary arguments','simple_statement',2,'p_simple_statement6','parser.py',469), ('simple_statement -> primary arguments RETURNING_TO primary','simple_statement',4,'p_simple_statement7','parser.py',476), ('simple_statement -> lvalue OPEQ primary','simple_statement',3,'p_simple_statement8','parser.py',483), ('simple_statement -> 
extended_kws DONE normal_kws','simple_statement',3,'p_simple_statement9','parser.py',490), ('simple_statement -> extended_kws DONE WITH IDENT normal_kws','simple_statement',5,'p_simple_statement10','parser.py',497), ('simple_statement -> native_elements','simple_statement',1,'p_simple_statement11','parser.py',504), ('file -> newlines_opt opmode','file',2,'p_file','parser.py',511), ('file -> newlines_opt module','file',2,'p_file','parser.py',512), ('simple_primary -> { primary arguments }','simple_primary',4,'p_primary','parser.py',519), ('make_opmode -> <empty>','make_opmode',0,'p_make_opmode','parser.py',525), ('make_module -> <empty>','make_module',0,'p_make_module','parser.py',530), ('required_parameter -> IDENT','required_parameter',1,'p_required_parameter','parser.py',540), ('optional_parameter -> IDENT','optional_parameter',1,'p_optional_parameter','parser.py',547), ('keyword -> KEYWORD','keyword',1,'p_keyword','parser.py',553), ('keyword -> OPT_KEYWORD','keyword',1,'p_opt_keyword','parser.py',558), ('use -> USE IDENT arguments newlines','use',4,'p_use1','parser.py',564), ('use -> USE IDENT AS IDENT arguments newlines','use',6,'p_use2','parser.py',575), ('typedef -> TYPE IDENT IS type newlines','typedef',5,'p_typedef','parser.py',586), ('set_returning_opt -> returning_opt','set_returning_opt',1,'p_set_returning_opt','parser.py',592), ('vartype -> VAR IDENT IS type newlines','vartype',5,'p_vartype','parser.py',599), ('vartype -> DIM IDENT [ dimensions ] newlines','vartype',6,'p_dim','parser.py',611), ('fn_name -> IDENT','fn_name',1,'p_fn_name','parser.py',621), ('sub_name -> IDENT','sub_name',1,'p_sub_name','parser.py',626), ('label_name -> IDENT','label_name',1,'p_label_name','parser.py',631), ('dlt -> DLT_DELIMITER NEWLINE dlt_conditions DLT_DELIMITER NEWLINE dlt_actions DLT_DELIMITER newlines','dlt',8,'p_dlt','parser.py',637), ('condition -> DLT_MASK expr newlines','condition',3,'p_condition','parser.py',648), ('action -> DLT_MAP 
action_statements','action',2,'p_action1','parser.py',655), ('action -> DLT_MAP newlines action_statements','action',3,'p_action2','parser.py',662), ('dlt_conditions -> conditions','dlt_conditions',1,'p_dlt_conditions','parser.py',668), ('dlt_actions -> actions','dlt_actions',1,'p_dlt_actions','parser.py',673), ]
/robot_brains-0.1.0-py3-none-any.whl/robot_brains/parsetab.py
0.444324
0.227577
parsetab.py
pypi
import socket

import numpy as np


class Manipulator:
    """Base class for a robot manipulator reachable over two TCP sockets.

    One socket is used for reading robot state, the other for writing
    commands.  Concrete robots subclass this and implement the motion and
    state-query methods.
    """

    def __init__(self, ip, port_read, port_write):
        """Open the read/write connections between the host and the robot.

        :param ip: robot IP address.
        :param port_read: port used to read robot state (int or numeric str).
        :param port_write: port used to send commands (int or numeric str).
        """
        assert isinstance(ip, str)
        assert isinstance(port_read, (int, str))
        assert isinstance(port_write, (int, str))
        if isinstance(port_read, str):
            port_read = int(port_read)
        if isinstance(port_write, str):
            port_write = int(port_write)
        self.ip = ip
        self.port_write = port_write
        self.port_read = port_read
        # Optional 4x4 homogeneous transform mapping an external coordinate
        # system onto the robot's frame; None means "no mapping applied".
        self.coordinates_mapping = None
        try:
            self.socket_read = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket_write = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket_read.connect((self.ip, self.port_read))
            self.socket_write.connect((self.ip, self.port_write))
            self.socket_write.settimeout(5)
            self.socket_read.settimeout(5)
            print("[Socket -- IP: {0}. Write port: {1}, read port: {2}]\n".format(
                ip, self.port_write, self.port_read))
        # FIX: original used Python 2 "except socket.error, exc" syntax,
        # which is a SyntaxError on Python 3.
        except socket.error as exc:
            print("[Socket cannot be created. Exception occured:\n{0}]\n".format(exc))

    def __del__(self):
        """Close the sockets created during initialization."""
        # Guard with getattr: if socket creation failed in __init__, the
        # attributes may not exist and __del__ must not raise.
        sock_read = getattr(self, "socket_read", None)
        sock_write = getattr(self, "socket_write", None)
        if sock_read is not None:
            sock_read.close()
        if sock_write is not None:
            sock_write.close()
        # FIX: original message had no {0} placeholder for the IP that was
        # passed to format() (and misspelled "successfully").
        print("\n[Sockets for {0} successfully closed.]\n".format(
            getattr(self, "ip", "?")))

    def set_mapping(self, matrix):
        """
        Set mapping between robot and some external coordinate systems.
        :param matrix: Homogeneous matrix np.array() 4x4
        :return: void
        """
        assert isinstance(matrix, np.ndarray)
        assert matrix.shape == (4, 4)
        self.coordinates_mapping = matrix
        print("Coordinates mapping will be applied:\n{0}\n From now on specify robot's pose in your coordinate system.".format(matrix))

    def reset_mapping(self):
        """
        Removes homogeneous matrix of transformation between a robot
        and external camera coordinate system.
        """
        self.coordinates_mapping = None

    def get_mapping(self):
        """
        Returns the homogeneous matrix of transformation between a robot
        and external camera coordinate system (None if not set).
        """
        # FIX: docstring previously said "Sets" (copy-paste from set_mapping).
        return self.coordinates_mapping

    def grip(self, range_open):
        # No-op in the base class; grippers override this.
        pass

    def move(self, *args):
        raise NotImplementedError("Implement this method")

    def get_pose(self):
        raise NotImplementedError("Implement this method")

    def get_joints(self):
        raise NotImplementedError("Implement this method")

    def execute_in_force_mode(self, *args):
        raise NotImplementedError("Implement this method")
/robot_controller-0.3.tar.gz/robot_controller-0.3/robot_controller/manipulators/manipulator.py
0.661814
0.390476
manipulator.py
pypi
import math

import numpy as np

# NOTE(review): cv2 (OpenCV) is imported lazily inside the functions that
# need it, so the pure-math helpers below remain usable without OpenCV.


def robot_command(method):
    """Decorator for robot commands: swallow and print any exception.

    The wrapped call returns None when an exception occurs.
    """
    def func_wrapper(self, *args, **kwargs):
        try:
            return method(self, *args, **kwargs)
        except Exception as exc:
            print("Exception info:\n{0}".format(exc))
    return func_wrapper


def rotation_matrix(axis, angle):
    """Return the 3x3 rotation matrix about a principal axis.

    :param axis: one of 'x', 'y', 'z'; any other value yields identity.
    :param angle: rotation angle in radians.
    :return: np.ndarray of shape (3, 3).
    """
    c = np.cos(angle)
    s = np.sin(angle)
    matrix = np.eye(3)
    if axis == 'x':
        matrix = [[1, 0, 0], [0, c, -s], [0, s, c]]
    elif axis == 'y':
        matrix = [[c, 0, s], [0, 1, 0], [-s, 0, c]]
    elif axis == 'z':
        matrix = [[c, -s, 0], [s, c, 0], [0, 0, 1]]
    # FIX: the original returned plain nested lists for known axes but an
    # ndarray for the fall-through case; normalize to one consistent type.
    return np.asarray(matrix)


def is_rotation_mat(R):
    """Check whether ``R`` is a valid rotation matrix (R^T R == I)."""
    Rt = np.transpose(R)
    should_be_identity = np.dot(Rt, R)
    I = np.identity(3, dtype=R.dtype)
    return np.linalg.norm(I - should_be_identity) < 1e-6


def rot2euler(R):
    """Convert a rotation matrix to XYZ Euler angles (radians)."""
    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    singular = sy < 1e-6
    if not singular:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:
        # Gimbal-lock fallback: z is undetermined, so it is fixed to 0.
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0
    return np.array([x, y, z])


def euler2rot(theta):
    """Convert XYZ Euler angles (radians) to a rotation matrix R = Rz.Ry.Rx."""
    R_x = np.array([[1, 0, 0],
                    [0, math.cos(theta[0]), -math.sin(theta[0])],
                    [0, math.sin(theta[0]), math.cos(theta[0])]
                    ])
    R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
                    [0, 1, 0],
                    [-math.sin(theta[1]), 0, math.cos(theta[1])]
                    ])
    R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
                    [math.sin(theta[2]), math.cos(theta[2]), 0],
                    [0, 0, 1]
                    ])
    R = np.dot(R_z, np.dot(R_y, R_x))
    return R


def pose2mat(pose):
    """Convert a pose [x, y, z, rx, ry, rz] (Rodrigues rotation vector)
    to a 4x4 homogeneous transformation matrix.
    """
    import cv2
    assert isinstance(pose, np.ndarray)
    r = cv2.Rodrigues(pose[3:])[0]
    h = np.array([[pose[0]], [pose[1]], [pose[2]]])
    rt = np.concatenate([r, h], axis=1)
    bottom_row = np.array([0, 0, 0, 1]).reshape((1, 4))
    rt = np.concatenate([rt, bottom_row], axis=0)
    return rt


def mat2pose(mat):
    """Convert a 4x4 homogeneous matrix to a pose [x, y, z, rx, ry, rz]."""
    import cv2
    assert isinstance(mat, np.ndarray)
    rvec = cv2.Rodrigues(mat[:3, :3])[0].reshape(1, 3)
    tvec = np.asarray(mat[:3, 3]).reshape(1, 3)
    pose = np.vstack([tvec, rvec]).reshape(6)
    return pose


def rg6_cmd(range_open, force=50):
    """Build the URScript program that drives an RG6 gripper.

    :param range_open: target opening width (clamped to [0, 150] on-robot).
    :param force: target gripping force (clamped to [4, 40] on-robot).
    :return: the URScript source as a single string.
    """
    # FIX: build via a list + join instead of ~120 quadratic "+=" appends;
    # the emitted URScript text is unchanged.
    prog = [
        "def rg6ProgOpen():\n",
        "\ttextmsg(\"inside RG6 function called\")\n",
        "\ttarget_width={0}\n".format(range_open),
        "\ttarget_force={0}\n".format(force),
        "\tpayload=1.0\n",
        "\tset_payload1=False\n",
        "\tdepth_compensation=False\n",
        "\tslave=False\n",
        "\ttimeout = 0\n",
        "\twhile get_digital_in(9) == False:\n",
        "\t\ttextmsg(\"inside while\")\n",
        "\t\tif timeout > 400:\n",
        "\t\t\tbreak\n",
        "\t\tend\n",
        "\t\ttimeout = timeout+1\n",
        "\t\tsync()\n",
        "\tend\n",
        "\ttextmsg(\"outside while\")\n",
        "\tdef bit(input):\n",
        "\t\tmsb=65536\n",
        "\t\tlocal i=0\n",
        "\t\tlocal output=0\n",
        "\t\twhile i<17:\n",
        "\t\t\tset_digital_out(8,True)\n",
        "\t\t\tif input>=msb:\n",
        "\t\t\t\tinput=input-msb\n",
        "\t\t\t\tset_digital_out(9,False)\n",
        "\t\t\telse:\n",
        "\t\t\t\tset_digital_out(9,True)\n",
        "\t\t\tend\n",
        "\t\t\tif get_digital_in(8):\n",
        "\t\t\t\tout=1\n",
        "\t\t\tend\n",
        "\t\t\tsync()\n",
        "\t\t\tset_digital_out(8,False)\n",
        "\t\t\tsync()\n",
        "\t\t\tinput=input*2\n",
        "\t\t\toutput=output*2\n",
        "\t\t\ti=i+1\n",
        "\t\tend\n",
        "\t\treturn output\n",
        "\tend\n",
        "\ttextmsg(\"outside bit definition\")\n",
        "\ttarget_width=target_width+0.0\n",
        "\tif target_force>40:\n",
        "\t\ttarget_force=40\n",
        "\tend\n",
        "\tif target_force<4:\n",
        "\t\ttarget_force=4\n",
        "\tend\n",
        "\tif target_width>150:\n",
        "\t\ttarget_width=150\n",
        "\tend\n",
        "\tif target_width<0:\n",
        "\t\ttarget_width=0\n",
        "\tend\n",
        "\trg_data=floor(target_width)*4\n",
        "\trg_data=rg_data+floor(target_force/2)*4*111\n",
        "\tif slave:\n",
        "\t\trg_data=rg_data+16384\n",
        "\tend\n",
        "\ttextmsg(\"about to call bit\")\n",
        "\tbit(rg_data)\n",
        "\ttextmsg(\"called bit\")\n",
        "\tif depth_compensation:\n",
        "\t\tfinger_length = 55.0/1000\n",
        "\t\tfinger_heigth_disp = 5.0/1000\n",
        "\t\tcenter_displacement = 7.5/1000\n",
        "\t\tstart_pose = get_forward_kin()\n",
        "\t\tset_analog_inputrange(2, 1)\n",
        "\t\tzscale = (get_analog_in(2)-0.026)/2.976\n",
        "\t\tzangle = zscale*1.57079633-0.087266462\n",
        "\t\tzwidth = 5+110*sin(zangle)\n",
        "\t\tstart_depth = cos(zangle)*finger_length\n",
        "\t\tsync()\n",
        "\t\tsync()\n",
        "\t\ttimeout = 0\n",
        "\t\twhile get_digital_in(9) == True:\n",
        "\t\t\ttimeout=timeout+1\n",
        "\t\t\tsync()\n",
        "\t\t\tif timeout > 20:\n",
        "\t\t\t\tbreak\n",
        "\t\t\tend\n",
        "\t\tend\n",
        "\t\ttimeout = 0\n",
        "\t\twhile get_digital_in(9) == False:\n",
        "\t\t\tzscale = (get_analog_in(2)-0.026)/2.976\n",
        "\t\t\tzangle = zscale*1.57079633-0.087266462\n",
        "\t\t\tzwidth = 5+110*sin(zangle)\n",
        "\t\t\tmeasure_depth = cos(zangle)*finger_length\n",
        "\t\t\tcompensation_depth = (measure_depth - start_depth)\n",
        "\t\t\ttarget_pose = pose_trans(start_pose,p[0,0,-compensation_depth,0,0,0])\n",
        "\t\t\tif timeout > 400:\n",
        "\t\t\t\tbreak\n",
        "\t\t\tend\n",
        "\t\t\ttimeout=timeout+1\n",
        "\t\t\tservoj(get_inverse_kin(target_pose),0,0,0.008,0.033,1700)\n",
        "\t\tend\n",
        "\t\tnspeed = norm(get_actual_tcp_speed())\n",
        "\t\twhile nspeed > 0.001:\n",
        "\t\t\tservoj(get_inverse_kin(target_pose),0,0,0.008,0.033,1700)\n",
        "\t\t\tnspeed = norm(get_actual_tcp_speed())\n",
        "\t\tend\n",
        "\tend\n",
        "\tif depth_compensation==False:\n",
        "\t\ttimeout = 0\n",
        "\t\twhile get_digital_in(9) == True:\n",
        "\t\t\ttimeout = timeout+1\n",
        "\t\t\tsync()\n",
        "\t\t\tif timeout > 20:\n",
        "\t\t\t\tbreak\n",
        "\t\t\tend\n",
        "\t\tend\n",
        "\t\ttimeout = 0\n",
        "\t\twhile get_digital_in(9) == False:\n",
        "\t\t\ttimeout = timeout+1\n",
        "\t\t\tsync()\n",
        "\t\t\tif timeout > 400:\n",
        "\t\t\t\tbreak\n",
        "\t\t\tend\n",
        "\t\tend\n",
        "\tend\n",
        "\tif set_payload1:\n",
        "\t\tif slave:\n",
        "\t\t\tif get_analog_in(3) < 2:\n",
        "\t\t\t\tzslam=0\n",
        "\t\t\telse:\n",
        "\t\t\t\tzslam=payload\n",
        "\t\t\tend\n",
        "\t\telse:\n",
        "\t\t\tif get_digital_in(8) == False:\n",
        "\t\t\t\tzmasm=0\n",
        "\t\t\telse:\n",
        "\t\t\t\tzmasm=payload\n",
        "\t\t\tend\n",
        "\t\tend\n",
        "\t\tzsysm=0.0\n",
        "\t\tzload=zmasm+zslam+zsysm\n",
        "\t\tset_payload(zload)\n",
        "\tend\n",
        "end\n\n",
    ]
    return "".join(prog)


def find_aruco(marker_size, blocking=False):
    """Detect ArUco markers in a live camera stream and estimate their pose.

    Shows a preview window; press 'q' to stop.

    :param marker_size: physical marker side length.
    :param blocking: when False, the preview window is destroyed on quit.
    :return: (tvec, rvec, corners, cam_mat) from the last processed frame.
    """
    import cv2
    # read camera intrinsic parameters
    fs = cv2.FileStorage("./intr.yml", cv2.FILE_STORAGE_READ)
    cam_mat = fs.getNode("camera_matrix").mat()
    cv2.namedWindow('find_aruco')
    cap = cv2.VideoCapture(2)  # NOTE(review): hard-coded camera index — confirm
    rvec, tvec, corners = None, None, None
    while True:
        ret, img = cap.read()
        aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)
        parameters = cv2.aruco.DetectorParameters_create()
        # lists of ids and the corners belonging to each id
        corners, ids, rejected = cv2.aruco.detectMarkers(img, aruco_dict, parameters=parameters)
        if len(corners) > 0:
            img = cv2.aruco.drawDetectedMarkers(img, corners, ids, borderColor=(0, 0, 255))
            distCoeffs = None
            rvec, tvec, _ = cv2.aruco.estimatePoseSingleMarkers(corners, marker_size, cam_mat, distCoeffs)
            if rvec is not None and tvec is not None:
                cv2.aruco.drawAxis(img, cam_mat, distCoeffs, rvec, tvec, 0.1)
        cv2.imshow('find_aruco', img)
        k = cv2.waitKey(1)
        if k == ord('q'):
            # NOTE(review): indentation was lost in the source; this reading
            # (destroy window only when non-blocking, always stop on 'q')
            # is the only one that terminates — confirm against callers.
            if not blocking:
                cv2.destroyAllWindows()
            break
    cap.release()  # FIX: release the capture device (original leaked it)
    return tvec, rvec, corners, cam_mat
/robot_controller-0.3.tar.gz/robot_controller-0.3/robot_controller/manipulators/utils.py
0.539469
0.570511
utils.py
pypi
import os
import pickle

from tools.data_loader import DataLoader
from tools.read_hokuyo_30m import read_hokuyo
from tools.tar_extract import tar_extract
from tools.download_tar import download_tar


class DataManager:
    '''
    This class handles downloading, extracting and storing data to be used
    by the main application.
    '''

    def __init__(self, date):
        # Original working directory; several methods chdir back to it.
        self.owd = os.path.abspath('.')
        self.data_dir_name = 'data'
        self.base_name = 'http://robots.engin.umich.edu/nclt'
        self.date = date
        self.data_dir = os.path.join(self.owd, os.path.join(self.data_dir_name, self.date))
        # Loaded datasets keyed by name ('gps', 'gps_range', 'lidar').
        self.data_dict = {}

    def setup_data_files(self, data_type):
        '''
        This function sets up data files by downloading and extracting them
        into the *data* directory.

        :param data_type: Selects lidar or GPS data.
        :type data_type: str.
        :return: None
        '''
        filename = download_tar(self.base_name, self.date, data_type)
        tar_extract(filename)
        os.chdir(self.owd)

    def load_gps(self):
        '''
        This function loads GPS data into the data manager's data dictionary.

        :return: None
        '''
        os.chdir(self.owd)
        gps_file_path = os.path.join(self.data_dir_name, os.path.join(self.date, 'gps.csv'))
        data_loader = DataLoader(gps_file_path)
        self.data_dict['gps'] = data_loader.get_gps_dictionary()
        self.data_dict['gps_range'] = data_loader.get_gps_range()

    def load_lidar(self, num_samples, pickled=None, delete_pickle=None):
        '''
        This function loads lidar, with the option to use a pickled file.

        :param num_samples: Number of samples to load
        :type num_samples: int.
        :param pickled: If *pickled='pickled'*, load from an existing pickle
            of the lidar data, otherwise read the raw data and save a pickle.
        :type pickled: str.
        :param delete_pickle: If *delete_pickle='delete'*, delete any
            existing pickle of lidar first.
        :type delete_pickle: str.
        '''
        os.chdir(self.owd)
        lidar_file_path = os.path.join(self.data_dir_name, os.path.join(self.date, 'hokuyo_30m.bin'))
        if delete_pickle == 'delete':
            if os.path.exists("lidar.pickle"):
                os.remove("lidar.pickle")
            else:
                # FIX: report only when there was nothing to delete; the
                # collapsed source printed this unconditionally.
                print("there is no pickle to delete")
        if pickled == 'pickled':
            try:
                # FIX: use context managers; the original leaked the file
                # handles from pickle.load(open(...)) / pickle.dump(..., open(...)).
                with open("lidar.pickle", "rb") as pickle_file:
                    self.data_dict['lidar'] = pickle.load(pickle_file)
            except OSError:  # IOError is an alias of OSError on Python 3
                self.data_dict['lidar'] = read_hokuyo(lidar_file_path, num_samples)
                with open("lidar.pickle", "wb") as pickle_file:
                    pickle.dump(self.data_dict['lidar'], pickle_file)
        else:
            self.data_dict['lidar'] = read_hokuyo(lidar_file_path, num_samples)

    def load_all(self):
        '''
        This function loads both the GPS and lidar data.

        :return: None
        '''
        self.load_gps()
        self.load_lidar(100)  # Note this could take a while - loads a lot of samples

    def get_data(self, key=None):
        '''
        This function gets data from the data manager's data dictionary.

        :return: the whole dictionary when *key* is None, else the value at *key*
        '''
        if key is None:
            return self.data_dict
        return self.data_dict[key]
/robot-data-visualizer-0.3.tar.gz/robot-data-visualizer-0.3/tools/data_manager.py
0.445288
0.235108
data_manager.py
pypi
from math import sqrt, log, tan, pi, cos

from staticmap import StaticMap
from PIL import Image

RECT = 90
HALF_DEGREE = 180
FULL_DEGREE = 360


class StaticMapBaseLayer(StaticMap):
    '''
    Override of StaticMap that can extract line features as pixel points and
    render the base tile layer without drawing any features on top of it.
    '''

    def __init__(self, width, height, padding):
        # No extra state; delegate straight to StaticMap.
        super().__init__(width, height, padding)

    def _lon_to_x(self, lon, zoom):
        """
        Transform a longitude into a (fractional) tile x-coordinate.

        :type lon: float
        :type zoom: int
        :rtype: float
        """
        if not -HALF_DEGREE <= lon <= HALF_DEGREE:
            # Wrap out-of-range longitudes back into [-180, 180].
            lon = (lon + HALF_DEGREE) % FULL_DEGREE - HALF_DEGREE
        return (lon + HALF_DEGREE) / FULL_DEGREE * (2 ** zoom)

    def _lat_to_y(self, lat, zoom):
        """
        Transform a latitude into a (fractional) tile y-coordinate
        (Web-Mercator projection).

        :type lat: float
        :type zoom: int
        :rtype: float
        """
        if not -RECT <= lat <= RECT:
            # Wrap out-of-range latitudes back into [-90, 90].
            lat = (lat + RECT) % HALF_DEGREE - RECT
        rad = lat * pi / HALF_DEGREE
        return (1 - log(tan(rad) + 1 / cos(rad)) / pi) / 2 * (2 ** zoom)

    def _simplify(self, points, tolerance=11):
        """
        Thin out a polyline by dropping points closer than *tolerance*
        pixels to the last kept point; endpoints are always kept.

        :param points: list of pixel-coordinate pairs
        :type points: list
        :param tolerance: tolerance in pixel
        :type tolerance: float
        :rtype: list
        """
        if not points:
            return points
        kept = [points[0]]
        for candidate in points[1:-1]:
            anchor = kept[-1]
            gap = sqrt((anchor[0] - candidate[0]) ** 2 + (anchor[1] - candidate[1]) ** 2)
            if gap > tolerance:
                kept.append(candidate)
        kept.append(points[-1])
        return kept

    def extract_line_points(self):
        '''
        Extract a line feature's coordinates as pixel points.
        This method is not in the original StaticMap API.

        NOTE(review): the return sits inside the loop, so only the first
        line's points are produced — confirm callers expect that.

        :return: points ((px, py), (), ...)
        '''
        for line in self.lines:
            pixel_pts = [
                (self._x_to_px(self._lon_to_x(coord[0], self.zoom)),
                 self._y_to_px(self._lat_to_y(coord[1], self.zoom)))
                for coord in line.coords
            ]
            if line.simplify:
                pixel_pts = self._simplify(pixel_pts)
            return pixel_pts

    def render_without_features(self, zoom=None, center=None):
        """
        Render the static map's base tile layer only, skipping the feature
        drawing pass that StaticMap normally performs.

        :param zoom: optional zoom level, optimized automatically if omitted.
        :type zoom: int
        :param center: optional map center, derived from features if omitted.
        :type center: list
        :return: PIL image instance
        :rtype: Image.Image
        """
        if not self.lines and not self.markers and not self.polygons and not (center and zoom):
            raise RuntimeError("cannot render empty map, add lines / markers / polygons first")
        self.zoom = self._calculate_zoom() if zoom is None else zoom
        if center:
            self.x_center = self._lon_to_x(center[0], self.zoom)
            self.y_center = self._lat_to_y(center[1], self.zoom)
        else:
            # Derive the center from the extent of all added features.
            extent = self.determine_extent(zoom=self.zoom)
            lon_center = (extent[0] + extent[2]) / 2
            lat_center = (extent[1] + extent[3]) / 2
            self.x_center = self._lon_to_x(lon_center, self.zoom)
            self.y_center = self._lat_to_y(lat_center, self.zoom)
        image = Image.new('RGB', (self.width, self.height), self.background_color)
        self._draw_base_layer(image)
        # Deliberately skip StaticMap's _draw_features(image) step.
        return image
/robot-data-visualizer-0.3.tar.gz/robot-data-visualizer-0.3/tools/static_map_base_layer.py
0.822332
0.473109
static_map_base_layer.py
pypi
import math
import datetime
import sys
import traceback

import numpy as np

sys.path.append('..')

# NOTE(review): matplotlib and tools.data_manager are imported inside main()
# so the ICP math utilities can be imported without plotting/data deps.

# ICP parameters
EPS = 0.0001
MAXITER = 100
show_animation = True


def ICP_matching(ppoints, cpoints, time):
    """
    Iterative Closest Point matching
    - input
    ppoints: 2D points in the previous frame
    cpoints: 2D points in the current frame
    time: timestamp (kept for interface compatibility; used only for plots)
    - output
    R: Rotation matrix
    T: Translation vector
    """
    H = None  # homogeneous transformation matrix accumulated over iterations
    dError = 1000.0
    preError = 1000.0
    count = 0
    LOWERBOUND = 0
    UPPERBOUND = 2
    while dError >= EPS:
        count += 1
        inds, error = nearest_neighbor_assosiation(ppoints, cpoints)
        Rt, Tt = SVD_motion_estimation(ppoints[:, inds], cpoints)
        # update current points
        cpoints = (Rt * cpoints) + Tt
        H = update_homogeneous_matrix(H, Rt, Tt)
        dError = abs(preError - error)
        preError = error
        if dError <= EPS:
            break
        elif MAXITER <= count:
            print("Not Converge...", error, dError, count)
            break
    R = np.matrix(H[LOWERBOUND:UPPERBOUND, LOWERBOUND:UPPERBOUND])
    T = np.matrix(H[LOWERBOUND:UPPERBOUND, UPPERBOUND])
    return R, T


def update_homogeneous_matrix(Hin, R, T):
    """
    Update the homogeneous matrix (translation and rotation combined).

    :param Hin: initial/previous homogeneous matrix (or None).
    :param R: 2x2 rotation matrix.
    :param T: 2x1 translation matrix.
    :return: the updated 3x3 homogeneous matrix.
    """
    H = np.matrix(np.zeros((3, 3)))
    H[0, 0] = R[0, 0]
    H[1, 0] = R[1, 0]
    H[0, 1] = R[0, 1]
    H[1, 1] = R[1, 1]
    H[2, 2] = 1.0
    H[0, 2] = T[0, 0]
    H[1, 2] = T[1, 0]
    if Hin is not None:
        H = Hin * H
    return H


def nearest_neighbor_assosiation(ppoints, cpoints):
    """
    Associates new LIDAR points with previous LIDAR points.

    - input
    ppoints: 2D points in the previous frame
    cpoints: 2D points in the current frame
    - output
    inds: index of the nearest previous point for each current point
    error: sum of column-wise distances between the two point sets
    """
    # calculate the sum of residual errors
    dcpoints = ppoints - cpoints
    d = np.linalg.norm(dcpoints, axis=0)
    error = sum(d)
    # calc index with nearest neighbor association
    inds = []
    for i in range(cpoints.shape[1]):
        minid = -1
        mind = float("inf")
        for ii in range(ppoints.shape[1]):
            d = np.linalg.norm(ppoints[:, ii] - cpoints[:, i])
            if mind >= d:
                mind = d
                minid = ii
        inds.append(minid)
    return inds, error


def SVD_motion_estimation(ppoints, cpoints):
    """
    Performs singular value decomposition to determine rotation/translation
    such that ppoints ~= R * cpoints + t.

    - input
    ppoints: 2D points in the previous frame
    cpoints: 2D points in the current frame
    - output
    R: rotation matrix
    t: translation vector
    """
    pm = np.matrix(np.mean(ppoints, axis=1))
    cm = np.matrix(np.mean(cpoints, axis=1))
    pshift = np.matrix(ppoints - pm)
    cshift = np.matrix(cpoints - cm)
    W = cshift * pshift.T
    u, s, vh = np.linalg.svd(W)
    R = (u * vh).T
    t = pm - R * cm
    return R, t


def choose_lidar_pts(i, data_i):
    """
    Validate one lidar sample: keep its nonzero points when x and y agree on
    which entries are nonzero and there are enough points; otherwise return
    the sentinel triple (i, i, i).
    """
    min_lidar_pts = 100
    x, y, time = data_i
    x_index = np.nonzero(x)
    y_index = np.nonzero(y)
    # FIX: the original compared str(np.nonzero(...)) strings, which is
    # unreliable because numpy truncates the repr of large arrays ("...").
    is_equal = (len(x_index) == len(y_index) and
                all(np.array_equal(a, b) for a, b in zip(x_index, y_index)))
    enough_lidar = len(x) > min_lidar_pts
    x = x[np.nonzero(x)]
    y = y[np.nonzero(y)]
    # FIX: original wrote "is_equal == True & enough_lidar == True"; '&'
    # binds tighter than '==', so the test only worked by accident on bools.
    if is_equal and enough_lidar:
        return (x, y, time)
    return (i, i, i)


def main():
    print(__file__ + " start!!")
    # Heavy/optional dependencies are only needed for this driver.
    import matplotlib.pyplot as plt
    from tools.data_manager import DataManager

    # simulation parameters: specify date
    dm = DataManager('2012-04-29')
    print('DataManager initialized')
    # Download and extract sensor data
    dm.setup_data_files('sensor_data')
    print('sensor data downloaded')
    # Download and extract data for the hokuyo lidar scanner
    dm.setup_data_files('hokuyo')
    # load scans of lidar
    num_samples = 10000
    step_size = 10
    delay = 0.1
    print('hokuyo data loading...')
    dm.load_lidar(num_samples, 'pickled', 'delete')
    lidar = dm.data_dict['lidar']
    print('running SLAM')
    for i in range(0, int(num_samples / step_size), step_size):
        # get previous and current points
        k = i
        k_next = k + 1
        j = -1
        lidar_i = lidar[i]
        x, y, time = choose_lidar_pts(i, lidar_i)
        # choose_lidar_pts signals a rejected sample by returning (i, i, i).
        if str(x) != str(time):
            # previous points
            px, py = x, y
            ppoints = np.matrix(np.vstack((px, py)))
            # current points
            lidar_k = lidar[k]
            x2, y2, time2 = choose_lidar_pts(k, lidar_k)
            if str(x2) != str(time2):
                cx, cy, time = choose_lidar_pts(k, lidar_k)
                cpoints = np.matrix(np.vstack((cx, cy)))
            else:
                # NOTE(review): reconstructed from collapsed source; this
                # retry loop never advances k past k_next and can spin
                # forever if the sample stays invalid — confirm intent.
                while k != j:
                    k = k_next
                    x2, y2, time2 = choose_lidar_pts(k, lidar_k)
                    if str(x2) != str(time2):
                        cx, cy, time2 = choose_lidar_pts(k, lidar_k)
                        cpoints = np.matrix(np.vstack((cx, cy)))
                        j = k
            i = k
            R, T = ICP_matching(ppoints, cpoints, time2)
            if i == 0:
                pose_xy = np.array([[0], [0]])
                pose_uv = np.array([[0], [1]])
            else:
                pose_xy = np.array([X, Y])
                pose_uv = np.array([U, V])
            new_pose_xy = pose_xy + np.array(T)
            a = np.array(R)
            new_pose_uv = a.dot(pose_uv)
            X = new_pose_xy[0]
            Y = new_pose_xy[1]
            U = new_pose_uv[0]
            V = new_pose_uv[1]
            plt.quiver(X, Y, U, V)
            plt.axis("equal")
            plt.title(time)
            plt.pause(delay)
            plt.cla()


if __name__ == '__main__':
    main()
/robot-data-visualizer-0.3.tar.gz/robot-data-visualizer-0.3/misc/iterative_closest_point.py
0.463201
0.595669
iterative_closest_point.py
pypi
from matplotlib.patches import Arc
import matplotlib.pyplot as plt


def hokuyo_plot(x_lidar, y_lidar, time):
    """
    Creates plot of xy lidar data with lidar range arc plotted in blue line.

    Keyword arguments:
    x_lidar -- x components of lidar scan
    y_lidar -- y components of lidar scan
    time -- time stamp of lidar scan
    """
    # Arc geometry copied and modified from:
    # https://github.com/matplotlib/matplotlib/issues/8046/#issuecomment-278312361

    # Scanner range arcs: inner/outer diameters, centered at the origin.
    inner_diameter, outer_diameter = 2, 60
    origin = 0
    # Seconds to keep the frame on screen before closing.
    pause_seconds = 0.1

    fig, axes = plt.subplots()
    # The figure handle is not needed; drop it immediately.
    del fig

    # Both arcs share the same 270-degree sweep and styling.
    for diam in (inner_diameter, outer_diameter):
        axes.add_patch(Arc((origin, origin), diam, diam,
                           theta1=-45, theta2=225,
                           edgecolor='b', linewidth=1.5))

    # Straight boundary lines closing off the scanner's blind spot.
    near, far = 0.8, 21.21
    # lower-right boundary
    plt.plot([near, far], [-near, -far], 'b')
    # lower-left boundary
    plt.plot([-near, -far], [-near, -far], 'b')

    # The scan points themselves.
    plt.plot(x_lidar, y_lidar, '.')

    # Time stamp as the title; distances on the x axis.
    plt.title(time)
    plt.xlabel('Distance (meters)')

    # Fix the viewport so successive frames are comparable.
    axes = plt.gca()
    axes.set_xlim([-32, 32])
    axes.set_ylim([-32, 32])

    # Show the frame briefly, then tear it down.
    plt.pause(pause_seconds)
    plt.close()
/robot-data-visualizer-0.3.tar.gz/robot-data-visualizer-0.3/misc/plot_lidar.py
0.785061
0.653072
plot_lidar.py
pypi
[RFC6265](https://tools.ietf.org/html/rfc6265) Cookies and CookieJar for Node.js [![npm package](https://nodei.co/npm/tough-cookie.png?downloads=true&downloadRank=true&stars=true)](https://nodei.co/npm/tough-cookie/) [![Build Status](https://travis-ci.org/salesforce/tough-cookie.png?branch=master)](https://travis-ci.org/salesforce/tough-cookie) # Synopsis ``` javascript var tough = require('tough-cookie'); var Cookie = tough.Cookie; var cookie = Cookie.parse(header); cookie.value = 'somethingdifferent'; header = cookie.toString(); var cookiejar = new tough.CookieJar(); cookiejar.setCookie(cookie, 'http://currentdomain.example.com/path', cb); // ... cookiejar.getCookies('http://example.com/otherpath',function(err,cookies) { res.headers['cookie'] = cookies.join('; '); }); ``` # Installation It's _so_ easy! `npm install tough-cookie` Why the name? NPM modules `cookie`, `cookies` and `cookiejar` were already taken. ## Version Support Support for versions of node.js will follow that of the [request](https://www.npmjs.com/package/request) module. # API ## tough Functions on the module you get from `require('tough-cookie')`. All can be used as pure functions and don't need to be "bound". **Note**: prior to 1.0.x, several of these functions took a `strict` parameter. This has since been removed from the API as it was no longer necessary. ### `parseDate(string)` Parse a cookie date string into a `Date`. Parses according to RFC6265 Section 5.1.1, not `Date.parse()`. ### `formatDate(date)` Format a Date into a RFC1123 string (the RFC6265-recommended format). ### `canonicalDomain(str)` Transforms a domain-name into a canonical domain-name. The canonical domain-name is a trimmed, lowercased, stripped-of-leading-dot and optionally punycode-encoded domain-name (Section 5.1.2 of RFC6265). For the most part, this function is idempotent (can be run again on its output without ill effects). 
### `domainMatch(str,domStr[,canonicalize=true])` Answers "does this real domain match the domain in a cookie?". The `str` is the "current" domain-name and the `domStr` is the "cookie" domain-name. Matches according to RFC6265 Section 5.1.3, but it helps to think of it as a "suffix match". The `canonicalize` parameter will run the other two parameters through `canonicalDomain` or not. ### `defaultPath(path)` Given a current request/response path, gives the Path apropriate for storing in a cookie. This is basically the "directory" of a "file" in the path, but is specified by Section 5.1.4 of the RFC. The `path` parameter MUST be _only_ the pathname part of a URI (i.e. excludes the hostname, query, fragment, etc.). This is the `.pathname` property of node's `uri.parse()` output. ### `pathMatch(reqPath,cookiePath)` Answers "does the request-path path-match a given cookie-path?" as per RFC6265 Section 5.1.4. Returns a boolean. This is essentially a prefix-match where `cookiePath` is a prefix of `reqPath`. ### `parse(cookieString[, options])` alias for `Cookie.parse(cookieString[, options])` ### `fromJSON(string)` alias for `Cookie.fromJSON(string)` ### `getPublicSuffix(hostname)` Returns the public suffix of this hostname. The public suffix is the shortest domain-name upon which a cookie can be set. Returns `null` if the hostname cannot have cookies set for it. For example: `www.example.com` and `www.subdomain.example.com` both have public suffix `example.com`. For further information, see http://publicsuffix.org/. This module derives its list from that site. This call is currently a wrapper around [`psl`](https://www.npmjs.com/package/psl)'s [get() method](https://www.npmjs.com/package/psl#pslgetdomain). ### `cookieCompare(a,b)` For use with `.sort()`, sorts a list of cookies into the recommended order given in the RFC (Section 5.4 step 2). 
The sort algorithm is, in order of precedence: * Longest `.path` * oldest `.creation` (which has a 1ms precision, same as `Date`) * lowest `.creationIndex` (to get beyond the 1ms precision) ``` javascript var cookies = [ /* unsorted array of Cookie objects */ ]; cookies = cookies.sort(cookieCompare); ``` **Note**: Since JavaScript's `Date` is limited to a 1ms precision, cookies within the same milisecond are entirely possible. This is especially true when using the `now` option to `.setCookie()`. The `.creationIndex` property is a per-process global counter, assigned during construction with `new Cookie()`. This preserves the spirit of the RFC sorting: older cookies go first. This works great for `MemoryCookieStore`, since `Set-Cookie` headers are parsed in order, but may not be so great for distributed systems. Sophisticated `Store`s may wish to set this to some other _logical clock_ such that if cookies A and B are created in the same millisecond, but cookie A is created before cookie B, then `A.creationIndex < B.creationIndex`. If you want to alter the global counter, which you probably _shouldn't_ do, it's stored in `Cookie.cookiesCreated`. ### `permuteDomain(domain)` Generates a list of all possible domains that `domainMatch()` the parameter. May be handy for implementing cookie stores. ### `permutePath(path)` Generates a list of all possible paths that `pathMatch()` the parameter. May be handy for implementing cookie stores. ## Cookie Exported via `tough.Cookie`. ### `Cookie.parse(cookieString[, options])` Parses a single Cookie or Set-Cookie HTTP header into a `Cookie` object. Returns `undefined` if the string can't be parsed. The options parameter is not required and currently has only one property: * _loose_ - boolean - if `true` enable parsing of key-less cookies like `=abc` and `=`, which are not RFC-compliant. If options is not an object, it is ignored, which means you can use `Array#map` with it. 
Here's how to process the Set-Cookie header(s) on a node HTTP/HTTPS response: ``` javascript if (res.headers['set-cookie'] instanceof Array) cookies = res.headers['set-cookie'].map(Cookie.parse); else cookies = [Cookie.parse(res.headers['set-cookie'])]; ``` _Note:_ in version 2.3.3, tough-cookie limited the number of spaces before the `=` to 256 characters. This limitation has since been removed. See [Issue 92](https://github.com/salesforce/tough-cookie/issues/92) ### Properties Cookie object properties: * _key_ - string - the name or key of the cookie (default "") * _value_ - string - the value of the cookie (default "") * _expires_ - `Date` - if set, the `Expires=` attribute of the cookie (defaults to the string `"Infinity"`). See `setExpires()` * _maxAge_ - seconds - if set, the `Max-Age=` attribute _in seconds_ of the cookie. May also be set to strings `"Infinity"` and `"-Infinity"` for non-expiry and immediate-expiry, respectively. See `setMaxAge()` * _domain_ - string - the `Domain=` attribute of the cookie * _path_ - string - the `Path=` of the cookie * _secure_ - boolean - the `Secure` cookie flag * _httpOnly_ - boolean - the `HttpOnly` cookie flag * _extensions_ - `Array` - any unrecognized cookie attributes as strings (even if equal-signs inside) * _creation_ - `Date` - when this cookie was constructed * _creationIndex_ - number - set at construction, used to provide greater sort precision (please see `cookieCompare(a,b)` for a full explanation) After a cookie has been passed through `CookieJar.setCookie()` it will have the following additional attributes: * _hostOnly_ - boolean - is this a host-only cookie (i.e. no Domain field was set, but was instead implied) * _pathIsDefault_ - boolean - if true, there was no Path field on the cookie and `defaultPath()` was used to derive one. * _creation_ - `Date` - **modified** from construction to when the cookie was added to the jar * _lastAccessed_ - `Date` - last time the cookie got accessed. 
Will affect cookie cleaning once implemented. Using `cookiejar.getCookies(...)` will update this attribute. ### `Cookie([{properties}])` Receives an options object that can contain any of the above Cookie properties, uses the default for unspecified properties. ### `.toString()` encode to a Set-Cookie header value. The Expires cookie field is set using `formatDate()`, but is omitted entirely if `.expires` is `Infinity`. ### `.cookieString()` encode to a Cookie header value (i.e. the `.key` and `.value` properties joined with '='). ### `.setExpires(String)` sets the expiry based on a date-string passed through `parseDate()`. If parseDate returns `null` (i.e. can't parse this date string), `.expires` is set to `"Infinity"` (a string) is set. ### `.setMaxAge(number)` sets the maxAge in seconds. Coerces `-Infinity` to `"-Infinity"` and `Infinity` to `"Infinity"` so it JSON serializes correctly. ### `.expiryTime([now=Date.now()])` ### `.expiryDate([now=Date.now()])` expiryTime() Computes the absolute unix-epoch milliseconds that this cookie expires. expiryDate() works similarly, except it returns a `Date` object. Note that in both cases the `now` parameter should be milliseconds. Max-Age takes precedence over Expires (as per the RFC). The `.creation` attribute -- or, by default, the `now` parameter -- is used to offset the `.maxAge` attribute. If Expires (`.expires`) is set, that's returned. Otherwise, `expiryTime()` returns `Infinity` and `expiryDate()` returns a `Date` object for "Tue, 19 Jan 2038 03:14:07 GMT" (latest date that can be expressed by a 32-bit `time_t`; the common limit for most user-agents). ### `.TTL([now=Date.now()])` compute the TTL relative to `now` (milliseconds). The same precedence rules as for `expiryTime`/`expiryDate` apply. The "number" `Infinity` is returned for cookies without an explicit expiry and `0` is returned if the cookie is expired. Otherwise a time-to-live in milliseconds is returned. 
### `.canonicalizedDoman()` ### `.cdomain()` return the canonicalized `.domain` field. This is lower-cased and punycode (RFC3490) encoded if the domain has any non-ASCII characters. ### `.toJSON()` For convenience in using `JSON.serialize(cookie)`. Returns a plain-old `Object` that can be JSON-serialized. Any `Date` properties (i.e., `.expires`, `.creation`, and `.lastAccessed`) are exported in ISO format (`.toISOString()`). **NOTE**: Custom `Cookie` properties will be discarded. In tough-cookie 1.x, since there was no `.toJSON` method explicitly defined, all enumerable properties were captured. If you want a property to be serialized, add the property name to the `Cookie.serializableProperties` Array. ### `Cookie.fromJSON(strOrObj)` Does the reverse of `cookie.toJSON()`. If passed a string, will `JSON.parse()` that first. Any `Date` properties (i.e., `.expires`, `.creation`, and `.lastAccessed`) are parsed via `Date.parse()`, not the tough-cookie `parseDate`, since it's JavaScript/JSON-y timestamps being handled at this layer. Returns `null` upon JSON parsing error. ### `.clone()` Does a deep clone of this cookie, exactly implemented as `Cookie.fromJSON(cookie.toJSON())`. ### `.validate()` Status: *IN PROGRESS*. Works for a few things, but is by no means comprehensive. validates cookie attributes for semantic correctness. Useful for "lint" checking any Set-Cookie headers you generate. For now, it returns a boolean, but eventually could return a reason string -- you can future-proof with this construct: ``` javascript if (cookie.validate() === true) { // it's tasty } else { // yuck! } ``` ## CookieJar Exported via `tough.CookieJar`. ### `CookieJar([store],[options])` Simply use `new CookieJar()`. If you'd like to use a custom store, pass that to the constructor otherwise a `MemoryCookieStore` will be created and used. 
The `options` object can be omitted and can have the following properties: * _rejectPublicSuffixes_ - boolean - default `true` - reject cookies with domains like "com" and "co.uk" * _looseMode_ - boolean - default `false` - accept malformed cookies like `bar` and `=bar`, which have an implied empty name. This is not in the standard, but is used sometimes on the web and is accepted by (most) browsers. Since eventually this module would like to support database/remote/etc. CookieJars, continuation passing style is used for CookieJar methods. ### `.setCookie(cookieOrString, currentUrl, [{options},] cb(err,cookie))` Attempt to set the cookie in the cookie jar. If the operation fails, an error will be given to the callback `cb`, otherwise the cookie is passed through. The cookie will have updated `.creation`, `.lastAccessed` and `.hostOnly` properties. The `options` object can be omitted and can have the following properties: * _http_ - boolean - default `true` - indicates if this is an HTTP or non-HTTP API. Affects HttpOnly cookies. * _secure_ - boolean - autodetect from url - indicates if this is a "Secure" API. If the currentUrl starts with `https:` or `wss:` then this is defaulted to `true`, otherwise `false`. * _now_ - Date - default `new Date()` - what to use for the creation/access time of cookies * _ignoreError_ - boolean - default `false` - silently ignore things like parse errors and invalid domains. `Store` errors aren't ignored by this option. As per the RFC, the `.hostOnly` property is set if there was no "Domain=" parameter in the cookie string (or `.domain` was null on the Cookie object). The `.domain` property is set to the fully-qualified hostname of `currentUrl` in this case. Matching this cookie requires an exact hostname match (not a `domainMatch` as per usual). ### `.setCookieSync(cookieOrString, currentUrl, [{options}])` Synchronous version of `setCookie`; only works with synchronous stores (e.g. the default `MemoryCookieStore`). 
### `.getCookies(currentUrl, [{options},] cb(err,cookies))` Retrieve the list of cookies that can be sent in a Cookie header for the current url. If an error is encountered, that's passed as `err` to the callback, otherwise an `Array` of `Cookie` objects is passed. The array is sorted with `cookieCompare()` unless the `{sort:false}` option is given. The `options` object can be omitted and can have the following properties: * _http_ - boolean - default `true` - indicates if this is an HTTP or non-HTTP API. Affects HttpOnly cookies. * _secure_ - boolean - autodetect from url - indicates if this is a "Secure" API. If the currentUrl starts with `https:` or `wss:` then this is defaulted to `true`, otherwise `false`. * _now_ - Date - default `new Date()` - what to use for the creation/access time of cookies * _expire_ - boolean - default `true` - perform expiry-time checking of cookies and asynchronously remove expired cookies from the store. Using `false` will return expired cookies and **not** remove them from the store (which is useful for replaying Set-Cookie headers, potentially). * _allPaths_ - boolean - default `false` - if `true`, do not scope cookies by path. The default uses RFC-compliant path scoping. **Note**: may not be supported by the underlying store (the default `MemoryCookieStore` supports it). The `.lastAccessed` property of the returned cookies will have been updated. ### `.getCookiesSync(currentUrl, [{options}])` Synchronous version of `getCookies`; only works with synchronous stores (e.g. the default `MemoryCookieStore`). ### `.getCookieString(...)` Accepts the same options as `.getCookies()` but passes a string suitable for a Cookie header rather than an array to the callback. Simply maps the `Cookie` array via `.cookieString()`. ### `.getCookieStringSync(...)` Synchronous version of `getCookieString`; only works with synchronous stores (e.g. the default `MemoryCookieStore`). 
### `.getSetCookieStrings(...)` Returns an array of strings suitable for **Set-Cookie** headers. Accepts the same options as `.getCookies()`. Simply maps the cookie array via `.toString()`. ### `.getSetCookieStringsSync(...)` Synchronous version of `getSetCookieStrings`; only works with synchronous stores (e.g. the default `MemoryCookieStore`). ### `.serialize(cb(err,serializedObject))` Serialize the Jar if the underlying store supports `.getAllCookies`. **NOTE**: Custom `Cookie` properties will be discarded. If you want a property to be serialized, add the property name to the `Cookie.serializableProperties` Array. See [Serialization Format]. ### `.serializeSync()` Sync version of .serialize ### `.toJSON()` Alias of .serializeSync() for the convenience of `JSON.stringify(cookiejar)`. ### `CookieJar.deserialize(serialized, [store], cb(err,object))` A new Jar is created and the serialized Cookies are added to the underlying store. Each `Cookie` is added via `store.putCookie` in the order in which they appear in the serialization. The `store` argument is optional, but should be an instance of `Store`. By default, a new instance of `MemoryCookieStore` is created. As a convenience, if `serialized` is a string, it is passed through `JSON.parse` first. If that throws an error, this is passed to the callback. ### `CookieJar.deserializeSync(serialized, [store])` Sync version of `.deserialize`. _Note_ that the `store` must be synchronous for this to work. ### `CookieJar.fromJSON(string)` Alias of `.deserializeSync` to provide consistency with `Cookie.fromJSON()`. ### `.clone([store,]cb(err,newJar))` Produces a deep clone of this jar. Modifications to the original won't affect the clone, and vice versa. The `store` argument is optional, but should be an instance of `Store`. By default, a new instance of `MemoryCookieStore` is created. 
Transferring between store types is supported so long as the source implements `.getAllCookies()` and the destination implements `.putCookie()`.

### `.cloneSync([store])`

Synchronous version of `.clone`, returning a new `CookieJar` instance.

The `store` argument is optional, but must be a _synchronous_ `Store` instance if specified. If not passed, a new instance of `MemoryCookieStore` is used.

The _source_ and _destination_ must both be synchronous `Store`s. If one or both stores are asynchronous, use `.clone` instead. Recall that `MemoryCookieStore` supports both synchronous and asynchronous API calls.

## Store

Base class for CookieJar stores. Available as `tough.Store`.

## Store API

The storage model for each `CookieJar` instance can be replaced with a custom implementation. The default is `MemoryCookieStore` which can be found in the `lib/memstore.js` file. The API uses continuation-passing-style to allow for asynchronous stores.

Stores should inherit from the base `Store` class, which is available as `require('tough-cookie').Store`.

Stores are asynchronous by default, but if `store.synchronous` is set to `true`, then the `*Sync` methods of the containing `CookieJar` can be used (however, the store itself must still implement the continuation-passing-style API).

All `domain` parameters will have been normalized before calling.

The Cookie store must have all of the following methods.

### `store.findCookie(domain, path, key, cb(err,cookie))`

Retrieve a cookie with the given domain, path and key (a.k.a. name). The RFC maintains that exactly one of these cookies should exist in a store. If the store is using versioning, this means that the latest/newest such cookie should be returned.

Callback takes an error and the resulting `Cookie` object. If no cookie is found then `null` MUST be passed instead (i.e. not an error).

### `store.findCookies(domain, path, cb(err,cookies))`

Locates cookies matching the given domain and path. This is most often called in the context of `cookiejar.getCookies()` above.
If no cookies are found, the callback MUST be passed an empty array. The resulting list will be checked for applicability to the current request according to the RFC (domain-match, path-match, http-only-flag, secure-flag, expiry, etc.), so it's OK to use an optimistic search algorithm when implementing this method. However, the search algorithm used SHOULD try to find cookies that `domainMatch()` the domain and `pathMatch()` the path in order to limit the amount of checking that needs to be done. As of version 0.9.12, the `allPaths` option to `cookiejar.getCookies()` above will cause the path here to be `null`. If the path is `null`, path-matching MUST NOT be performed (i.e. domain-matching only). ### `store.putCookie(cookie, cb(err))` Adds a new cookie to the store. The implementation SHOULD replace any existing cookie with the same `.domain`, `.path`, and `.key` properties -- depending on the nature of the implementation, it's possible that between the call to `fetchCookie` and `putCookie` that a duplicate `putCookie` can occur. The `cookie` object MUST NOT be modified; the caller will have already updated the `.creation` and `.lastAccessed` properties. Pass an error if the cookie cannot be stored. ### `store.updateCookie(oldCookie, newCookie, cb(err))` Update an existing cookie. The implementation MUST update the `.value` for a cookie with the same `domain`, `.path` and `.key`. The implementation SHOULD check that the old value in the store is equivalent to `oldCookie` - how the conflict is resolved is up to the store. The `.lastAccessed` property will always be different between the two objects (to the precision possible via JavaScript's clock). Both `.creation` and `.creationIndex` are guaranteed to be the same. Stores MAY ignore or defer the `.lastAccessed` change at the cost of affecting how cookies are selected for automatic deletion (e.g., least-recently-used, which is up to the store to implement). 
Stores may wish to optimize changing the `.value` of the cookie in the store versus storing a new cookie. If the implementation doesn't define this method a stub that calls `putCookie(newCookie,cb)` will be added to the store object. The `newCookie` and `oldCookie` objects MUST NOT be modified. Pass an error if the newCookie cannot be stored. ### `store.removeCookie(domain, path, key, cb(err))` Remove a cookie from the store (see notes on `findCookie` about the uniqueness constraint). The implementation MUST NOT pass an error if the cookie doesn't exist; only pass an error due to the failure to remove an existing cookie. ### `store.removeCookies(domain, path, cb(err))` Removes matching cookies from the store. The `path` parameter is optional, and if missing means all paths in a domain should be removed. Pass an error ONLY if removing any existing cookies failed. ### `store.getAllCookies(cb(err, cookies))` Produces an `Array` of all cookies during `jar.serialize()`. The items in the array can be true `Cookie` objects or generic `Object`s with the [Serialization Format] data structure. Cookies SHOULD be returned in creation order to preserve sorting via `compareCookies()`. For reference, `MemoryCookieStore` will sort by `.creationIndex` since it uses true `Cookie` objects internally. If you don't return the cookies in creation order, they'll still be sorted by creation time, but this only has a precision of 1ms. See `compareCookies` for more detail. Pass an error if retrieval fails. ## MemoryCookieStore Inherits from `Store`. A just-in-memory CookieJar synchronous store implementation, used by default. Despite being a synchronous implementation, it's usable with both the synchronous and asynchronous forms of the `CookieJar` API. ## Community Cookie Stores These are some Store implementations authored and maintained by the community. 
They aren't official and we don't vouch for them but you may be interested to have a look: - [`db-cookie-store`](https://github.com/JSBizon/db-cookie-store): SQL including SQLite-based databases - [`file-cookie-store`](https://github.com/JSBizon/file-cookie-store): Netscape cookie file format on disk - [`redis-cookie-store`](https://github.com/benkroeger/redis-cookie-store): Redis - [`tough-cookie-filestore`](https://github.com/mitsuru/tough-cookie-filestore): JSON on disk - [`tough-cookie-web-storage-store`](https://github.com/exponentjs/tough-cookie-web-storage-store): DOM localStorage and sessionStorage # Serialization Format **NOTE**: if you want to have custom `Cookie` properties serialized, add the property name to `Cookie.serializableProperties`. ```js { // The version of tough-cookie that serialized this jar. version: 'tough-cookie@1.x.y', // add the store type, to make humans happy: storeType: 'MemoryCookieStore', // CookieJar configuration: rejectPublicSuffixes: true, // ... future items go here // Gets filled from jar.store.getAllCookies(): cookies: [ { key: 'string', value: 'string', // ... /* other Cookie.serializableProperties go here */ } ] } ``` # Copyright and License (tl;dr: BSD-3-Clause with some MPL/2.0) ```text Copyright (c) 2015, Salesforce.com, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/tough-cookie/README.md
0.895919
0.891197
README.md
pypi
'use strict';
// Builds the table of ANSI escape-code styles (modifiers, foreground and
// background colors) used by chalk. Exported lazily via a getter below.
const colorConvert = require('color-convert');

// Wrap a color-convert function so it emits a basic 16-color escape code.
// `offset` is 0 for foreground, 10 for background.
const wrapAnsi16 = (fn, offset) => function () {
	const code = fn.apply(colorConvert, arguments);
	return `\u001B[${code + offset}m`;
};

// Wrap a color-convert function so it emits a 256-color (8-bit) escape code.
const wrapAnsi256 = (fn, offset) => function () {
	const code = fn.apply(colorConvert, arguments);
	return `\u001B[${38 + offset};5;${code}m`;
};

// Wrap a color-convert function so it emits a 24-bit truecolor escape code.
const wrapAnsi16m = (fn, offset) => function () {
	const rgb = fn.apply(colorConvert, arguments);
	return `\u001B[${38 + offset};2;${rgb[0]};${rgb[1]};${rgb[2]}m`;
};

// Assemble the full styles object. Each style maps to {open, close} escape
// sequences; `codes` maps every open code to its close code.
function assembleStyles() {
	const codes = new Map();
	const styles = {
		modifier: {
			reset: [0, 0],
			// 21 isn't widely supported and 22 does the same thing
			bold: [1, 22],
			dim: [2, 22],
			italic: [3, 23],
			underline: [4, 24],
			inverse: [7, 27],
			hidden: [8, 28],
			strikethrough: [9, 29]
		},
		color: {
			black: [30, 39],
			red: [31, 39],
			green: [32, 39],
			yellow: [33, 39],
			blue: [34, 39],
			magenta: [35, 39],
			cyan: [36, 39],
			white: [37, 39],
			gray: [90, 39],

			// Bright color
			redBright: [91, 39],
			greenBright: [92, 39],
			yellowBright: [93, 39],
			blueBright: [94, 39],
			magentaBright: [95, 39],
			cyanBright: [96, 39],
			whiteBright: [97, 39]
		},
		bgColor: {
			bgBlack: [40, 49],
			bgRed: [41, 49],
			bgGreen: [42, 49],
			bgYellow: [43, 49],
			bgBlue: [44, 49],
			bgMagenta: [45, 49],
			bgCyan: [46, 49],
			bgWhite: [47, 49],

			// Bright color
			bgBlackBright: [100, 49],
			bgRedBright: [101, 49],
			bgGreenBright: [102, 49],
			bgYellowBright: [103, 49],
			bgBlueBright: [104, 49],
			bgMagentaBright: [105, 49],
			bgCyanBright: [106, 49],
			bgWhiteBright: [107, 49]
		}
	};

	// Fix humans
	styles.color.grey = styles.color.gray;

	// Turn every [open, close] pair into {open, close} escape strings, hoist
	// each style to the top level, and record the open->close code mapping.
	for (const groupName of Object.keys(styles)) {
		const group = styles[groupName];

		for (const styleName of Object.keys(group)) {
			const style = group[styleName];

			styles[styleName] = {
				open: `\u001B[${style[0]}m`,
				close: `\u001B[${style[1]}m`
			};

			group[styleName] = styles[styleName];

			codes.set(style[0], style[1]);
		}

		// Keep the group accessible but out of enumeration, so iterating
		// `styles` only yields the flattened style names.
		Object.defineProperty(styles, groupName, {
			value: group,
			enumerable: false
		});

		// NOTE(review): this sits inside the group loop, so `codes` is
		// redefined once per group — harmless (same value each time) but
		// presumably intended to live after the loop; confirm upstream.
		Object.defineProperty(styles, 'codes', {
			value: codes,
			enumerable: false
		});
	}

	// Identity converters for inputs that are already ANSI codes / RGB triples.
	const ansi2ansi = n => n;
	const rgb2rgb = (r, g, b) => [r, g, b];

	styles.color.close = '\u001B[39m';
	styles.bgColor.close = '\u001B[49m';

	// Seed each color model with its identity converter (offset 0 = fg,
	// offset 10 = bg).
	styles.color.ansi = {
		ansi: wrapAnsi16(ansi2ansi, 0)
	};
	styles.color.ansi256 = {
		ansi256: wrapAnsi256(ansi2ansi, 0)
	};
	styles.color.ansi16m = {
		rgb: wrapAnsi16m(rgb2rgb, 0)
	};

	styles.bgColor.ansi = {
		ansi: wrapAnsi16(ansi2ansi, 10)
	};
	styles.bgColor.ansi256 = {
		ansi256: wrapAnsi256(ansi2ansi, 10)
	};
	styles.bgColor.ansi16m = {
		rgb: wrapAnsi16m(rgb2rgb, 10)
	};

	// Pull in every conversion suite color-convert offers (rgb, hsl, hex,
	// keyword, ...) and expose it under each supported color model.
	for (let key of Object.keys(colorConvert)) {
		if (typeof colorConvert[key] !== 'object') {
			continue;
		}

		const suite = colorConvert[key];

		// color-convert calls the 16-color model "ansi16"; we expose it as "ansi".
		if (key === 'ansi16') {
			key = 'ansi';
		}

		if ('ansi16' in suite) {
			styles.color.ansi[key] = wrapAnsi16(suite.ansi16, 0);
			styles.bgColor.ansi[key] = wrapAnsi16(suite.ansi16, 10);
		}

		if ('ansi256' in suite) {
			styles.color.ansi256[key] = wrapAnsi256(suite.ansi256, 0);
			styles.bgColor.ansi256[key] = wrapAnsi256(suite.ansi256, 10);
		}

		if ('rgb' in suite) {
			styles.color.ansi16m[key] = wrapAnsi16m(suite.rgb, 0);
			styles.bgColor.ansi16m[key] = wrapAnsi16m(suite.rgb, 10);
		}
	}

	return styles;
}

// Make the export immutable
Object.defineProperty(module, 'exports', {
	enumerable: true,
	get: assembleStyles
});
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/ansi-styles/index.js
0.572364
0.552962
index.js
pypi
<h1 align="center"> <br> <br> <img width="320" src="media/logo.svg" alt="Chalk"> <br> <br> <br> </h1> > Terminal string styling done right [![Build Status](https://travis-ci.org/chalk/chalk.svg?branch=master)](https://travis-ci.org/chalk/chalk) [![Coverage Status](https://coveralls.io/repos/github/chalk/chalk/badge.svg?branch=master)](https://coveralls.io/github/chalk/chalk?branch=master) [![](https://img.shields.io/badge/unicorn-approved-ff69b4.svg)](https://www.youtube.com/watch?v=9auOCbH5Ns4) [![XO code style](https://img.shields.io/badge/code_style-XO-5ed9c7.svg)](https://github.com/xojs/xo) [![Mentioned in Awesome Node.js](https://awesome.re/mentioned-badge.svg)](https://github.com/sindresorhus/awesome-nodejs) ### [See what's new in Chalk 2](https://github.com/chalk/chalk/releases/tag/v2.0.0) <img src="https://cdn.rawgit.com/chalk/ansi-styles/8261697c95bf34b6c7767e2cbe9941a851d59385/screenshot.svg" alt="" width="900"> ## Highlights - Expressive API - Highly performant - Ability to nest styles - [256/Truecolor color support](#256-and-truecolor-color-support) - Auto-detects color support - Doesn't extend `String.prototype` - Clean and focused - Actively maintained - [Used by ~23,000 packages](https://www.npmjs.com/browse/depended/chalk) as of December 31, 2017 ## Install ```console $ npm install chalk ``` <a href="https://www.patreon.com/sindresorhus"> <img src="https://c5.patreon.com/external/logo/become_a_patron_button@2x.png" width="160"> </a> ## Usage ```js const chalk = require('chalk'); console.log(chalk.blue('Hello world!')); ``` Chalk comes with an easy to use composable API where you just chain and nest the styles you want. 
```js const chalk = require('chalk'); const log = console.log; // Combine styled and normal strings log(chalk.blue('Hello') + ' World' + chalk.red('!')); // Compose multiple styles using the chainable API log(chalk.blue.bgRed.bold('Hello world!')); // Pass in multiple arguments log(chalk.blue('Hello', 'World!', 'Foo', 'bar', 'biz', 'baz')); // Nest styles log(chalk.red('Hello', chalk.underline.bgBlue('world') + '!')); // Nest styles of the same type even (color, underline, background) log(chalk.green( 'I am a green line ' + chalk.blue.underline.bold('with a blue substring') + ' that becomes green again!' )); // ES2015 template literal log(` CPU: ${chalk.red('90%')} RAM: ${chalk.green('40%')} DISK: ${chalk.yellow('70%')} `); // ES2015 tagged template literal log(chalk` CPU: {red ${cpu.totalPercent}%} RAM: {green ${ram.used / ram.total * 100}%} DISK: {rgb(255,131,0) ${disk.used / disk.total * 100}%} `); // Use RGB colors in terminal emulators that support it. log(chalk.keyword('orange')('Yay for orange colored text!')); log(chalk.rgb(123, 45, 67).underline('Underlined reddish color')); log(chalk.hex('#DEADED').bold('Bold gray!')); ``` Easily define your own themes: ```js const chalk = require('chalk'); const error = chalk.bold.red; const warning = chalk.keyword('orange'); console.log(error('Error!')); console.log(warning('Warning!')); ``` Take advantage of console.log [string substitution](https://nodejs.org/docs/latest/api/console.html#console_console_log_data_args): ```js const name = 'Sindre'; console.log(chalk.green('Hello %s'), name); //=> 'Hello Sindre' ``` ## API ### chalk.`<style>[.<style>...](string, [string...])` Example: `chalk.red.bold.underline('Hello', 'world');` Chain [styles](#styles) and call the last one as a method with a string argument. Order doesn't matter, and later styles take precedent in case of a conflict. This simply means that `chalk.red.yellow.green` is equivalent to `chalk.green`. Multiple arguments will be separated by space. 
### chalk.enabled Color support is automatically detected, as is the level (see `chalk.level`). However, if you'd like to simply enable/disable Chalk, you can do so via the `.enabled` property. Chalk is enabled by default unless explicitly disabled via the constructor or `chalk.level` is `0`. If you need to change this in a reusable module, create a new instance: ```js const ctx = new chalk.constructor({enabled: false}); ``` ### chalk.level Color support is automatically detected, but you can override it by setting the `level` property. You should however only do this in your own code as it applies globally to all Chalk consumers. If you need to change this in a reusable module, create a new instance: ```js const ctx = new chalk.constructor({level: 0}); ``` Levels are as follows: 0. All colors disabled 1. Basic color support (16 colors) 2. 256 color support 3. Truecolor support (16 million colors) ### chalk.supportsColor Detect whether the terminal [supports color](https://github.com/chalk/supports-color). Used internally and handled for you, but exposed for convenience. Can be overridden by the user with the flags `--color` and `--no-color`. For situations where using `--color` is not possible, add the environment variable `FORCE_COLOR=1` to forcefully enable color or `FORCE_COLOR=0` to forcefully disable. The use of `FORCE_COLOR` overrides all other color support checks. Explicit 256/Truecolor mode can be enabled using the `--color=256` and `--color=16m` flags, respectively. 
## Styles ### Modifiers - `reset` - `bold` - `dim` - `italic` *(Not widely supported)* - `underline` - `inverse` - `hidden` - `strikethrough` *(Not widely supported)* - `visible` (Text is emitted only if enabled) ### Colors - `black` - `red` - `green` - `yellow` - `blue` *(On Windows the bright version is used since normal blue is illegible)* - `magenta` - `cyan` - `white` - `gray` ("bright black") - `redBright` - `greenBright` - `yellowBright` - `blueBright` - `magentaBright` - `cyanBright` - `whiteBright` ### Background colors - `bgBlack` - `bgRed` - `bgGreen` - `bgYellow` - `bgBlue` - `bgMagenta` - `bgCyan` - `bgWhite` - `bgBlackBright` - `bgRedBright` - `bgGreenBright` - `bgYellowBright` - `bgBlueBright` - `bgMagentaBright` - `bgCyanBright` - `bgWhiteBright` ## Tagged template literal Chalk can be used as a [tagged template literal](http://exploringjs.com/es6/ch_template-literals.html#_tagged-template-literals). ```js const chalk = require('chalk'); const miles = 18; const calculateFeet = miles => miles * 5280; console.log(chalk` There are {bold 5280 feet} in a mile. In {bold ${miles} miles}, there are {green.bold ${calculateFeet(miles)} feet}. `); ``` Blocks are delimited by an opening curly brace (`{`), a style, some content, and a closing curly brace (`}`). Template styles are chained exactly like normal Chalk styles. The following two statements are equivalent: ```js console.log(chalk.bold.rgb(10, 100, 200)('Hello!')); console.log(chalk`{bold.rgb(10,100,200) Hello!}`); ``` Note that function styles (`rgb()`, `hsl()`, `keyword()`, etc.) may not contain spaces between parameters. All interpolated values (`` chalk`${foo}` ``) are converted to strings via the `.toString()` method. All curly braces (`{` and `}`) in interpolated value strings are escaped. ## 256 and Truecolor color support Chalk supports 256 colors and [Truecolor](https://gist.github.com/XVilka/8346728) (16 million colors) on supported terminal apps. 
Colors are downsampled from 16 million RGB values to an ANSI color format that is supported by the terminal emulator (or by specifying `{level: n}` as a Chalk option). For example, Chalk configured to run at level 1 (basic color support) will downsample an RGB value of #FF0000 (red) to 31 (ANSI escape for red). Examples: - `chalk.hex('#DEADED').underline('Hello, world!')` - `chalk.keyword('orange')('Some orange text')` - `chalk.rgb(15, 100, 204).inverse('Hello!')` Background versions of these models are prefixed with `bg` and the first level of the module capitalized (e.g. `keyword` for foreground colors and `bgKeyword` for background colors). - `chalk.bgHex('#DEADED').underline('Hello, world!')` - `chalk.bgKeyword('orange')('Some orange text')` - `chalk.bgRgb(15, 100, 204).inverse('Hello!')` The following color models can be used: - [`rgb`](https://en.wikipedia.org/wiki/RGB_color_model) - Example: `chalk.rgb(255, 136, 0).bold('Orange!')` - [`hex`](https://en.wikipedia.org/wiki/Web_colors#Hex_triplet) - Example: `chalk.hex('#FF8800').bold('Orange!')` - [`keyword`](https://www.w3.org/wiki/CSS/Properties/color/keywords) (CSS keywords) - Example: `chalk.keyword('orange').bold('Orange!')` - [`hsl`](https://en.wikipedia.org/wiki/HSL_and_HSV) - Example: `chalk.hsl(32, 100, 50).bold('Orange!')` - [`hsv`](https://en.wikipedia.org/wiki/HSL_and_HSV) - Example: `chalk.hsv(32, 100, 100).bold('Orange!')` - [`hwb`](https://en.wikipedia.org/wiki/HWB_color_model) - Example: `chalk.hwb(32, 0, 50).bold('Orange!')` - `ansi16` - `ansi256` ## Windows If you're on Windows, do yourself a favor and use [`cmder`](http://cmder.net/) instead of `cmd.exe`. ## Origin story [colors.js](https://github.com/Marak/colors.js) used to be the most popular string styling module, but it has serious deficiencies like extending `String.prototype` which causes all kinds of [problems](https://github.com/yeoman/yo/issues/68) and the package is unmaintained. 
Although there are other packages, they either do too much or not enough. Chalk is a clean and focused alternative. ## Related - [chalk-cli](https://github.com/chalk/chalk-cli) - CLI for this module - [ansi-styles](https://github.com/chalk/ansi-styles) - ANSI escape codes for styling strings in the terminal - [supports-color](https://github.com/chalk/supports-color) - Detect whether a terminal supports color - [strip-ansi](https://github.com/chalk/strip-ansi) - Strip ANSI escape codes - [strip-ansi-stream](https://github.com/chalk/strip-ansi-stream) - Strip ANSI escape codes from a stream - [has-ansi](https://github.com/chalk/has-ansi) - Check if a string has ANSI escape codes - [ansi-regex](https://github.com/chalk/ansi-regex) - Regular expression for matching ANSI escape codes - [wrap-ansi](https://github.com/chalk/wrap-ansi) - Wordwrap a string with ANSI escape codes - [slice-ansi](https://github.com/chalk/slice-ansi) - Slice a string with ANSI escape codes - [color-convert](https://github.com/qix-/color-convert) - Converts colors between different models - [chalk-animation](https://github.com/bokub/chalk-animation) - Animate strings in the terminal - [gradient-string](https://github.com/bokub/gradient-string) - Apply color gradients to strings - [chalk-pipe](https://github.com/LitoMore/chalk-pipe) - Create chalk style schemes with simpler style strings - [terminal-link](https://github.com/sindresorhus/terminal-link) - Create clickable links in the terminal ## Maintainers - [Sindre Sorhus](https://github.com/sindresorhus) - [Josh Junon](https://github.com/qix-) ## License MIT
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/chalk/readme.md
0.450359
0.910227
readme.md
pypi
# 2.29.0 **Features:** * added utility `isCompilerOptionEnabled` # 2.28.0 Typeguards are now split into multiple submodules for each version of TypeScript (starting with 2.8.0). That means you can now import directly from `"tsutils/typeguard/2.8"` to get compatible declaraton files for TypeScript@2.8. For more information please read the relevant section in [README.md](README.md). **Features:** * added typeguards: `isTupleType`, `isOptionalTypeNode`, `isRestTypeNode`, `isSyntheticExpression` (currently available from `"tsutils/typeguard/3.0"`) * added utility `isStrictCompilerOptionEnabled` # 2.27.2 Avoid crash caused by removed function in `typescript@3.0.0`. # 2.27.1 Added support for TypeScript@3.0.0 nightly builds. # 2.27.0 **Features:** * added `getIIFE` utility # 2.26.2 **Bugfixes:** * `forEachComment` and `forEachTokenWithTrivia` no longer duplicate comments around missing nodes # 2.26.1 **Bugfixes:** * fixed crash in `hasSideEffects` with tagged template literal without substitution: ``tag`template` `` # 2.26.0 **Features:** * added typeguard `isLiteralTypeNode` * added support for type imports (`type T = import('foo')`) to `findImports` via `ImportKind.ImportType` # 2.25.1 **Bugfixes:** * `collectVariableUsage`: fixed name lookup in function signatures to match runtime behavior. Note that this is not completely fixed in TypeScript, yet. 
See: [Microsoft/TypeScript#22825](https://github.com/Microsoft/TypeScript/issues/22825) and [Microsoft/TypeScript#22769](https://github.com/Microsoft/TypeScript/issues/22769) # 2.25.0 **Features:** * added utilities: `isStatementInAmbientContext` and `isAmbientModuleBlock` # 2.24.0 **Features:** * added typeguards for typescript@2.8: `isConditionalTypeNode`, `isInferTypeNode`, `isConditionalType`, `isInstantiableType`, `isSubstitutionType` # 2.23.0 **Features:** * added typeguard `isForInOrOfStatement` **Bugfixes:** * correctly handle comments in generic JSX elements: `<MyComponent<string>/*comment*/></MyComponent>` * fixed a bug with false positive trailing comments at the end of JSX self closing element: `<div><br/>/*no comment*/</div>` # 2.22.2 **Bugfixes:** * `collectVariableUsage`: handle ConditionalTypes and `infer T`, which will be introduced in TypeScript@2.8.0 and are already available in nightly builds * `isLiteralType` no longer returns true for `ts.TypeFlags.BooleanLiteral` as this is not a `ts.LiteralType` # 2.22.1 **Bugfixes:** * `endsControlFlow`: * handle loops that might not even run a single iteration * handle constant boolean conditions in loops and if # 2.22.0 **Features:** * added `isFalsyType` utility # 2.21.2 **Bugfixes:** * fixed compile error with `typescript@2.8.0-dev` # 2.21.1 **Bugfixes:** * `isReassignmentTarget`: handle type assertions and non-null assertion # 2.21.0 **Bugfixes:** * `forEachDeclaredVariable` uses a more precise type for the callback parameter to make it useable again with typescript@2.7.1 **Features:** * added `isUniqueESSymbolType` typeguard # 2.20.0 **Features:** * added `isThenableType` utility * added `unionTypeParts` utility # 2.19.1 **Bugfixes:** * `forEachComment`, `getCommentAtPosition` and `isPositionInComment`: skip shebang (`#! 
something`) to not miss following comments at the start of the file # 2.19.0 **Features:** * added `WrappedAst` interface that models the type of a wrapped SourceFile more accurate * added `getWrappedNodeAtPosition` utiltiy that takes a `NodeWrap` and returns the most deeply nested NodeWrap that contains the given position # 2.18.0 **Features:** * `getControlFlowEnd` accepts BlockLike as argument **Bugfixes:** * `getControlFlowEnd` and `endsControlFlow`: correctly handle nested LabeledStatements * `endsControlFlow` removed erroneous special case when an IterationStatement is passed as argument whose parent is a LabeledStatement. * if you want labels of an IterationStatement (or SwitchStatement) to be handled, you need to pass the LabeledStatement as argument. * :warning: this fix may change the returned value if you relied on the buggy behavior **Deprecations:** * deprecated overload of `getControlFlowEnd` that contains the `label` parameter. This parameter is no longer used and should no longer be passed to the function. 
# 2.17.1 **Bugfixes:** * `getControlFlowEnd` and `endsControlFlow` (#22) * ThrowStatements inside `try` are filtered out if there is a `catch` clause * TryStatements with `catch` only end control flow if `try` AND `catch` definitely end control flow # 2.17.0 **Features:** * added `kind` property to `NodeWrap` * added `getControlFlowEnd` to public API # 2.16.0 **Features:** * added `isDecorator` and `isCallLikeExpression` typeguards # 2.15.0 **Features:** * added `convertAst` utility to produce a flattened and wrapped version of the AST # 2.14.0 **Features:** * added `isDeleteExpression` * added `getLineBreakStyle` # 2.13.1 **Bugfixes:** * fixed name of `isJsxFragment` # 2.13.0 **Features:** * added support for `JsxFragment` introduced in typescript@2.6.2 * added corresponding typeguard functions # 2.12.2 **Bugfixes:** * `endsControlFlow` * added missing logic for labeled statement, iteration statements and try-catch * added missing logic for `break` and `continue` with labels * take all jump statements into account, not only the last statement * `isValidIdentifier` and `isValidNumericLiteral` handle irregular whitespace * `findImports` searches in ambient modules inside regular `.ts` files (not only `.d.ts`) * `canHaveJsDoc` is now a typeguard # 2.12.1 **Bugfixes:** * `forEachTokenWithTrivia` * handles irregular whitespace and no longer visits some tokens twice * correctly calculates the range of JsxText # 2.12.0 **API-Changes:** * deprecated `ImportOptions` if favor of the new `ImportKind` enum # 2.11.2 **Bugfixes:** * `parseJsDocOfNode`: set correct `pos`, `end` and `parent` properties. 
Also affects `getJsDoc` of `EndOfFileToken` # 2.11.1 **Bugfixes:** * `collectVariableUsage`: correctly consider catch binding as block scoped declaration inside catch block # 2.11.0 **Bugfixes:** * `getJsDoc` now correctly returns JsDoc for `EndOfFileToken` **Features:** * added utility `parseJsDocOfNode` # 2.10.0 **Features:** * added utility `findImports` to find all kinds of imports in a source file # 2.9.0 **Features:** * added typeguard `isMappedTypeNode` * added utilities `canHaveJsDoc` and `getJsDoc` # 2.8.2 **Bugfixes:** * `collectVariableUsage`: handle global augmentation like other module augmentations # 2.8.1 **Bugfixes:** * Support `typescript@2.5.1` with optional catch binding * `collectVariableUsage` fixed a bug where method decorator had method's parameters in scope # 2.8.0 * Compatibility with the latest typescript nightly * Added `getIdentifierText` to unescape identifiers across typescript versions # 2.7.1 **Bugfixes:** * `isReassignmentTarget` don't return `true` for right side of assignment # 2.7.0 **Features:** * Added `isReassignmentTarget` utility # 2.6.1 **Bugfixes:** * `getDeclarationDomain` now returns `undefined` for Parameter in IndexSignature * `collectVariableUsage` ignores Parameter in IndexSignature # 2.6.0 **Bugfixes:** * `collectVariableUsage`: * don't merge imports with global declarations * treat everything in a declaration file as exported if there is no explicit `export {};` * `isExpressionValueUsed`: handle destructuring in `for...of` **Features:** * Added `getModifier` utility * Added `DeclarationDomain.Import` to distinguish imports from other declarations # 2.5.1 **Bugfixes:** * `collectVariableUsage` ignore jump labels as in `break label;` # 2.5.0 **Bugfixes:** * `isFunctionWithBody` handles constructor overload correctly. **Features:** * Implemented `isExpressionValueUsed` to check whether the result of an expression is actually used. 
* Implemented `getDeclarationDomain` to determine if a given declaration introduces a new symbol in the value or type domain. **`collectVariableUses` is now usable** * no longer ignores signatures and its parameters * don't merge declarations and uses across domains * no longer marks exceptions in catch clause or parameter properties as exported * fixed exports of namespaces * fixed scoping of ClassExpression name * correcly handle ambient namespaces and module augmentations * fixed how `: typeof foo` is handled for parameters and function return type * **still WIP**: `export {Foo as Bar}` inside ambient namespaces and modules # 2.4.0 **Bugfixes:** * `getLineRanges`: `contentLength` now contains the correct line length when there are multiple consecutive line break characters * `getTokenAtPosition`: don't match tokens that end at the specified position (because that's already outside of their range) * deprecated the misnamed `isModfierFlagSet`, use the new `isModifierFlagSet` instead **Features:** * Added typeguard: `isJsDoc` * Added experimental scope and usage analysis (`getUsageDomain` and `collectVariableUsage`) # 2.3.0 **Bugfixes:** * `forEachComment` no longer omits some comments when callback returns a truthy value * `isPositionInComment` fixed false positive inside JSXText **Features:** * Added utility: `getCommentAtPosition` # 2.2.0 **Bugfixes:** * Fixed bit value of `SideEffectOptions.JsxElement` to be a power of 2 **Features:** * Added utilities: `getTokenAtPosition` and `isPositionInComment` # 2.1.0 **Features:** * Added typeguard `isExpression` * Added utilities: `hasSideEffects`, `getDeclarationOfBindingElement` # 2.0.0 **Breaking Changes:** * Dropped compatibility with `typescript@<2.1.0` * Removed misnamed `isNumericliteral`, use `isNumericLiteral` instead (notice the uppercase L) * Removed `isEnumLiteralType` which will cause compile errors with typescript@2.4.0 * Refactored directory structure: all imports that referenced subdirectories (e.g. 
`require('tsutils/src/typeguard')` will be broken **Features:** * New directory structure allows imports of typeguards or utils independently, e.g. (`require('tsutils/typeguard')`) # 1.9.1 **Bugfixes:** * `isObjectFlagSet` now uses the correct `objectFlags` property # 1.9.0 **Bugfixes:** * `getNextToken` no longer omits `EndOfFileToken` when there is no trivia before EOF. That means the only inputs where `getNextToken` returns `undefined` are `SourceFile` and `EndOfFileToken` **Features**: * Added typeguards for types * Added utilities for flag checking: `isNodeFlagSet`, `isTypeFlagSet`, `isSymbolFlagSet`,`isObjectFlagSet`, `isModifierFlagSet` # 1.8.0 **Features:** * Support peer dependency of typescript nightlies of 2.4.0 * Added typeguards: `isJsxAttributes`, `isIntersectionTypeNode`, `isTypeOperatorNode`, `isTypePredicateNode`, `isTypeQueryNode`, `isUnionTypeNode` # 1.7.0 **Bugfixes:** * `isFunctionScopeBoundary` now handles Interfaces, TypeAliases, FunctionSignatures, etc **Features:** * Added utilities: `isThisParameter`, `isSameLine` and `isFunctionWithBody` # 1.6.0 **Features:** * Add `isValidPropertyAccess`, `isValidNumericLiteral` and `isValidPropertyName` # 1.5.0 **Features:** * Add `isValidIdentifier` # 1.4.0 **Features:** * Add `contentLength` property to the result of `getLineRanges` # 1.3.0 **Bugfixes:** * `canHaveLeadingTrivia`: * Fix property access on undefined parent reference * Fixes: [palantir/tslint#2330](https://github.com/palantir/tslint/issues/2330) * `hasOwnThisReference`: now includes accessors on object literals **Features:** * Typeguards: * isTypeParameterDeclaration * isEnitityName # 1.2.2 **Bugfixes:** * `hasOwnThisReference`: * exclude overload signatures of function declarations * add method declarations on object literals # 1.2.1 **Bugfixes:** * Fix name of `isNumericLiteral` # 1.2.0 **Features:** * Typeguards: * isEnumMember * isExpressionWithTypeArguments * isImportSpecifier * Utilities: * isJsDocKind, isTypeNodeKind * Allow 
typescript@next in peerDependencies # 1.1.0 **Bugfixes:** * Fix isBlockScopeBoundary: Remove WithStatement, IfStatment, DoStatement and WhileStatement because they are no scope boundary whitout a block. **Features:** * Added more typeguards: * isAssertionExpression * isEmptyStatement * isJsxAttributeLike * isJsxOpeningLikeElement * isNonNullExpression * isSyntaxList * Utilities: * getNextToken, getPreviousToken * hasOwnThisReference * getLineRanges # 1.0.0 **Features:** * Initial implementation of typeguards * Utilities: * getChildOfKind * isNodeKind, isAssignmentKind * hasModifier, isParameterProperty, hasAccessModifier * getPreviousStatement, getNextStatement * getPropertyName * forEachDestructuringIdentifier, forEachDeclaredVariable * getVariableDeclarationKind, isBlockScopedVariableDeclarationList, isBlockScopedVariableDeclaration * isScopeBoundary, isFunctionScopeBoundary, isBlockScopeBoundary * forEachToken, forEachTokenWithTrivia, forEachComment * endsControlFlow
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/tsutils/CHANGELOG.md
0.730578
0.760673
CHANGELOG.md
pypi
# Form-Data [![NPM Module](https://img.shields.io/npm/v/form-data.svg)](https://www.npmjs.com/package/form-data) [![Join the chat at https://gitter.im/form-data/form-data](http://form-data.github.io/images/gitterbadge.svg)](https://gitter.im/form-data/form-data) A library to create readable ```"multipart/form-data"``` streams. Can be used to submit forms and file uploads to other web applications. The API of this library is inspired by the [XMLHttpRequest-2 FormData Interface][xhr2-fd]. [xhr2-fd]: http://dev.w3.org/2006/webapi/XMLHttpRequest-2/Overview.html#the-formdata-interface [![Linux Build](https://img.shields.io/travis/form-data/form-data/v2.3.2.svg?label=linux:4.x-9.x)](https://travis-ci.org/form-data/form-data) [![MacOS Build](https://img.shields.io/travis/form-data/form-data/v2.3.2.svg?label=macos:4.x-9.x)](https://travis-ci.org/form-data/form-data) [![Windows Build](https://img.shields.io/appveyor/ci/alexindigo/form-data/v2.3.2.svg?label=windows:4.x-9.x)](https://ci.appveyor.com/project/alexindigo/form-data) [![Coverage Status](https://img.shields.io/coveralls/form-data/form-data/v2.3.2.svg?label=code+coverage)](https://coveralls.io/github/form-data/form-data?branch=master) [![Dependency Status](https://img.shields.io/david/form-data/form-data.svg)](https://david-dm.org/form-data/form-data) [![bitHound Overall Score](https://www.bithound.io/github/form-data/form-data/badges/score.svg)](https://www.bithound.io/github/form-data/form-data) ## Install ``` npm install --save form-data ``` ## Usage In this example we are constructing a form with 3 fields that contain a string, a buffer and a file stream. 
``` javascript var FormData = require('form-data'); var fs = require('fs'); var form = new FormData(); form.append('my_field', 'my value'); form.append('my_buffer', new Buffer(10)); form.append('my_file', fs.createReadStream('/foo/bar.jpg')); ``` Also you can use http-response stream: ``` javascript var FormData = require('form-data'); var http = require('http'); var form = new FormData(); http.request('http://nodejs.org/images/logo.png', function(response) { form.append('my_field', 'my value'); form.append('my_buffer', new Buffer(10)); form.append('my_logo', response); }); ``` Or @mikeal's [request](https://github.com/request/request) stream: ``` javascript var FormData = require('form-data'); var request = require('request'); var form = new FormData(); form.append('my_field', 'my value'); form.append('my_buffer', new Buffer(10)); form.append('my_logo', request('http://nodejs.org/images/logo.png')); ``` In order to submit this form to a web application, call ```submit(url, [callback])``` method: ``` javascript form.submit('http://example.org/', function(err, res) { // res – response object (http.IncomingMessage) // res.resume(); }); ``` For more advanced request manipulations ```submit()``` method returns ```http.ClientRequest``` object, or you can choose from one of the alternative submission methods. 
### Custom options You can provide custom options, such as `maxDataSize`: ``` javascript var FormData = require('form-data'); var form = new FormData({ maxDataSize: 20971520 }); form.append('my_field', 'my value'); form.append('my_buffer', /* something big */); ``` List of available options could be found in [combined-stream](https://github.com/felixge/node-combined-stream/blob/master/lib/combined_stream.js#L7-L15) ### Alternative submission methods You can use node's http client interface: ``` javascript var http = require('http'); var request = http.request({ method: 'post', host: 'example.org', path: '/upload', headers: form.getHeaders() }); form.pipe(request); request.on('response', function(res) { console.log(res.statusCode); }); ``` Or if you would prefer the `'Content-Length'` header to be set for you: ``` javascript form.submit('example.org/upload', function(err, res) { console.log(res.statusCode); }); ``` To use custom headers and pre-known length in parts: ``` javascript var CRLF = '\r\n'; var form = new FormData(); var options = { header: CRLF + '--' + form.getBoundary() + CRLF + 'X-Custom-Header: 123' + CRLF + CRLF, knownLength: 1 }; form.append('my_buffer', buffer, options); form.submit('http://example.com/', function(err, res) { if (err) throw err; console.log('Done'); }); ``` Form-Data can recognize and fetch all the required information from common types of streams (```fs.readStream```, ```http.response``` and ```mikeal's request```), for some other types of streams you'd need to provide "file"-related information manually: ``` javascript someModule.stream(function(err, stdout, stderr) { if (err) throw err; var form = new FormData(); form.append('file', stdout, { filename: 'unicycle.jpg', // ... 
or: filepath: 'photos/toys/unicycle.jpg', contentType: 'image/jpeg', knownLength: 19806 }); form.submit('http://example.com/', function(err, res) { if (err) throw err; console.log('Done'); }); }); ``` The `filepath` property overrides `filename` and may contain a relative path. This is typically used when uploading [multiple files from a directory](https://wicg.github.io/entries-api/#dom-htmlinputelement-webkitdirectory). For edge cases, like POST request to URL with query string or to pass HTTP auth credentials, object can be passed to `form.submit()` as first parameter: ``` javascript form.submit({ host: 'example.com', path: '/probably.php?extra=params', auth: 'username:password' }, function(err, res) { console.log(res.statusCode); }); ``` In case you need to also send custom HTTP headers with the POST request, you can use the `headers` key in first parameter of `form.submit()`: ``` javascript form.submit({ host: 'example.com', path: '/surelynot.php', headers: {'x-test-header': 'test-header-value'} }, function(err, res) { console.log(res.statusCode); }); ``` ### Integration with other libraries #### Request Form submission using [request](https://github.com/request/request): ```javascript var formData = { my_field: 'my_value', my_file: fs.createReadStream(__dirname + '/unicycle.jpg'), }; request.post({url:'http://service.com/upload', formData: formData}, function(err, httpResponse, body) { if (err) { return console.error('upload failed:', err); } console.log('Upload successful! Server responded with:', body); }); ``` For more details see [request readme](https://github.com/request/request#multipartform-data-multipart-form-uploads). 
#### node-fetch You can also submit a form using [node-fetch](https://github.com/bitinn/node-fetch): ```javascript var form = new FormData(); form.append('a', 1); fetch('http://example.com', { method: 'POST', body: form }) .then(function(res) { return res.json(); }).then(function(json) { console.log(json); }); ``` ## Notes - ```getLengthSync()``` method DOESN'T calculate length for streams, use ```knownLength``` options as workaround. - Starting version `2.x` FormData has dropped support for `node@0.10.x`. ## License Form-Data is released under the [MIT](License) license.
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/form-data/README.md
0.743634
0.871748
README.md
pypi
# yargs-parser [![Build Status](https://travis-ci.org/yargs/yargs-parser.png)](https://travis-ci.org/yargs/yargs-parser) [![Coverage Status](https://coveralls.io/repos/yargs/yargs-parser/badge.svg?branch=)](https://coveralls.io/r/yargs/yargs-parser?branch=master) [![NPM version](https://img.shields.io/npm/v/yargs-parser.svg)](https://www.npmjs.com/package/yargs-parser) [![Windows Tests](https://img.shields.io/appveyor/ci/bcoe/yargs-parser/master.svg?label=Windows%20Tests)](https://ci.appveyor.com/project/bcoe/yargs-parser) [![Standard Version](https://img.shields.io/badge/release-standard%20version-brightgreen.svg)](https://github.com/conventional-changelog/standard-version) The mighty option parser used by [yargs](https://github.com/yargs/yargs). visit the [yargs website](http://yargs.js.org/) for more examples, and thorough usage instructions. <img width="250" src="https://raw.githubusercontent.com/yargs/yargs-parser/master/yargs-logo.png"> ## Example ```sh npm i yargs-parser --save ``` ```js var argv = require('yargs-parser')(process.argv.slice(2)) console.log(argv) ``` ```sh node example.js --foo=33 --bar hello { _: [], foo: 33, bar: 'hello' } ``` _or parse a string!_ ```js var argv = require('./')('--foo=99 --bar=33') console.log(argv) ``` ```sh { _: [], foo: 99, bar: 33 } ``` Convert an array of mixed types before passing to `yargs-parser`: ```js var parse = require('yargs-parser') parse(['-f', 11, '--zoom', 55].join(' ')) // <-- array to string parse(['-f', 11, '--zoom', 55].map(String)) // <-- array of strings ``` ## API ### require('yargs-parser')(args, opts={}) Parses command line arguments returning a simple mapping of keys and values. **expects:** * `args`: a string or array of strings representing the options to parse. * `opts`: provide a set of hints indicating how `args` should be parsed: * `opts.alias`: an object representing the set of aliases for a key: `{alias: {foo: ['f']}}`. 
* `opts.array`: indicate that keys should be parsed as an array: `{array: ['foo', 'bar']}`. * `opts.boolean`: arguments should be parsed as booleans: `{boolean: ['x', 'y']}`. * `opts.config`: indicate a key that represents a path to a configuration file (this file will be loaded and parsed). * `opts.coerce`: provide a custom synchronous function that returns a coerced value from the argument provided (or throws an error), e.g. `{coerce: {foo: function (arg) {return modifiedArg}}}`. * `opts.count`: indicate a key that should be used as a counter, e.g., `-vvv` = `{v: 3}`. * `opts.default`: provide default values for keys: `{default: {x: 33, y: 'hello world!'}}`. * `opts.envPrefix`: environment variables (`process.env`) with the prefix provided should be parsed. * `opts.narg`: specify that a key requires `n` arguments: `{narg: {x: 2}}`. * `opts.normalize`: `path.normalize()` will be applied to values set to this key. * `opts.string`: keys should be treated as strings (even if they resemble a number `-x 33`). * `opts.configuration`: provide configuration options to the yargs-parser (see: [configuration](#configuration)). * `opts.number`: keys should be treated as numbers. * `opts['--']`: arguments after the end-of-options flag `--` will be set to the `argv.['--']` array instead of being set to the `argv._` array. **returns:** * `obj`: an object representing the parsed value of `args` * `key/value`: key value pairs for each argument and their aliases. * `_`: an array representing the positional arguments. * [optional] `--`: an array with arguments after the end-of-options flag `--`. ### require('yargs-parser').detailed(args, opts={}) Parses a command line string, returning detailed information required by the yargs engine. **expects:** * `args`: a string or array of strings representing options to parse. * `opts`: provide a set of hints indicating how `args`, inputs are identical to `require('yargs-parser')(args, opts={})`. 
**returns:** * `argv`: an object representing the parsed value of `args` * `key/value`: key value pairs for each argument and their aliases. * `_`: an array representing the positional arguments. * `error`: populated with an error object if an exception occurred during parsing. * `aliases`: the inferred list of aliases built by combining lists in `opts.alias`. * `newAliases`: any new aliases added via camel-case expansion. * `configuration`: the configuration loaded from the `yargs` stanza in package.json. <a name="configuration"></a> ### Configuration The yargs-parser applies several automated transformations on the keys provided in `args`. These features can be turned on and off using the `configuration` field of `opts`. ```js var parsed = parser(['--no-dice'], { configuration: { 'boolean-negation': false } }) ``` ### short option groups * default: `true`. * key: `short-option-groups`. Should a group of short-options be treated as boolean flags? ```sh node example.js -abc { _: [], a: true, b: true, c: true } ``` _if disabled:_ ```sh node example.js -abc { _: [], abc: true } ``` ### camel-case expansion * default: `true`. * key: `camel-case-expansion`. Should hyphenated arguments be expanded into camel-case aliases? ```sh node example.js --foo-bar { _: [], 'foo-bar': true, fooBar: true } ``` _if disabled:_ ```sh node example.js --foo-bar { _: [], 'foo-bar': true } ``` ### dot-notation * default: `true` * key: `dot-notation` Should keys that contain `.` be treated as objects? ```sh node example.js --foo.bar { _: [], foo: { bar: true } } ``` _if disabled:_ ```sh node example.js --foo.bar { _: [], "foo.bar": true } ``` ### parse numbers * default: `true` * key: `parse-numbers` Should keys that look like numbers be treated as such? 
```sh node example.js --foo=99.3 { _: [], foo: 99.3 } ``` _if disabled:_ ```sh node example.js --foo=99.3 { _: [], foo: "99.3" } ``` ### boolean negation * default: `true` * key: `boolean-negation` Should variables prefixed with `--no` be treated as negations? ```sh node example.js --no-foo { _: [], foo: false } ``` _if disabled:_ ```sh node example.js --no-foo { _: [], "no-foo": true } ``` ### combine arrays * default: `false` * key: `combine-arrays` Should arrays be combined when provided by both command line arguments and a configuration file. ### duplicate arguments array * default: `true` * key: `duplicate-arguments-array` Should arguments be coerced into an array when duplicated: ```sh node example.js -x 1 -x 2 { _: [], x: [1, 2] } ``` _if disabled:_ ```sh node example.js -x 1 -x 2 { _: [], x: 2 } ``` ### flatten duplicate arrays * default: `true` * key: `flatten-duplicate-arrays` Should array arguments be coerced into a single array when duplicated: ```sh node example.js -x 1 2 -x 3 4 { _: [], x: [1, 2, 3, 4] } ``` _if disabled:_ ```sh node example.js -x 1 2 -x 3 4 { _: [], x: [[1, 2], [3, 4]] } ``` ### negation prefix * default: `no-` * key: `negation-prefix` The prefix to use for negated boolean variables. ```sh node example.js --no-foo { _: [], foo: false } ``` _if set to `quux`:_ ```sh node example.js --quuxfoo { _: [], foo: false } ``` ### populate -- * default: `false`. * key: `populate--` Should unparsed flags be stored in `--` or `_`. _If disabled:_ ```sh node example.js a -b -- x y { _: [ 'a', 'x', 'y' ], b: true } ``` _If enabled:_ ```sh node example.js a -b -- x y { _: [ 'a' ], '--': [ 'x', 'y' ], b: true } ``` ### set placeholder key * default: `false`. * key: `set-placeholder-key`. Should a placeholder be added for keys not set via the corresponding CLI argument? 
_If disabled:_ ```sh node example.js -a 1 -c 2 { _: [], a: 1, c: 2 } ``` _If enabled:_ ```sh node example.js -a 1 -c 2 { _: [], a: 1, b: undefined, c: 2 } ``` ## Special Thanks The yargs project evolves from optimist and minimist. It owes its existence to a lot of James Halliday's hard work. Thanks [substack](https://github.com/substack) **beep** **boop** \o/ ## License ISC
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/yargs-parser/README.md
0.42322
0.911022
README.md
pypi
export { Observable } from './internal/Observable'; export { ConnectableObservable } from './internal/observable/ConnectableObservable'; export { GroupedObservable } from './internal/operators/groupBy'; export { Operator } from './internal/Operator'; export { observable } from './internal/symbol/observable'; export { Subject } from './internal/Subject'; export { BehaviorSubject } from './internal/BehaviorSubject'; export { ReplaySubject } from './internal/ReplaySubject'; export { AsyncSubject } from './internal/AsyncSubject'; export { asap as asapScheduler } from './internal/scheduler/asap'; export { async as asyncScheduler } from './internal/scheduler/async'; export { queue as queueScheduler } from './internal/scheduler/queue'; export { animationFrame as animationFrameScheduler } from './internal/scheduler/animationFrame'; export { VirtualTimeScheduler, VirtualAction } from './internal/scheduler/VirtualTimeScheduler'; export { Scheduler } from './internal/Scheduler'; export { Subscription } from './internal/Subscription'; export { Subscriber } from './internal/Subscriber'; export { Notification } from './internal/Notification'; export { pipe } from './internal/util/pipe'; export { noop } from './internal/util/noop'; export { identity } from './internal/util/identity'; export { isObservable } from './internal/util/isObservable'; export { ArgumentOutOfRangeError } from './internal/util/ArgumentOutOfRangeError'; export { EmptyError } from './internal/util/EmptyError'; export { ObjectUnsubscribedError } from './internal/util/ObjectUnsubscribedError'; export { UnsubscriptionError } from './internal/util/UnsubscriptionError'; export { TimeoutError } from './internal/util/TimeoutError'; export { bindCallback } from './internal/observable/bindCallback'; export { bindNodeCallback } from './internal/observable/bindNodeCallback'; export { combineLatest } from './internal/observable/combineLatest'; export { concat } from './internal/observable/concat'; export { defer } from 
'./internal/observable/defer'; export { empty } from './internal/observable/empty'; export { forkJoin } from './internal/observable/forkJoin'; export { from } from './internal/observable/from'; export { fromEvent } from './internal/observable/fromEvent'; export { fromEventPattern } from './internal/observable/fromEventPattern'; export { generate } from './internal/observable/generate'; export { iif } from './internal/observable/iif'; export { interval } from './internal/observable/interval'; export { merge } from './internal/observable/merge'; export { never } from './internal/observable/never'; export { of } from './internal/observable/of'; export { onErrorResumeNext } from './internal/observable/onErrorResumeNext'; export { pairs } from './internal/observable/pairs'; export { race } from './internal/observable/race'; export { range } from './internal/observable/range'; export { throwError } from './internal/observable/throwError'; export { timer } from './internal/observable/timer'; export { using } from './internal/observable/using'; export { zip } from './internal/observable/zip'; export { EMPTY } from './internal/observable/empty'; export { NEVER } from './internal/observable/never'; export * from './internal/types'; export { config } from './internal/config';
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/rxjs/index.d.ts
0.837221
0.506164
index.d.ts
pypi
<h1><p align="center"><img alt="protobuf.js" src="https://github.com/dcodeIO/protobuf.js/raw/master/pbjs.png" width="120" height="104" /></p></h1> <p align="center"><a href="https://npmjs.org/package/protobufjs"><img src="https://img.shields.io/npm/v/protobufjs.svg" alt=""></a> <a href="https://travis-ci.org/dcodeIO/protobuf.js"><img src="https://travis-ci.org/dcodeIO/protobuf.js.svg?branch=master" alt=""></a> <a href="https://npmjs.org/package/protobufjs"><img src="https://img.shields.io/npm/dm/protobufjs.svg" alt=""></a> <a href="https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=dcode%40dcode.io&item_name=Open%20Source%20Software%20Donation&item_number=dcodeIO%2Fprotobuf.js"><img alt="donate ❤" src="https://img.shields.io/badge/donate-❤-ff2244.svg"></a></p> **Protocol Buffers** are a language-neutral, platform-neutral, extensible way of serializing structured data for use in communications protocols, data storage, and more, originally designed at Google ([see](https://developers.google.com/protocol-buffers/)). **protobuf.js** is a pure JavaScript implementation with [TypeScript](https://www.typescriptlang.org) support for [node.js](https://nodejs.org) and the browser. It's easy to use, blazingly fast and works out of the box with [.proto](https://developers.google.com/protocol-buffers/docs/proto) files! Contents -------- * [Installation](#installation)<br /> How to include protobuf.js in your project. * [Usage](#usage)<br /> A brief introduction to using the toolset. * [Valid Message](#valid-message) * [Toolset](#toolset)<br /> * [Examples](#examples)<br /> A few examples to get you started. * [Using .proto files](#using-proto-files) * [Using JSON descriptors](#using-json-descriptors) * [Using reflection only](#using-reflection-only) * [Using custom classes](#using-custom-classes) * [Using services](#using-services) * [Usage with TypeScript](#usage-with-typescript)<br /> * [Command line](#command-line)<br /> How to use the command line utility. 
* [pbjs for JavaScript](#pbjs-for-javascript) * [pbts for TypeScript](#pbts-for-typescript) * [Reflection vs. static code](#reflection-vs-static-code) * [Command line API](#command-line-api)<br /> * [Additional documentation](#additional-documentation)<br /> A list of available documentation resources. * [Performance](#performance)<br /> A few internals and a benchmark on performance. * [Compatibility](#compatibility)<br /> Notes on compatibility regarding browsers and optional libraries. * [Building](#building)<br /> How to build the library and its components yourself. Installation --------------- ### node.js ``` $> npm install protobufjs [--save --save-prefix=~] ``` ```js var protobuf = require("protobufjs"); ``` **Note** that this library's versioning scheme is not semver-compatible for historical reasons. For guaranteed backward compatibility, always depend on `~6.A.B` instead of `^6.A.B` (hence the `--save-prefix` above). ### Browsers Development: ``` <script src="//cdn.rawgit.com/dcodeIO/protobuf.js/6.X.X/dist/protobuf.js"></script> ``` Production: ``` <script src="//cdn.rawgit.com/dcodeIO/protobuf.js/6.X.X/dist/protobuf.min.js"></script> ``` **Remember** to replace the version tag with the exact [release](https://github.com/dcodeIO/protobuf.js/tags) your project depends upon. The library supports CommonJS and AMD loaders and also exports globally as `protobuf`. ### Distributions Where bundle size is a factor, there are additional stripped-down versions of the [full library][dist-full] (~19kb gzipped) available that exclude certain functionality: * When working with JSON descriptors (i.e. generated by [pbjs](#pbjs-for-javascript)) and/or reflection only, see the [light library][dist-light] (~16kb gzipped) that excludes the parser. CommonJS entry point is: ```js var protobuf = require("protobufjs/light"); ``` * When working with statically generated code only, see the [minimal library][dist-minimal] (~6.5kb gzipped) that also excludes reflection. 
CommonJS entry point is: ```js var protobuf = require("protobufjs/minimal"); ``` [dist-full]: https://github.com/dcodeIO/protobuf.js/tree/master/dist [dist-light]: https://github.com/dcodeIO/protobuf.js/tree/master/dist/light [dist-minimal]: https://github.com/dcodeIO/protobuf.js/tree/master/dist/minimal Usage ----- Because JavaScript is a dynamically typed language, protobuf.js introduces the concept of a **valid message** in order to provide the best possible [performance](#performance) (and, as a side product, proper typings): ### Valid message > A valid message is an object (1) not missing any required fields and (2) exclusively composed of JS types understood by the wire format writer. There are two possible types of valid messages and the encoder is able to work with both of these for convenience: * **Message instances** (explicit instances of message classes with default values on their prototype) always (have to) satisfy the requirements of a valid message by design and * **Plain JavaScript objects** that just so happen to be composed in a way satisfying the requirements of a valid message as well. 
In a nutshell, the wire format writer understands the following types: | Field type | Expected JS type (create, encode) | Conversion (fromObject) |------------|-----------------------------------|------------------------ | s-/u-/int32<br />s-/fixed32 | `number` (32 bit integer) | <code>value &#124; 0</code> if signed<br />`value >>> 0` if unsigned | s-/u-/int64<br />s-/fixed64 | `Long`-like (optimal)<br />`number` (53 bit integer) | `Long.fromValue(value)` with long.js<br />`parseInt(value, 10)` otherwise | float<br />double | `number` | `Number(value)` | bool | `boolean` | `Boolean(value)` | string | `string` | `String(value)` | bytes | `Uint8Array` (optimal)<br />`Buffer` (optimal under node)<br />`Array.<number>` (8 bit integers) | `base64.decode(value)` if a `string`<br />`Object` with non-zero `.length` is assumed to be buffer-like | enum | `number` (32 bit integer) | Looks up the numeric id if a `string` | message | Valid message | `Message.fromObject(value)` * Explicit `undefined` and `null` are considered as not set if the field is optional. * Repeated fields are `Array.<T>`. * Map fields are `Object.<string,T>` with the key being the string representation of the respective value or an 8 characters long binary hash string for `Long`-likes. * Types marked as *optimal* provide the best performance because no conversion step (i.e. number to low and high bits or base64 string to buffer) is required. ### Toolset With that in mind and again for performance reasons, each message class provides a distinct set of methods with each method doing just one thing. This avoids unnecessary assertions / redundant operations where performance is a concern but also forces a user to perform verification (of plain JavaScript objects that *might* just so happen to be a valid message) explicitly where necessary - for example when dealing with user input. **Note** that `Message` below refers to any message class. 
* **Message.verify**(message: `Object`): `null|string`<br /> verifies that a **plain JavaScript object** satisfies the requirements of a valid message and thus can be encoded without issues. Instead of throwing, it returns the error message as a string, if any. ```js var payload = "invalid (not an object)"; var err = AwesomeMessage.verify(payload); if (err) throw Error(err); ``` * **Message.encode**(message: `Message|Object` [, writer: `Writer`]): `Writer`<br /> encodes a **message instance** or valid **plain JavaScript object**. This method does not implicitly verify the message and it's up to the user to make sure that the payload is a valid message. ```js var buffer = AwesomeMessage.encode(message).finish(); ``` * **Message.encodeDelimited**(message: `Message|Object` [, writer: `Writer`]): `Writer`<br /> works like `Message.encode` but additionally prepends the length of the message as a varint. * **Message.decode**(reader: `Reader|Uint8Array`): `Message`<br /> decodes a buffer to a **message instance**. If required fields are missing, it throws a `util.ProtocolError` with an `instance` property set to the so far decoded message. If the wire format is invalid, it throws an `Error`. ```js try { var decodedMessage = AwesomeMessage.decode(buffer); } catch (e) { if (e instanceof protobuf.util.ProtocolError) { // e.instance holds the so far decoded message with missing required fields } else { // wire format is invalid } } ``` * **Message.decodeDelimited**(reader: `Reader|Uint8Array`): `Message`<br /> works like `Message.decode` but additionally reads the length of the message prepended as a varint. * **Message.create**(properties: `Object`): `Message`<br /> creates a new **message instance** from a set of properties that satisfy the requirements of a valid message. Where applicable, it is recommended to prefer `Message.create` over `Message.fromObject` because it doesn't perform possibly redundant conversion. 
```js var message = AwesomeMessage.create({ awesomeField: "AwesomeString" }); ``` * **Message.fromObject**(object: `Object`): `Message`<br /> converts any non-valid **plain JavaScript object** to a **message instance** using the conversion steps outlined within the table above. ```js var message = AwesomeMessage.fromObject({ awesomeField: 42 }); // converts awesomeField to a string ``` * **Message.toObject**(message: `Message` [, options: `ConversionOptions`]): `Object`<br /> converts a **message instance** to an arbitrary **plain JavaScript object** for interoperability with other libraries or storage. The resulting plain JavaScript object *might* still satisfy the requirements of a valid message depending on the actual conversion options specified, but most of the time it does not. ```js var object = AwesomeMessage.toObject(message, { enums: String, // enums as string names longs: String, // longs as strings (requires long.js) bytes: String, // bytes as base64 encoded strings defaults: true, // includes default values arrays: true, // populates empty arrays (repeated fields) even if defaults=false objects: true, // populates empty objects (map fields) even if defaults=false oneofs: true // includes virtual oneof fields set to the present field's name }); ``` For reference, the following diagram aims to display relationships between the different methods and the concept of a valid message: <p align="center"><img alt="Toolset Diagram" src="http://dcode.io/protobuf.js/toolset.svg" /></p> > In other words: `verify` indicates that calling `create` or `encode` directly on the plain object will [result in a valid message respectively] succeed. `fromObject`, on the other hand, does conversion from a broader range of plain objects to create valid messages. 
([ref](https://github.com/dcodeIO/protobuf.js/issues/748#issuecomment-291925749)) Examples -------- ### Using .proto files It is possible to load existing .proto files using the full library, which parses and compiles the definitions to ready to use (reflection-based) message classes: ```protobuf // awesome.proto package awesomepackage; syntax = "proto3"; message AwesomeMessage { string awesome_field = 1; // becomes awesomeField } ``` ```js protobuf.load("awesome.proto", function(err, root) { if (err) throw err; // Obtain a message type var AwesomeMessage = root.lookupType("awesomepackage.AwesomeMessage"); // Exemplary payload var payload = { awesomeField: "AwesomeString" }; // Verify the payload if necessary (i.e. when possibly incomplete or invalid) var errMsg = AwesomeMessage.verify(payload); if (errMsg) throw Error(errMsg); // Create a new message var message = AwesomeMessage.create(payload); // or use .fromObject if conversion is necessary // Encode a message to an Uint8Array (browser) or Buffer (node) var buffer = AwesomeMessage.encode(message).finish(); // ... do something with buffer // Decode an Uint8Array (browser) or Buffer (node) to a message var message = AwesomeMessage.decode(buffer); // ... do something with message // If the application uses length-delimited buffers, there is also encodeDelimited and decodeDelimited. // Maybe convert the message back to a plain object var object = AwesomeMessage.toObject(message, { longs: String, enums: String, bytes: String, // see ConversionOptions }); }); ``` Additionally, promise syntax can be used by omitting the callback, if preferred: ```js protobuf.load("awesome.proto") .then(function(root) { ... }); ``` ### Using JSON descriptors The library utilizes JSON descriptors that are equivalent to a .proto definition. 
For example, the following is identical to the .proto definition seen above: ```json // awesome.json { "nested": { "AwesomeMessage": { "fields": { "awesomeField": { "type": "string", "id": 1 } } } } } ``` JSON descriptors closely resemble the internal reflection structure: | Type (T) | Extends | Type-specific properties |--------------------|--------------------|------------------------- | *ReflectionObject* | | options | *Namespace* | *ReflectionObject* | nested | Root | *Namespace* | **nested** | Type | *Namespace* | **fields** | Enum | *ReflectionObject* | **values** | Field | *ReflectionObject* | rule, **type**, **id** | MapField | Field | **keyType** | OneOf | *ReflectionObject* | **oneof** (array of field names) | Service | *Namespace* | **methods** | Method | *ReflectionObject* | type, **requestType**, **responseType**, requestStream, responseStream * **Bold properties** are required. *Italic types* are abstract. * `T.fromJSON(name, json)` creates the respective reflection object from a JSON descriptor * `T#toJSON()` creates a JSON descriptor from the respective reflection object (its name is used as the key within the parent) Exclusively using JSON descriptors instead of .proto files enables the use of just the light library (the parser isn't required in this case). A JSON descriptor can either be loaded the usual way: ```js protobuf.load("awesome.json", function(err, root) { if (err) throw err; // Continue at "Obtain a message type" above }); ``` Or it can be loaded inline: ```js var jsonDescriptor = require("./awesome.json"); // exemplary for node var root = protobuf.Root.fromJSON(jsonDescriptor); // Continue at "Obtain a message type" above ``` ### Using reflection only Both the full and the light library include full reflection support. One could, for example, define the .proto definitions seen in the examples above using just reflection: ```js ... 
var Root = protobuf.Root, Type = protobuf.Type, Field = protobuf.Field; var AwesomeMessage = new Type("AwesomeMessage").add(new Field("awesomeField", 1, "string")); var root = new Root().define("awesomepackage").add(AwesomeMessage); // Continue at "Create a new message" above ... ``` Detailed information on the reflection structure is available within the [API documentation](#additional-documentation). ### Using custom classes Message classes can also be extended with custom functionality and it is also possible to register a custom constructor with a reflected message type: ```js ... // Define a custom constructor function AwesomeMessage(properties) { // custom initialization code ... } // Register the custom constructor with its reflected type (*) root.lookupType("awesomepackage.AwesomeMessage").ctor = AwesomeMessage; // Define custom functionality AwesomeMessage.customStaticMethod = function() { ... }; AwesomeMessage.prototype.customInstanceMethod = function() { ... }; // Continue at "Create a new message" above ``` (*) Besides referencing its reflected type through `AwesomeMessage.$type` and `AwesomeMessage#$type`, the respective custom class is automatically populated with: * `AwesomeMessage.create` * `AwesomeMessage.encode` and `AwesomeMessage.encodeDelimited` * `AwesomeMessage.decode` and `AwesomeMessage.decodeDelimited` * `AwesomeMessage.verify` * `AwesomeMessage.fromObject`, `AwesomeMessage.toObject`, `AwesomeMessage#toObject` and `AwesomeMessage#toJSON` Afterwards, decoded messages of this type are `instanceof AwesomeMessage`. Alternatively, it is also possible to reuse and extend the internal constructor if custom initialization code is not required: ```js ... // Reuse the internal constructor var AwesomeMessage = root.lookupType("awesomepackage.AwesomeMessage").ctor; // Define custom functionality AwesomeMessage.customStaticMethod = function() { ... }; AwesomeMessage.prototype.customInstanceMethod = function() { ... 
}; // Continue at "Create a new message" above ``` ### Using services The library also supports consuming services but it doesn't make any assumptions about the actual transport channel. Instead, a user must provide a suitable RPC implementation, which is an asynchronous function that takes the reflected service method, the binary request and a node-style callback as its parameters: ```js function rpcImpl(method, requestData, callback) { // perform the request using an HTTP request or a WebSocket for example var responseData = ...; // and call the callback with the binary response afterwards: callback(null, responseData); } ``` Example: ```protobuf // greeter.proto syntax = "proto3"; service Greeter { rpc SayHello (HelloRequest) returns (HelloReply) {} } message HelloRequest { string name = 1; } message HelloReply { string message = 1; } ``` ```js ... var Greeter = root.lookup("Greeter"); var greeter = Greeter.create(/* see above */ rpcImpl, /* request delimited? */ false, /* response delimited? */ false); greeter.sayHello({ name: 'you' }, function(err, response) { console.log('Greeting:', response.message); }); ``` Services also support promises: ```js greeter.sayHello({ name: 'you' }) .then(function(response) { console.log('Greeting:', response.message); }); ``` There is also an [example for streaming RPC](https://github.com/dcodeIO/protobuf.js/blob/master/examples/streaming-rpc.js). Note that the service API is meant for clients. Implementing a server-side endpoint pretty much always requires transport channel (i.e. http, websocket, etc.) specific code with the only common denominator being that it decodes and encodes messages. ### Usage with TypeScript The library ships with its own [type definitions](https://github.com/dcodeIO/protobuf.js/blob/master/index.d.ts) and modern editors like [Visual Studio Code](https://code.visualstudio.com/) will automatically detect and use them for code completion. 
The npm package depends on [@types/node](https://www.npmjs.com/package/@types/node) because of `Buffer` and [@types/long](https://www.npmjs.com/package/@types/long) because of `Long`. If you are not building for node and/or not using long.js, it should be safe to exclude them manually. #### Using the JS API The API shown above works pretty much the same with TypeScript. However, because everything is typed, accessing fields on instances of dynamically generated message classes requires either using bracket-notation (i.e. `message["awesomeField"]`) or explicit casts. Alternatively, it is possible to use a [typings file generated for its static counterpart](#pbts-for-typescript). ```ts import { load } from "protobufjs"; // respectively "./node_modules/protobufjs" load("awesome.proto", function(err, root) { if (err) throw err; // example code const AwesomeMessage = root.lookupType("awesomepackage.AwesomeMessage"); let message = AwesomeMessage.create({ awesomeField: "hello" }); console.log(`message = ${JSON.stringify(message)}`); let buffer = AwesomeMessage.encode(message).finish(); console.log(`buffer = ${Array.prototype.toString.call(buffer)}`); let decoded = AwesomeMessage.decode(buffer); console.log(`decoded = ${JSON.stringify(decoded)}`); }); ``` #### Using generated static code If you generated static code to `bundle.js` using the CLI and its type definitions to `bundle.d.ts`, then you can just do: ```ts import { AwesomeMessage } from "./bundle.js"; // example code let message = AwesomeMessage.create({ awesomeField: "hello" }); let buffer = AwesomeMessage.encode(message).finish(); let decoded = AwesomeMessage.decode(buffer); ``` #### Using decorators The library also includes an early implementation of [decorators](https://www.typescriptlang.org/docs/handbook/decorators.html). **Note** that decorators are an experimental feature in TypeScript and that declaration order is important depending on the JS target. 
For example, `@Field.d(2, AwesomeArrayMessage)` requires that `AwesomeArrayMessage` has been defined earlier when targeting `ES5`. ```ts import { Message, Type, Field, OneOf } from "protobufjs/light"; // respectively "./node_modules/protobufjs/light.js" export class AwesomeSubMessage extends Message<AwesomeSubMessage> { @Field.d(1, "string") public awesomeString: string; } export enum AwesomeEnum { ONE = 1, TWO = 2 } @Type.d("SuperAwesomeMessage") export class AwesomeMessage extends Message<AwesomeMessage> { @Field.d(1, "string", "optional", "awesome default string") public awesomeField: string; @Field.d(2, AwesomeSubMessage) public awesomeSubMessage: AwesomeSubMessage; @Field.d(3, AwesomeEnum, "optional", AwesomeEnum.ONE) public awesomeEnum: AwesomeEnum; @OneOf.d("awesomeSubMessage", "awesomeEnum") public which: string; } // example code let message = new AwesomeMessage({ awesomeField: "hello" }); let buffer = AwesomeMessage.encode(message).finish(); let decoded = AwesomeMessage.decode(buffer); ``` Supported decorators are: * **Type.d(typeName?: `string`)** &nbsp; *(optional)*<br /> annotates a class as a protobuf message type. If `typeName` is not specified, the constructor's runtime function name is used for the reflected type. * **Field.d&lt;T>(fieldId: `number`, fieldType: `string | Constructor<T>`, fieldRule?: `"optional" | "required" | "repeated"`, defaultValue?: `T`)**<br /> annotates a property as a protobuf field with the specified id and protobuf type. * **MapField.d&lt;T extends { [key: string]: any }>(fieldId: `number`, fieldKeyType: `string`, fieldValueType: `string | Constructor<{}>`)**<br /> annotates a property as a protobuf map field with the specified id, protobuf key and value type. * **OneOf.d&lt;T extends string>(...fieldNames: `string[]`)**<br /> annotates a property as a protobuf oneof covering the specified fields. Other notes: * Decorated types reside in `protobuf.roots["decorated"]` using a flat structure, so no duplicate names. 
* Enums are copied to a reflected enum with a generic name on decorator evaluation because referenced enum objects have no runtime name the decorator could use. * Default values must be specified as arguments to the decorator instead of using a property initializer for proper prototype behavior. * Property names on decorated classes must not be renamed on compile time (i.e. by a minifier) because decorators just receive the original field name as a string. **ProTip!** Not as pretty, but you can [use decorators in plain JavaScript](https://github.com/dcodeIO/protobuf.js/blob/master/examples/js-decorators.js) as well. Command line ------------ **Note** that moving the CLI to [its own package](./cli) is a work in progress. At the moment, it's still part of the main package. The command line interface (CLI) can be used to translate between file formats and to generate static code as well as TypeScript definitions. ### pbjs for JavaScript ``` Translates between file formats and generates static code. -t, --target Specifies the target format. Also accepts a path to require a custom target. json JSON representation json-module JSON representation as a module proto2 Protocol Buffers, Version 2 proto3 Protocol Buffers, Version 3 static Static code without reflection (non-functional on its own) static-module Static code without reflection as a module -p, --path Adds a directory to the include path. -o, --out Saves to a file instead of writing to stdout. --sparse Exports only those types referenced from a main file (experimental). Module targets only: -w, --wrap Specifies the wrapper to use. Also accepts a path to require a custom wrapper. default Default wrapper supporting both CommonJS and AMD commonjs CommonJS wrapper amd AMD wrapper es6 ES6 wrapper (implies --es6) closure A closure adding to protobuf.roots where protobuf is a global -r, --root Specifies an alternative protobuf.roots name. -l, --lint Linter configuration. 
Defaults to protobuf.js-compatible rules: eslint-disable block-scoped-var, no-redeclare, no-control-regex, no-prototype-builtins --es6 Enables ES6 syntax (const/let instead of var) Proto sources only: --keep-case Keeps field casing instead of converting to camel case. Static targets only: --no-create Does not generate create functions used for reflection compatibility. --no-encode Does not generate encode functions. --no-decode Does not generate decode functions. --no-verify Does not generate verify functions. --no-convert Does not generate convert functions like from/toObject --no-delimited Does not generate delimited encode/decode functions. --no-beautify Does not beautify generated code. --no-comments Does not output any JSDoc comments. --force-long Enforces the use of 'Long' for s-/u-/int64 and s-/fixed64 fields. --force-message Enforces the use of message instances instead of plain objects. usage: pbjs [options] file1.proto file2.json ... (or pipe) other | pbjs [options] - ``` For production environments it is recommended to bundle all your .proto files to a single .json file, which minimizes the number of network requests and avoids any parser overhead (hint: works with just the **light** library): ``` $> pbjs -t json file1.proto file2.proto > bundle.json ``` Now, either include this file in your final bundle: ```js var root = protobuf.Root.fromJSON(require("./bundle.json")); ``` or load it the usual way: ```js protobuf.load("bundle.json", function(err, root) { ... }); ``` Generated static code, on the other hand, works with just the **minimal** library. For example ``` $> pbjs -t static-module -w commonjs -o compiled.js file1.proto file2.proto ``` will generate static code for definitions within `file1.proto` and `file2.proto` to a CommonJS module `compiled.js`. **ProTip!** Documenting your .proto files with `/** ... */`-blocks or (trailing) `/// ...` lines translates to generated static code. 
### pbts for TypeScript ``` Generates TypeScript definitions from annotated JavaScript files. -o, --out Saves to a file instead of writing to stdout. -g, --global Name of the global object in browser environments, if any. --no-comments Does not output any JSDoc comments. Internal flags: -n, --name Wraps everything in a module of the specified name. -m, --main Whether building the main library without any imports. usage: pbts [options] file1.js file2.js ... (or) other | pbts [options] - ``` Picking up on the example above, the following not only generates static code to a CommonJS module `compiled.js` but also its respective TypeScript definitions to `compiled.d.ts`: ``` $> pbjs -t static-module -w commonjs -o compiled.js file1.proto file2.proto $> pbts -o compiled.d.ts compiled.js ``` Additionally, TypeScript definitions of static modules are compatible with their reflection-based counterparts (i.e. as exported by JSON modules), as long as the following conditions are met: 1. Instead of using `new SomeMessage(...)`, always use `SomeMessage.create(...)` because reflection objects do not provide a constructor. 2. Types, services and enums must start with an uppercase letter to become available as properties of the reflected types as well (i.e. to be able to use `MyMessage.MyEnum` instead of `root.lookup("MyMessage.MyEnum")`). For example, the following generates a JSON module `bundle.js` and a `bundle.d.ts`, but no static code: ``` $> pbjs -t json-module -w commonjs -o bundle.js file1.proto file2.proto $> pbjs -t static-module file1.proto file2.proto | pbts -o bundle.d.ts - ``` ### Reflection vs. static code While using .proto files directly requires the full library respectively pure reflection/JSON the light library, pretty much all code but the relatively short descriptors is shared. Static code, on the other hand, requires just the minimal library, but generates additional source code without any reflection features. 
This also implies that there is a break-even point where statically generated code becomes larger than descriptor-based code once the amount of code generated exceeds the size of the full respectively light library. There is no significant difference performance-wise as the code generated statically is pretty much the same as generated at runtime and both are largely interchangeable as seen in the previous section. | Source | Library | Advantages | Tradeoffs |--------|---------|------------|----------- | .proto | full | Easily editable<br />Interoperability with other libraries<br />No compile step | Some parsing and possibly network overhead | JSON | light | Easily editable<br />No parsing overhead<br />Single bundle (no network overhead) | protobuf.js specific<br />Has a compile step | static | minimal | Works where `eval` access is restricted<br />Fully documented<br />Small footprint for small protos | Can be hard to edit<br />No reflection<br />Has a compile step ### Command line API Both utilities can be used programmatically by providing command line arguments and a callback to their respective `main` functions: ```js var pbjs = require("protobufjs/cli/pbjs"); // or require("protobufjs/cli").pbjs / .pbts pbjs.main([ "--target", "json-module", "path/to/myproto.proto" ], function(err, output) { if (err) throw err; // do something with output }); ``` Additional documentation ------------------------ #### Protocol Buffers * [Google's Developer Guide](https://developers.google.com/protocol-buffers/docs/overview) #### protobuf.js * [API Documentation](http://dcode.io/protobuf.js) * [CHANGELOG](https://github.com/dcodeIO/protobuf.js/blob/master/CHANGELOG.md) * [Frequently asked questions](https://github.com/dcodeIO/protobuf.js/wiki) on our wiki #### Community * [Questions and answers](http://stackoverflow.com/search?tab=newest&q=protobuf.js) on StackOverflow Performance ----------- The package includes a benchmark that compares protobuf.js performance to native 
JSON (as far as this is possible) and [Google's JS implementation](https://github.com/google/protobuf/tree/master/js). On an i7-2600K running node 6.9.1 it yields: ``` benchmarking encoding performance ... protobuf.js (reflect) x 541,707 ops/sec ±1.13% (87 runs sampled) protobuf.js (static) x 548,134 ops/sec ±1.38% (89 runs sampled) JSON (string) x 318,076 ops/sec ±0.63% (93 runs sampled) JSON (buffer) x 179,165 ops/sec ±2.26% (91 runs sampled) google-protobuf x 74,406 ops/sec ±0.85% (86 runs sampled) protobuf.js (static) was fastest protobuf.js (reflect) was 0.9% ops/sec slower (factor 1.0) JSON (string) was 41.5% ops/sec slower (factor 1.7) JSON (buffer) was 67.6% ops/sec slower (factor 3.1) google-protobuf was 86.4% ops/sec slower (factor 7.3) benchmarking decoding performance ... protobuf.js (reflect) x 1,383,981 ops/sec ±0.88% (93 runs sampled) protobuf.js (static) x 1,378,925 ops/sec ±0.81% (93 runs sampled) JSON (string) x 302,444 ops/sec ±0.81% (93 runs sampled) JSON (buffer) x 264,882 ops/sec ±0.81% (93 runs sampled) google-protobuf x 179,180 ops/sec ±0.64% (94 runs sampled) protobuf.js (reflect) was fastest protobuf.js (static) was 0.3% ops/sec slower (factor 1.0) JSON (string) was 78.1% ops/sec slower (factor 4.6) JSON (buffer) was 80.8% ops/sec slower (factor 5.2) google-protobuf was 87.0% ops/sec slower (factor 7.7) benchmarking combined performance ... 
protobuf.js (reflect) x 275,900 ops/sec ±0.78% (90 runs sampled) protobuf.js (static) x 290,096 ops/sec ±0.96% (90 runs sampled) JSON (string) x 129,381 ops/sec ±0.77% (90 runs sampled) JSON (buffer) x 91,051 ops/sec ±0.94% (90 runs sampled) google-protobuf x 42,050 ops/sec ±0.85% (91 runs sampled) protobuf.js (static) was fastest protobuf.js (reflect) was 4.7% ops/sec slower (factor 1.0) JSON (string) was 55.3% ops/sec slower (factor 2.2) JSON (buffer) was 68.6% ops/sec slower (factor 3.2) google-protobuf was 85.5% ops/sec slower (factor 6.9) ``` These results are achieved by * generating type-specific encoders, decoders, verifiers and converters at runtime * configuring the reader/writer interface according to the environment * using node-specific functionality where beneficial and, of course * avoiding unnecessary operations through splitting up [the toolset](#toolset). You can also run [the benchmark](https://github.com/dcodeIO/protobuf.js/blob/master/bench/index.js) ... ``` $> npm run bench ``` and [the profiler](https://github.com/dcodeIO/protobuf.js/blob/master/bench/prof.js) yourself (the latter requires a recent version of node): ``` $> npm run prof <encode|decode|encode-browser|decode-browser> [iterations=10000000] ``` Note that as of this writing, the benchmark suite performs significantly slower on node 7.2.0 compared to 6.9.1 because moths. Compatibility ------------- * Works in all modern and not-so-modern browsers except IE8. * Because the internals of this package do not rely on `google/protobuf/descriptor.proto`, options are parsed and presented literally. * If typed arrays are not supported by the environment, plain arrays will be used instead. * Support for pre-ES5 environments (except IE8) can be achieved by [using a polyfill](https://github.com/dcodeIO/protobuf.js/blob/master/scripts/polyfill.js). 
* Support for [Content Security Policy](https://w3c.github.io/webappsec-csp/)-restricted environments (like Chrome extensions without [unsafe-eval](https://developer.chrome.com/extensions/contentSecurityPolicy#relaxing-eval)) can be achieved by generating and using static code instead. * If a proper way to work with 64 bit values (uint64, int64 etc.) is required, just install [long.js](https://github.com/dcodeIO/long.js) alongside this library. All 64 bit numbers will then be returned as a `Long` instance instead of a possibly unsafe JavaScript number ([see](https://github.com/dcodeIO/long.js)). * For descriptor.proto interoperability, see [ext/descriptor](https://github.com/dcodeIO/protobuf.js/tree/master/ext/descriptor) Building -------- To build the library or its components yourself, clone it from GitHub and install the development dependencies: ``` $> git clone https://github.com/dcodeIO/protobuf.js.git $> cd protobuf.js $> npm install ``` Building the respective development and production versions with their respective source maps to `dist/`: ``` $> npm run build ``` Building the documentation to `docs/`: ``` $> npm run docs ``` Building the TypeScript definition to `index.d.ts`: ``` $> npm run types ``` ### Browserify integration By default, protobuf.js integrates into any browserify build-process without requiring any optional modules. Hence: * If int64 support is required, explicitly require the `long` module somewhere in your project as it will be excluded otherwise. This assumes that a global `require` function is present that protobuf.js can call to obtain the long module. If there is no global `require` function present after bundling, it's also possible to assign the long module programmatically: ```js var Long = ...; protobuf.util.Long = Long; protobuf.configure(); ``` * If you have any special requirements, there is [the bundler](https://github.com/dcodeIO/protobuf.js/blob/master/scripts/bundle.js) for reference. 
**License:** [BSD 3-Clause License](https://opensource.org/licenses/BSD-3-Clause)
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/protobufjs/README.md
0.860061
0.935759
README.md
pypi
TweetNaCl.js ============ Port of [TweetNaCl](http://tweetnacl.cr.yp.to) / [NaCl](http://nacl.cr.yp.to/) to JavaScript for modern browsers and Node.js. Public domain. [![Build Status](https://travis-ci.org/dchest/tweetnacl-js.svg?branch=master) ](https://travis-ci.org/dchest/tweetnacl-js) Demo: <https://tweetnacl.js.org> **:warning: The library is stable and API is frozen, however it has not been independently reviewed. If you can help reviewing it, please [contact me](mailto:dmitry@codingrobots.com).** Documentation ============= * [Overview](#overview) * [Installation](#installation) * [Usage](#usage) * [Public-key authenticated encryption (box)](#public-key-authenticated-encryption-box) * [Secret-key authenticated encryption (secretbox)](#secret-key-authenticated-encryption-secretbox) * [Scalar multiplication](#scalar-multiplication) * [Signatures](#signatures) * [Hashing](#hashing) * [Random bytes generation](#random-bytes-generation) * [Constant-time comparison](#constant-time-comparison) * [System requirements](#system-requirements) * [Development and testing](#development-and-testing) * [Benchmarks](#benchmarks) * [Contributors](#contributors) * [Who uses it](#who-uses-it) Overview -------- The primary goal of this project is to produce a translation of TweetNaCl to JavaScript which is as close as possible to the original C implementation, plus a thin layer of idiomatic high-level API on top of it. There are two versions, you can use either of them: * `nacl.js` is the port of TweetNaCl with minimum differences from the original + high-level API. * `nacl-fast.js` is like `nacl.js`, but with some functions replaced with faster versions. Installation ------------ You can install TweetNaCl.js via a package manager: [Bower](http://bower.io): $ bower install tweetnacl [NPM](https://www.npmjs.org/): $ npm install tweetnacl or [download source code](https://github.com/dchest/tweetnacl-js/releases). 
Usage ----- All API functions accept and return bytes as `Uint8Array`s. If you need to encode or decode strings, use functions from <https://github.com/dchest/tweetnacl-util-js> or one of the more robust codec packages. In Node.js v4 and later `Buffer` objects are backed by `Uint8Array`s, so you can freely pass them to TweetNaCl.js functions as arguments. The returned objects are still `Uint8Array`s, so if you need `Buffer`s, you'll have to convert them manually; make sure to convert using copying: `new Buffer(array)`, instead of sharing: `new Buffer(array.buffer)`, because some functions return subarrays of their buffers. ### Public-key authenticated encryption (box) Implements *curve25519-xsalsa20-poly1305*. #### nacl.box.keyPair() Generates a new random key pair for box and returns it as an object with `publicKey` and `secretKey` members: { publicKey: ..., // Uint8Array with 32-byte public key secretKey: ... // Uint8Array with 32-byte secret key } #### nacl.box.keyPair.fromSecretKey(secretKey) Returns a key pair for box with public key corresponding to the given secret key. #### nacl.box(message, nonce, theirPublicKey, mySecretKey) Encrypts and authenticates a message using peer's public key, our secret key, and the given nonce, which must be unique for each distinct message for a key pair. Returns an encrypted and authenticated message, which is `nacl.box.overheadLength` longer than the original message. #### nacl.box.open(box, nonce, theirPublicKey, mySecretKey) Authenticates and decrypts the given box with peer's public key, our secret key, and the given nonce. Returns the original message, or `false` if authentication fails. #### nacl.box.before(theirPublicKey, mySecretKey) Returns a precomputed shared key which can be used in `nacl.box.after` and `nacl.box.open.after`. #### nacl.box.after(message, nonce, sharedKey) Same as `nacl.box`, but uses a shared key precomputed with `nacl.box.before`. 
#### nacl.box.open.after(box, nonce, sharedKey) Same as `nacl.box.open`, but uses a shared key precomputed with `nacl.box.before`. #### nacl.box.publicKeyLength = 32 Length of public key in bytes. #### nacl.box.secretKeyLength = 32 Length of secret key in bytes. #### nacl.box.sharedKeyLength = 32 Length of precomputed shared key in bytes. #### nacl.box.nonceLength = 24 Length of nonce in bytes. #### nacl.box.overheadLength = 16 Length of overhead added to box compared to original message. ### Secret-key authenticated encryption (secretbox) Implements *xsalsa20-poly1305*. #### nacl.secretbox(message, nonce, key) Encrypt and authenticates message using the key and the nonce. The nonce must be unique for each distinct message for this key. Returns an encrypted and authenticated message, which is `nacl.secretbox.overheadLength` longer than the original message. #### nacl.secretbox.open(box, nonce, key) Authenticates and decrypts the given secret box using the key and the nonce. Returns the original message, or `false` if authentication fails. #### nacl.secretbox.keyLength = 32 Length of key in bytes. #### nacl.secretbox.nonceLength = 24 Length of nonce in bytes. #### nacl.secretbox.overheadLength = 16 Length of overhead added to secret box compared to original message. ### Scalar multiplication Implements *curve25519*. #### nacl.scalarMult(n, p) Multiplies an integer `n` by a group element `p` and returns the resulting group element. #### nacl.scalarMult.base(n) Multiplies an integer `n` by a standard group element and returns the resulting group element. #### nacl.scalarMult.scalarLength = 32 Length of scalar in bytes. #### nacl.scalarMult.groupElementLength = 32 Length of group element in bytes. ### Signatures Implements [ed25519](http://ed25519.cr.yp.to). #### nacl.sign.keyPair() Generates new random key pair for signing and returns it as an object with `publicKey` and `secretKey` members: { publicKey: ..., // Uint8Array with 32-byte public key secretKey: ... 
// Uint8Array with 64-byte secret key } #### nacl.sign.keyPair.fromSecretKey(secretKey) Returns a signing key pair with public key corresponding to the given 64-byte secret key. The secret key must have been generated by `nacl.sign.keyPair` or `nacl.sign.keyPair.fromSeed`. #### nacl.sign.keyPair.fromSeed(seed) Returns a new signing key pair generated deterministically from a 32-byte seed. The seed must contain enough entropy to be secure. This method is not recommended for general use: instead, use `nacl.sign.keyPair` to generate a new key pair from a random seed. #### nacl.sign(message, secretKey) Signs the message using the secret key and returns a signed message. #### nacl.sign.open(signedMessage, publicKey) Verifies the signed message and returns the message without signature. Returns `null` if verification failed. #### nacl.sign.detached(message, secretKey) Signs the message using the secret key and returns a signature. #### nacl.sign.detached.verify(message, signature, publicKey) Verifies the signature for the message and returns `true` if verification succeeded or `false` if it failed. #### nacl.sign.publicKeyLength = 32 Length of signing public key in bytes. #### nacl.sign.secretKeyLength = 64 Length of signing secret key in bytes. #### nacl.sign.seedLength = 32 Length of seed for `nacl.sign.keyPair.fromSeed` in bytes. #### nacl.sign.signatureLength = 64 Length of signature in bytes. ### Hashing Implements *SHA-512*. #### nacl.hash(message) Returns SHA-512 hash of the message. #### nacl.hash.hashLength = 64 Length of hash in bytes. ### Random bytes generation #### nacl.randomBytes(length) Returns a `Uint8Array` of the given length containing random bytes of cryptographic quality. 
If the platform doesn't provide a suitable PRNG, the following functions, which require random numbers, will throw an exception:
To test other versions, set environment variable `NACL_SRC` to the file name you want to test. For example, the following command will test fast minified version: $ NACL_SRC=nacl-fast.min.js npm run test-node To run full suite of tests in Node.js, including comparing outputs of JavaScript port to outputs of the original C version: $ npm run test-node-all To prepare tests for browsers: $ npm run build-test-browser and then open `test/browser/test.html` (or `test/browser/test-fast.html`) to run them. To run headless browser tests with `tape-run` (powered by Electron): $ npm run test-browser (If you get `Error: spawn ENOENT`, install *xvfb*: `sudo apt-get install xvfb`.) To run tests in both Node and Electron: $ npm test ### Benchmarking To run benchmarks in Node.js: $ npm run bench $ NACL_SRC=nacl-fast.min.js npm run bench To run benchmarks in a browser, open `test/benchmark/bench.html` (or `test/benchmark/bench-fast.html`). Benchmarks ---------- For reference, here are benchmarks from MacBook Pro (Retina, 13-inch, Mid 2014) laptop with 2.6 GHz Intel Core i5 CPU (Intel) in Chrome 53/OS X and Xiaomi Redmi Note 3 smartphone with 1.8 GHz Qualcomm Snapdragon 650 64-bit CPU (ARM) in Chrome 52/Android: | | nacl.js Intel | nacl-fast.js Intel | nacl.js ARM | nacl-fast.js ARM | | ------------- |:-------------:|:-------------------:|:-------------:|:-----------------:| | salsa20 | 1.3 MB/s | 128 MB/s | 0.4 MB/s | 43 MB/s | | poly1305 | 13 MB/s | 171 MB/s | 4 MB/s | 52 MB/s | | hash | 4 MB/s | 34 MB/s | 0.9 MB/s | 12 MB/s | | secretbox 1K | 1113 op/s | 57583 op/s | 334 op/s | 14227 op/s | | box 1K | 145 op/s | 718 op/s | 37 op/s | 368 op/s | | scalarMult | 171 op/s | 733 op/s | 56 op/s | 380 op/s | | sign | 77 op/s | 200 op/s | 20 op/s | 61 op/s | | sign.open | 39 op/s | 102 op/s | 11 op/s | 31 op/s | (You can run benchmarks on your devices by clicking on the links at the bottom of the [home page](https://tweetnacl.js.org)). 
In short, with *nacl-fast.js* and 1024-byte messages you can expect to encrypt and authenticate more than 57000 messages per second on a typical laptop or more than 14000 messages per second on a $170 smartphone, sign about 200 and verify 100 messages per second on a laptop or 60 and 30 messages per second on a smartphone, per CPU core (with Web Workers you can do these operations in parallel), which is good enough for most applications. Contributors ------------ See AUTHORS.md file. Third-party libraries based on TweetNaCl.js ------------------------------------------- * [forward-secrecy](https://github.com/alax/forward-secrecy) — Axolotl ratchet implementation * [nacl-stream](https://github.com/dchest/nacl-stream-js) - streaming encryption * [tweetnacl-auth-js](https://github.com/dchest/tweetnacl-auth-js) — implementation of [`crypto_auth`](http://nacl.cr.yp.to/auth.html) * [chloride](https://github.com/dominictarr/chloride) - unified API for various NaCl modules Who uses it ----------- Some notable users of TweetNaCl.js: * [miniLock](http://minilock.io/) * [Stellar](https://www.stellar.org/)
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/tweetnacl/README.md
0.930435
0.658431
README.md
pypi
# update-notifier [![Build Status](https://travis-ci.org/yeoman/update-notifier.svg?branch=master)](https://travis-ci.org/yeoman/update-notifier) > Update notifications for your CLI app ![](screenshot.png) Inform users of your package of updates in a non-intrusive way. #### Contents - [Install](#install) - [Usage](#usage) - [How](#how) - [API](#api) - [About](#about) - [Users](#users) ## Install ``` $ npm install update-notifier ``` ## Usage ### Simple ```js const updateNotifier = require('update-notifier'); const pkg = require('./package.json'); updateNotifier({pkg}).notify(); ``` ### Comprehensive ```js const updateNotifier = require('update-notifier'); const pkg = require('./package.json'); // Checks for available update and returns an instance const notifier = updateNotifier({pkg}); // Notify using the built-in convenience method notifier.notify(); // `notifier.update` contains some useful info about the update console.log(notifier.update); /* { latest: '1.0.1', current: '1.0.0', type: 'patch', // Possible values: latest, major, minor, patch, prerelease, build name: 'pageres' } */ ``` ### Options and custom message ```js const notifier = updateNotifier({ pkg, updateCheckInterval: 1000 * 60 * 60 * 24 * 7 // 1 week }); if (notifier.update) { console.log(`Update available: ${notifier.update.latest}`); } ``` ## How Whenever you initiate the update notifier and it's not within the interval threshold, it will asynchronously check with npm in the background for available updates, then persist the result. The next time the notifier is initiated, the result will be loaded into the `.update` property. This prevents any impact on your package startup performance. The update check is done in a unref'ed [child process](https://nodejs.org/api/child_process.html#child_process_child_process_spawn_command_args_options). This means that if you call `process.exit`, the check will still be performed in its own process. 
Checks if there is an available update. Accepts options defined below. Returns an instance with an `.update` property if there is an available update, otherwise `undefined`.
There are a bunch of projects using it:
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/update-notifier/readme.md
0.436382
0.838349
readme.md
pypi
- Allow using `"constructor"` as an argument in `memoize` (#998) - Give a better error message when `auto` dependency checking fails (#994)
(#891) - Various code simplifications (#896, #904) - Various doc fixes :scroll: (#890, #894, #903, #905, #912) # v1.4.2 - Ensure coverage files don't get published on npm (#879) # v1.4.1 - Add in overlooked `detectLimit` method (#866) - Removed unnecessary files from npm releases (#861) - Removed usage of a reserved word to prevent :boom: in older environments (#870) # v1.4.0 - `asyncify` now supports promises (#840) - Added `Limit` versions of `filter` and `reject` (#836) - Add `Limit` versions of `detect`, `some` and `every` (#828, #829) - `some`, `every` and `detect` now short circuit early (#828, #829) - Improve detection of the global object (#804), enabling use in WebWorkers - `whilst` now called with arguments from iterator (#823) - `during` now gets called with arguments from iterator (#824) - Code simplifications and optimizations aplenty ([diff](https://github.com/caolan/async/compare/v1.3.0...v1.4.0)) # v1.3.0 New Features: - Added `constant` - Added `asyncify`/`wrapSync` for making sync functions work with callbacks. (#671, #806) - Added `during` and `doDuring`, which are like `whilst` with an async truth test. (#800) - `retry` now accepts an `interval` parameter to specify a delay between retries. (#793) - `async` should work better in Web Workers due to better `root` detection (#804) - Callbacks are now optional in `whilst`, `doWhilst`, `until`, and `doUntil` (#642) - Various internal updates (#786, #801, #802, #803) - Various doc fixes (#790, #794) Bug Fixes: - `cargo` now exposes the `payload` size, and `cargo.payload` can be changed on the fly after the `cargo` is created. (#740, #744, #783) # v1.2.1 Bug Fix: - Small regression with synchronous iterator behavior in `eachSeries` with a 1-element array. Before 1.1.0, `eachSeries`'s callback was called on the same tick, which this patch restores. In 2.0.0, it will be called on the next tick. 
(#782) # v1.2.0 New Features: - Added `timesLimit` (#743) - `concurrency` can be changed after initialization in `queue` by setting `q.concurrency`. The new concurrency will be reflected the next time a task is processed. (#747, #772) Bug Fixes: - Fixed a regression in `each` and family with empty arrays that have additional properties. (#775, #777) # v1.1.1 Bug Fix: - Small regression with synchronous iterator behavior in `eachSeries` with a 1-element array. Before 1.1.0, `eachSeries`'s callback was called on the same tick, which this patch restores. In 2.0.0, it will be called on the next tick. (#782) # v1.1.0 New Features: - `cargo` now supports all of the same methods and event callbacks as `queue`. - Added `ensureAsync` - A wrapper that ensures an async function calls its callback on a later tick. (#769) - Optimized `map`, `eachOf`, and `waterfall` families of functions - Passing a `null` or `undefined` array to `map`, `each`, `parallel` and families will be treated as an empty array (#667). - The callback is now optional for the composed results of `compose` and `seq`. (#618) - Reduced file size by 4kb, (minified version by 1kb) - Added code coverage through `nyc` and `coveralls` (#768) Bug Fixes: - `forever` will no longer stack overflow with a synchronous iterator (#622) - `eachLimit` and other limit functions will stop iterating once an error occurs (#754) - Always pass `null` in callbacks when there is no error (#439) - Ensure proper conditions when calling `drain()` after pushing an empty data set to a queue (#668) - `each` and family will properly handle an empty array (#578) - `eachSeries` and family will finish if the underlying array is modified during execution (#557) - `queue` will throw if a non-function is passed to `q.push()` (#593) - Doc fixes (#629, #766) # v1.0.0 No known breaking changes, we are simply complying with semver from here on out. Changes: - Start using a changelog! 
- Add `forEachOf` for iterating over Objects (or to iterate Arrays with indexes available) (#168 #704 #321) - Detect deadlocks in `auto` (#663) - Better support for require.js (#527) - Throw if queue created with concurrency `0` (#714) - Fix unneeded iteration in `queue.resume()` (#758) - Guard against timer mocking overriding `setImmediate` (#609 #611) - Miscellaneous doc fixes (#542 #596 #615 #628 #631 #690 #729) - Use single noop function internally (#546) - Optimize internal `_each`, `_map` and `_keys` functions.
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/async/CHANGELOG.md
0.864796
0.705658
CHANGELOG.md
pypi
Flattens an object up to a given level of nesting, returning an array of arrays of length "depth + 1", where the first "depth" elements correspond to flattened columns and the last element contains the remaining object.
For example: flattenObject({ 'I': { 'A': { 'i': { 'datum1': [ 1, 2 ], 'datum2': [ 3, 4 ] }, 'ii': { 'datum1': [ 3, 4 ] } }, 'B': { 'i': { 'datum1': [ 5, 6 ] }, 'ii': { 'datum1': [ 7, 8 ], 'datum2': [ 3, 4 ], }, 'iii': { } } }, 'II': { 'A': { 'i': { 'datum1': [ 1, 2 ], 'datum2': [ 3, 4 ] } } } }, 3) becomes: [ [ 'I', 'A', 'i', { 'datum1': [ 1, 2 ], 'datum2': [ 3, 4 ] } ], [ 'I', 'A', 'ii', { 'datum1': [ 3, 4 ] } ], [ 'I', 'B', 'i', { 'datum1': [ 5, 6 ] } ], [ 'I', 'B', 'ii', { 'datum1': [ 7, 8 ], 'datum2': [ 3, 4 ] } ], [ 'I', 'B', 'iii', {} ], [ 'II', 'A', 'i', { 'datum1': [ 1, 2 ], 'datum2': [ 3, 4 ] } ] ] This function is strict: "depth" must be a non-negative integer and "obj" must be a non-null object with at least "depth" levels of nesting under all keys. ### flattenIter(obj, depth, func) This is similar to `flattenObject` except that instead of returning an array, this function invokes `func(entry)` for each `entry` in the array that `flattenObject` would return. `flattenIter(obj, depth, func)` is logically equivalent to `flattenObject(obj, depth).forEach(func)`. Importantly, this version never constructs the full array. Its memory usage is O(depth) rather than O(n) (where `n` is the number of flattened elements). There's another difference between `flattenObject` and `flattenIter` that's related to the special case where `depth === 0`. In this case, `flattenObject` omits the array wrapping `obj` (which is regrettable). ### pluck(obj, key) Fetch nested property "key" from object "obj", traversing objects as needed. For example, `pluck(obj, "foo.bar.baz")` is roughly equivalent to `obj.foo.bar.baz`, except that: 1. If traversal fails, the resulting value is undefined, and no error is thrown. For example, `pluck({}, "foo.bar")` is just undefined. 2. If "obj" has property "key" directly (without traversing), the corresponding property is returned. For example, `pluck({ 'foo.bar': 1 }, 'foo.bar')` is 1, not undefined. 
This is also true recursively, so `pluck({ 'a': { 'foo.bar': 1 } }, 'a.foo.bar')` is also 1, not undefined. ### randElt(array) Returns an element from "array" selected uniformly at random. If "array" is empty, throws an Error. ### startsWith(str, prefix) Returns true if the given string starts with the given prefix and false otherwise. ### endsWith(str, suffix) Returns true if the given string ends with the given suffix and false otherwise. ### parseInteger(str, options) Parses the contents of `str` (a string) as an integer. On success, the integer value is returned (as a number). On failure, an error is **returned** describing why parsing failed. By default, leading and trailing whitespace characters are not allowed, nor are trailing characters that are not part of the numeric representation. This behaviour can be toggled by using the options below. The empty string (`''`) is not considered valid input. If the return value cannot be precisely represented as a number (i.e., is smaller than `Number.MIN_SAFE_INTEGER` or larger than `Number.MAX_SAFE_INTEGER`), an error is returned. Additionally, the string `'-0'` will be parsed as the integer `0`, instead of as the IEEE floating point value `-0`. This function accepts both upper and lowercase characters for digits, similar to `parseInt()`, `Number()`, and [strtol(3C)](https://illumos.org/man/3C/strtol). 
The following may be specified in `options`: Option | Type | Default | Meaning ------------------ | ------- | ------- | --------------------------- base | number | 10 | numeric base (radix) to use, in the range 2 to 36 allowSign | boolean | true | whether to interpret any leading `+` (positive) and `-` (negative) characters allowImprecise | boolean | false | whether to accept values that may have lost precision (past `MAX_SAFE_INTEGER` or below `MIN_SAFE_INTEGER`) allowPrefix | boolean | false | whether to interpret the prefixes `0b` (base 2), `0o` (base 8), `0t` (base 10), or `0x` (base 16) allowTrailing | boolean | false | whether to ignore trailing characters trimWhitespace | boolean | false | whether to trim any leading or trailing whitespace/line terminators leadingZeroIsOctal | boolean | false | whether a leading zero indicates octal Note that if `base` is unspecified, and `allowPrefix` or `leadingZeroIsOctal` are, then the leading characters can change the default base from 10. If `base` is explicitly specified and `allowPrefix` is true, then the prefix will only be accepted if it matches the specified base. `base` and `leadingZeroIsOctal` cannot be used together. **Context:** It's tricky to parse integers with JavaScript's built-in facilities for several reasons: - `parseInt()` and `Number()` by default allow the base to be specified in the input string by a prefix (e.g., `0x` for hex). - `parseInt()` allows trailing nonnumeric characters. - `Number(str)` returns 0 when `str` is the empty string (`''`). - Both functions return incorrect values when the input string represents a valid integer outside the range of integers that can be represented precisely. Specifically, `parseInt('9007199254740993')` returns 9007199254740992. - Both functions always accept `-` and `+` signs before the digit. 
- Some older JavaScript engines always interpret a leading 0 as indicating octal, which can be surprising when parsing input from users who expect a leading zero to be insignificant. While each of these may be desirable in some contexts, there are also times when none of them are wanted. `parseInteger()` grants greater control over what input's permissible. ### iso8601(date) Converts a Date object to an ISO8601 date string of the form "YYYY-MM-DDTHH:MM:SS.sssZ". This format is not customizable. ### parseDateTime(str) Parses a date expressed as a string, as either a number of milliseconds since the epoch or any string format that Date accepts, giving preference to the former where these two sets overlap (e.g., strings containing small numbers). ### hrtimeDiff(timeA, timeB) Given two hrtime readings (as from Node's `process.hrtime()`), where timeA is later than timeB, compute the difference and return that as an hrtime. It is illegal to invoke this for a pair of times where timeB is newer than timeA. ### hrtimeAdd(timeA, timeB) Add two hrtime intervals (as from Node's `process.hrtime()`), returning a new hrtime interval array. This function does not modify either input argument. ### hrtimeAccum(timeA, timeB) Add two hrtime intervals (as from Node's `process.hrtime()`), storing the result in `timeA`. This function overwrites (and returns) the first argument passed in. ### hrtimeNanosec(timeA), hrtimeMicrosec(timeA), hrtimeMillisec(timeA) This suite of functions converts a hrtime interval (as from Node's `process.hrtime()`) into a scalar number of nanoseconds, microseconds or milliseconds. Results are truncated, as with `Math.floor()`. ### validateJsonObject(schema, object) Uses JSON validation (via JSV) to validate the given object against the given schema. On success, returns null. On failure, *returns* (does not throw) a useful Error object. ### extraProperties(object, allowed) Check an object for unexpected properties. 
Accepts the object to check, and an array of allowed property name strings. If extra properties are detected, an array of extra property names is returned. If no properties other than those in the allowed list are present on the object, the returned array will be of zero length. ### mergeObjects(provided, overrides, defaults) Merge properties from objects "provided", "overrides", and "defaults". The intended use case is for functions that accept named arguments in an "args" object, but want to provide some default values and override other values. In that case, "provided" is what the caller specified, "overrides" are what the function wants to override, and "defaults" contains default values. The function starts with the values in "defaults", overrides them with the values in "provided", and then overrides those with the values in "overrides". For convenience, any of these objects may be falsey, in which case they will be ignored. The input objects are never modified, but properties in the returned object are not deep-copied. For example: mergeObjects(undefined, { 'objectMode': true }, { 'highWaterMark': 0 }) returns: { 'objectMode': true, 'highWaterMark': 0 } For another example: mergeObjects( { 'highWaterMark': 16, 'objectMode': 7 }, /* from caller */ { 'objectMode': true }, /* overrides */ { 'highWaterMark': 0 }); /* default */ returns: { 'objectMode': true, 'highWaterMark': 16 } # Contributing See separate [contribution guidelines](CONTRIBUTING.md).
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/jsprim/README.md
0.929736
0.813461
README.md
pypi
jsondiffpatch ============= <!--- badges --> [![Build Status](https://secure.travis-ci.org/benjamine/jsondiffpatch.svg)](http://travis-ci.org/benjamine/jsondiffpatch) [![Code Climate](https://codeclimate.com/github/benjamine/jsondiffpatch/badges/gpa.svg)](https://codeclimate.com/github/benjamine/jsondiffpatch) [![Test Coverage](https://codeclimate.com/github/benjamine/jsondiffpatch/badges/coverage.svg)](https://codeclimate.com/github/benjamine/jsondiffpatch) [![NPM version](https://badge.fury.io/js/jsondiffpatch.svg)](http://badge.fury.io/js/jsondiffpatch) [![NPM dependencies](https://david-dm.org/benjamine/jsondiffpatch.svg)](https://david-dm.org/benjamine/jsondiffpatch) Diff & patch JavaScript objects ----- **[Live Demo](http://benjamine.github.com/jsondiffpatch/demo/index.html)** ----- - min+gzipped ~ 16KB - browser and server (`/dist` folder with bundles for UMD, commonjs, or ES modules) - (optionally) uses [google-diff-match-patch](http://code.google.com/p/google-diff-match-patch/) for long text diffs (diff at character level) - smart array diffing using [LCS](http://en.wikipedia.org/wiki/Longest_common_subsequence_problem), ***IMPORTANT NOTE:*** to match objects inside an array you must provide an ```objectHash``` function (this is how objects are matched, otherwise a dumb match by position is used). For more details, check [Array diff documentation](docs/arrays.md) - reverse a delta - unpatch (eg. revert object to its original state using a delta) - simplistic, pure JSON, low footprint [delta format](docs/deltas.md) - multiple output formatters: - html (check it at the [Live Demo](http://benjamine.github.com/jsondiffpatch/demo/index.html)) - annotated json (html), makes the JSON delta format self-explained - console (colored), try running ```./node_modules/.bin/jsondiffpatch left.json right.json``` - write your own! 
// also country2 can be returned to its original value with: jsondiffpatch.unpatch(country2, delta);
{ name: 'La Plata' }); // modify Rosario, and move it var rosario = country.cities.splice(1, 1)[0]; rosario.population += 1234; country.cities.push(rosario); // create a configured instance, match objects by name var diffpatcher = jsondiffpatch.create({ objectHash: function(obj) { return obj.name; } }); var delta = diffpatcher.diff(country, country2); assertSame(delta, { "cities": { "_t": "a", // indicates this node is an array (not an object) "1": [ // inserted at index 1 { "name": "Cordoba", "population": 1430023 }] , "2": { // population modified at index 2 (Rosario) "population": [ 1137520, 1136286 ] }, "_3": [ // removed from index 3 { "name": "La Plata" }, 0, 0], "_4": [ // move from index 4 to index 2 '', 2, 3] } }); ``` For more example cases (nested objects or arrays, long text diffs) check ```test/examples/``` If you want to understand deltas, see [delta format documentation](docs/deltas.md) Installing --------------- ### NPM This works for node, or in browsers if you already do bundling on your app ``` sh npm install jsondiffpatch ``` ``` js var jsondiffpatch = require('jsondiffpatch').create(options); ``` ### browser In a browser, you could load directly a bundle in `/dist`, eg. `/dist/jsondiffpatch.umd.js`. 
Options ------- ``` javascript var jsondiffpatch = require('jsondiffpatch').create({ // used to match objects when diffing arrays, by default only === operator is used objectHash: function(obj) { // this function is used only to when objects are not equal by ref return obj._id || obj.id; }, arrays: { // default true, detect items moved inside the array (otherwise they will be registered as remove+add) detectMove: true, // default false, the value of items moved is not included in deltas includeValueOnMove: false }, textDiff: { // default 60, minimum string length (left and right sides) to use text diff algorythm: google-diff-match-patch minLength: 60 }, propertyFilter: function(name, context) { /* this optional function can be specified to ignore object properties (eg. volatile data) name: property name, present in either context.left or context.right objects context: the diff context (has context.left and context.right objects) */ return name.slice(0, 1) !== '$'; }, cloneDiffValues: false /* default false. if true, values in the obtained delta will be cloned (using jsondiffpatch.clone by default), to ensure delta keeps no references to left or right objects. this becomes useful if you're diffing and patching the same objects multiple times without serializing deltas. 
instead of true, a function can be specified here to provide a custom clone(value) */ }); ``` Visual Diff ----------- ``` html <!DOCTYPE html> <html> <head> <script type='text/javascript' src="https://cdn.jsdelivr.net/npm/jsondiffpatch/dist/jsondiffpatch.umd.min.js"></script> <link rel="stylesheet" href="./style.css" type="text/css" /> <link rel="stylesheet" href="../formatters-styles/html.css" type="text/css" /> <link rel="stylesheet" href="../formatters-styles/annotated.css" type="text/css" /> </head> <body> <div id="visual"></div> <hr/> <div id="annotated"></div> <script> var left = { a: 3, b: 4 }; var right = { a: 5, c: 9 }; var delta = jsondiffpatch.diff(left, right); // beautiful html diff document.getElementById('visual').innerHTML = jsondiffpatch.formatters.html.format(delta, left); // self-explained json document.getElementById('annotated').innerHTML = jsondiffpatch.formatters.annotated.format(delta, left); </script> </body> </html> ``` To see formatters in action check the [Live Demo](http://benjamine.github.com/jsondiffpatch/demo/index.html). For more details check [Formatters documentation](docs/formatters.md) Console -------- ``` sh # diff two json files, colored output (using chalk lib) ./node_modules/.bin/jsondiffpatch ./left.json ./right.json # or install globally npm install -g jsondiffpatch jsondiffpatch ./demo/left.json ./demo/right.json ``` ![console_demo!](docs/demo/consoledemo.png) Plugins ------- ```diff()```, ```patch()``` and ```reverse()``` functions are implemented using Pipes & Filters pattern, making it extremely customizable by adding or replacing filters on a pipe. Check [Plugins documentation](docs/plugins.md) for details.
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/jsondiffpatch/README.md
0.627152
0.836688
README.md
pypi
# jsdiff [![Build Status](https://secure.travis-ci.org/kpdecker/jsdiff.svg)](http://travis-ci.org/kpdecker/jsdiff) [![Sauce Test Status](https://saucelabs.com/buildstatus/jsdiff)](https://saucelabs.com/u/jsdiff) A javascript text differencing implementation. Based on the algorithm proposed in ["An O(ND) Difference Algorithm and its Variations" (Myers, 1986)](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.4.6927). ## Installation ```bash npm install diff --save ``` or ```bash bower install jsdiff --save ``` ## API * `JsDiff.diffChars(oldStr, newStr[, options])` - diffs two blocks of text, comparing character by character. Returns a list of change objects (See below). Options * `ignoreCase`: `true` to ignore casing difference. Defaults to `false`. * `JsDiff.diffWords(oldStr, newStr[, options])` - diffs two blocks of text, comparing word by word, ignoring whitespace. Returns a list of change objects (See below). Options * `ignoreCase`: Same as in `diffChars`. * `JsDiff.diffWordsWithSpace(oldStr, newStr[, options])` - diffs two blocks of text, comparing word by word, treating whitespace as significant. Returns a list of change objects (See below). * `JsDiff.diffLines(oldStr, newStr[, options])` - diffs two blocks of text, comparing line by line. Options * `ignoreWhitespace`: `true` to ignore leading and trailing whitespace. This is the same as `diffTrimmedLines` * `newlineIsToken`: `true` to treat newline characters as separate tokens. This allows for changes to the newline structure to occur independently of the line content and to be treated as such. In general this is the more human friendly form of `diffLines` and `diffLines` is better suited for patches and other computer friendly output. Returns a list of change objects (See below). * `JsDiff.diffTrimmedLines(oldStr, newStr[, options])` - diffs two blocks of text, comparing line by line, ignoring leading and trailing whitespace. Returns a list of change objects (See below). 
* `JsDiff.diffSentences(oldStr, newStr[, options])` - diffs two blocks of text, comparing sentence by sentence. Returns a list of change objects (See below). * `JsDiff.diffCss(oldStr, newStr[, options])` - diffs two blocks of text, comparing CSS tokens. Returns a list of change objects (See below). * `JsDiff.diffJson(oldObj, newObj[, options])` - diffs two JSON objects, comparing the fields defined on each. The order of fields, etc does not matter in this comparison. Returns a list of change objects (See below). * `JsDiff.diffArrays(oldArr, newArr[, options])` - diffs two arrays, comparing each item for strict equality (===). Options * `comparator`: `function(left, right)` for custom equality checks Returns a list of change objects (See below). * `JsDiff.createTwoFilesPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader)` - creates a unified diff patch. Parameters: * `oldFileName` : String to be output in the filename section of the patch for the removals * `newFileName` : String to be output in the filename section of the patch for the additions * `oldStr` : Original string value * `newStr` : New string value * `oldHeader` : Additional information to include in the old file header * `newHeader` : Additional information to include in the new file header * `options` : An object with options. Currently, only `context` is supported and describes how many lines of context should be included. * `JsDiff.createPatch(fileName, oldStr, newStr, oldHeader, newHeader)` - creates a unified diff patch. Just like JsDiff.createTwoFilesPatch, but with oldFileName being equal to newFileName. * `JsDiff.structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options)` - returns an object with an array of hunk objects. This method is similar to createTwoFilesPatch, but returns a data structure suitable for further processing. Parameters are the same as createTwoFilesPatch. 
The data structure returned may look like this: ```js { oldFileName: 'oldfile', newFileName: 'newfile', oldHeader: 'header1', newHeader: 'header2', hunks: [{ oldStart: 1, oldLines: 3, newStart: 1, newLines: 3, lines: [' line2', ' line3', '-line4', '+line5', '\\ No newline at end of file'], }] } ``` * `JsDiff.applyPatch(source, patch[, options])` - applies a unified diff patch. Return a string containing new version of provided data. `patch` may be a string diff or the output from the `parsePatch` or `structuredPatch` methods. The optional `options` object may have the following keys: - `fuzzFactor`: Number of lines that are allowed to differ before rejecting a patch. Defaults to 0. - `compareLine(lineNumber, line, operation, patchContent)`: Callback used to compare to given lines to determine if they should be considered equal when patching. Defaults to strict equality but may be overridden to provide fuzzier comparison. Should return false if the lines should be rejected. * `JsDiff.applyPatches(patch, options)` - applies one or more patches. This method will iterate over the contents of the patch and apply to data provided through callbacks. The general flow for each patch index is: - `options.loadFile(index, callback)` is called. The caller should then load the contents of the file and then pass that to the `callback(err, data)` callback. Passing an `err` will terminate further patch execution. - `options.patched(index, content, callback)` is called once the patch has been applied. `content` will be the return value from `applyPatch`. When it's ready, the caller should call `callback(err)` callback. Passing an `err` will terminate further patch execution. Once all patches have been applied or an error occurs, the `options.complete(err)` callback is made. * `JsDiff.parsePatch(diffStr)` - Parses a patch into structured data Return a JSON object representation of the a patch, suitable for use with the `applyPatch` method. 
This parses to the same structure returned by `JsDiff.structuredPatch`. * `convertChangesToXML(changes)` - converts a list of changes to a serialized XML format All methods above which accept the optional `callback` method will run in sync mode when that parameter is omitted and in async mode when supplied. This allows for larger diffs without blocking the event loop. This may be passed either directly as the final parameter or as the `callback` field in the `options` object. ### Change Objects Many of the methods above return change objects. These objects consist of the following fields: * `value`: Text content * `added`: True if the value was inserted into the new string * `removed`: True of the value was removed from the old string Note that some cases may omit a particular flag field. Comparison on the flag fields should always be done in a truthy or falsy manner. ## Examples Basic example in Node ```js require('colors'); var jsdiff = require('diff'); var one = 'beep boop'; var other = 'beep boob blah'; var diff = jsdiff.diffChars(one, other); diff.forEach(function(part){ // green for additions, red for deletions // grey for common parts var color = part.added ? 'green' : part.removed ? 'red' : 'grey'; process.stderr.write(part.value[color]); }); console.log(); ``` Running the above program should yield <img src="images/node_example.png" alt="Node Example"> Basic example in a web page ```html <pre id="display"></pre> <script src="diff.js"></script> <script> var one = 'beep boop', other = 'beep boob blah', color = '', span = null; var diff = JsDiff.diffChars(one, other), display = document.getElementById('display'), fragment = document.createDocumentFragment(); diff.forEach(function(part){ // green for additions, red for deletions // grey for common parts color = part.added ? 'green' : part.removed ? 
'red' : 'grey'; span = document.createElement('span'); span.style.color = color; span.appendChild(document .createTextNode(part.value)); fragment.appendChild(span); }); display.appendChild(fragment); </script> ``` Open the above .html file in a browser and you should see <img src="images/web_example.png" alt="Node Example"> **[Full online demo](http://kpdecker.github.com/jsdiff)** ## Compatibility [![Sauce Test Status](https://saucelabs.com/browser-matrix/jsdiff.svg)](https://saucelabs.com/u/jsdiff) jsdiff supports all ES3 environments with some known issues on IE8 and below. Under these browsers some diff algorithms such as word diff and others may fail due to lack of support for capturing groups in the `split` operation. ## License See [LICENSE](https://github.com/kpdecker/jsdiff/blob/master/LICENSE).
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/diff/README.md
0.862569
0.966976
README.md
pypi
## Pure JS character encoding conversion [![Build Status](https://travis-ci.org/ashtuchkin/iconv-lite.svg?branch=master)](https://travis-ci.org/ashtuchkin/iconv-lite) * Doesn't need native code compilation. Works on Windows and in sandboxed environments like [Cloud9](http://c9.io). * Used in popular projects like [Express.js (body_parser)](https://github.com/expressjs/body-parser), [Grunt](http://gruntjs.com/), [Nodemailer](http://www.nodemailer.com/), [Yeoman](http://yeoman.io/) and others. * Faster than [node-iconv](https://github.com/bnoordhuis/node-iconv) (see below for performance comparison). * Intuitive encode/decode API * Streaming support for Node v0.10+ * [Deprecated] Can extend Node.js primitives (buffers, streams) to support all iconv-lite encodings. * In-browser usage via [Browserify](https://github.com/substack/node-browserify) (~180k gzip compressed with Buffer shim included). * Typescript [type definition file](https://github.com/ashtuchkin/iconv-lite/blob/master/lib/index.d.ts) included. * React Native is supported (need to explicitly `npm install` two more modules: `buffer` and `stream`). * License: MIT. [![NPM Stats](https://nodei.co/npm/iconv-lite.png?downloads=true&downloadRank=true)](https://npmjs.org/packages/iconv-lite/) ## Usage ### Basic API ```javascript var iconv = require('iconv-lite'); // Convert from an encoded buffer to js string. str = iconv.decode(Buffer.from([0x68, 0x65, 0x6c, 0x6c, 0x6f]), 'win1251'); // Convert from js string to an encoded buffer. buf = iconv.encode("Sample input string", 'win1251'); // Check if encoding is supported iconv.encodingExists("us-ascii") ``` ### Streaming API (Node v0.10+) ```javascript // Decode stream (from binary stream to js strings) http.createServer(function(req, res) { var converterStream = iconv.decodeStream('win1251'); req.pipe(converterStream); converterStream.on('data', function(str) { console.log(str); // Do something with decoded strings, chunk-by-chunk. 
}); }); // Convert encoding streaming example fs.createReadStream('file-in-win1251.txt') .pipe(iconv.decodeStream('win1251')) .pipe(iconv.encodeStream('ucs2')) .pipe(fs.createWriteStream('file-in-ucs2.txt')); // Sugar: all encode/decode streams have .collect(cb) method to accumulate data. http.createServer(function(req, res) { req.pipe(iconv.decodeStream('win1251')).collect(function(err, body) { assert(typeof body == 'string'); console.log(body); // full request body string }); }); ``` ### [Deprecated] Extend Node.js own encodings > NOTE: This doesn't work on latest Node versions. See [details](https://github.com/ashtuchkin/iconv-lite/wiki/Node-v4-compatibility). ```javascript // After this call all Node basic primitives will understand iconv-lite encodings. iconv.extendNodeEncodings(); // Examples: buf = new Buffer(str, 'win1251'); buf.write(str, 'gbk'); str = buf.toString('latin1'); assert(Buffer.isEncoding('iso-8859-15')); Buffer.byteLength(str, 'us-ascii'); http.createServer(function(req, res) { req.setEncoding('big5'); req.collect(function(err, body) { console.log(body); }); }); fs.createReadStream("file.txt", "shift_jis"); // External modules are also supported (if they use Node primitives, which they probably do). request = require('request'); request({ url: "http://github.com/", encoding: "cp932" }); // To remove extensions iconv.undoExtendNodeEncodings(); ``` ## Supported encodings * All node.js native encodings: utf8, ucs2 / utf16-le, ascii, binary, base64, hex. * Additional unicode encodings: utf16, utf16-be, utf-7, utf-7-imap. * All widespread singlebyte encodings: Windows 125x family, ISO-8859 family, IBM/DOS codepages, Macintosh family, KOI8 family, all others supported by iconv library. Aliases like 'latin1', 'us-ascii' also supported. * All widespread multibyte encodings: CP932, CP936, CP949, CP950, GB2312, GBK, GB18030, Big5, Shift_JIS, EUC-JP. See [all supported encodings on wiki](https://github.com/ashtuchkin/iconv-lite/wiki/Supported-Encodings). 
Most singlebyte encodings are generated automatically from [node-iconv](https://github.com/bnoordhuis/node-iconv). Thank you Ben Noordhuis and libiconv authors! Multibyte encodings are generated from [Unicode.org mappings](http://www.unicode.org/Public/MAPPINGS/) and [WHATWG Encoding Standard mappings](http://encoding.spec.whatwg.org/). Thank you, respective authors! ## Encoding/decoding speed Comparison with node-iconv module (1000x256kb, on MacBook Pro, Core i5/2.6 GHz, Node v0.12.0). Note: your results may vary, so please always check on your hardware. operation iconv@2.1.4 iconv-lite@0.4.7 ---------------------------------------------------------- encode('win1251') ~96 Mb/s ~320 Mb/s decode('win1251') ~95 Mb/s ~246 Mb/s ## BOM handling * Decoding: BOM is stripped by default, unless overridden by passing `stripBOM: false` in options (f.ex. `iconv.decode(buf, enc, {stripBOM: false})`). A callback might also be given as a `stripBOM` parameter - it'll be called if BOM character was actually found. * If you want to detect UTF-8 BOM when decoding other encodings, use [node-autodetect-decoder-stream](https://github.com/danielgindi/node-autodetect-decoder-stream) module. * Encoding: No BOM added, unless overridden by `addBOM: true` option. ## UTF-16 Encodings This library supports UTF-16LE, UTF-16BE and UTF-16 encodings. First two are straightforward, but UTF-16 is trying to be smart about endianness in the following ways: * Decoding: uses BOM and 'spaces heuristic' to determine input endianness. Default is UTF-16LE, but can be overridden with `defaultEncoding: 'utf-16be'` option. Strips BOM unless `stripBOM: false`. * Encoding: uses UTF-16LE and writes BOM by default. Use `addBOM: false` to override. ## Other notes When decoding, be sure to supply a Buffer to decode() method, otherwise [bad things usually happen](https://github.com/ashtuchkin/iconv-lite/wiki/Use-Buffers-when-decoding). Untranslatable characters are set to � or ?. 
No transliteration is currently supported. Node versions 0.10.31 and 0.11.13 are buggy, don't use them (see #65, #77). ## Testing ```bash $ git clone git@github.com:ashtuchkin/iconv-lite.git $ cd iconv-lite $ npm install $ npm test $ # To view performance: $ node test/performance.js $ # To view test coverage: $ npm run coverage $ open coverage/lcov-report/index.html ```
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/iconv-lite/README.md
0.544559
0.759939
README.md
pypi
# diff-match-patch npm package for https://github.com/google/diff-match-patch [![Build Status](https://img.shields.io/travis/JackuB/diff-match-patch/master.svg)](https://travis-ci.org/JackuB/diff-match-patch) [![Dependency Status](https://img.shields.io/david/JackuB/diff-match-patch.svg)](https://david-dm.org/JackuB/diff-match-patch) [![NPM version](https://img.shields.io/npm/v/diff-match-patch.svg)](https://www.npmjs.com/package/diff-match-patch) [![Known Vulnerabilities](https://snyk.io/test/github/JackuB/diff-match-patch/badge.svg)](https://snyk.io/test/github/JackuB/diff-match-patch) ## Installation npm install diff-match-patch ## API [Source](https://github.com/google/diff-match-patch/wiki/API) ### Initialization The first step is to create a new `diff_match_patch` object. This object contains various properties which set the behaviour of the algorithms, as well as the following methods/functions: ### diff_main(text1, text2) → diffs An array of differences is computed which describe the transformation of text1 into text2. Each difference is an array (JavaScript, Lua) or tuple (Python) or Diff object (C++, C#, Objective C, Java). The first element specifies if it is an insertion (1), a deletion (-1) or an equality (0). The second element specifies the affected text. ```diff_main("Good dog", "Bad dog") → [(-1, "Goo"), (1, "Ba"), (0, "d dog")]``` Despite the large number of optimisations used in this function, diff can take a while to compute. The `diff_match_patch.Diff_Timeout` property is available to set how many seconds any diff's exploration phase may take. The default value is 1.0. A value of 0 disables the timeout and lets diff run until completion. Should diff timeout, the return value will still be a valid difference, though probably non-optimal. ### diff_cleanupSemantic(diffs) → null A diff of two unrelated texts can be filled with coincidental matches. 
For example, the diff of "mouse" and "sofas" is `[(-1, "m"), (1, "s"), (0, "o"), (-1, "u"), (1, "fa"), (0, "s"), (-1, "e")]`. While this is the optimum diff, it is difficult for humans to understand. Semantic cleanup rewrites the diff, expanding it into a more intelligible format. The above example would become: `[(-1, "mouse"), (1, "sofas")]`. If a diff is to be human-readable, it should be passed to `diff_cleanupSemantic`. ### diff_cleanupEfficiency(diffs) → null This function is similar to `diff_cleanupSemantic`, except that instead of optimising a diff to be human-readable, it optimises the diff to be efficient for machine processing. The results of both cleanup types are often the same. The efficiency cleanup is based on the observation that a diff made up of large numbers of small diffs edits may take longer to process (in downstream applications) or take more capacity to store or transmit than a smaller number of larger diffs. The `diff_match_patch.Diff_EditCost` property sets what the cost of handling a new edit is in terms of handling extra characters in an existing edit. The default value is 4, which means if expanding the length of a diff by three characters can eliminate one edit, then that optimisation will reduce the total costs. ### diff_levenshtein(diffs) → int Given a diff, measure its Levenshtein distance in terms of the number of inserted, deleted or substituted characters. The minimum distance is 0 which means equality, the maximum distance is the length of the longer string. ### diff_prettyHtml(diffs) → html Takes a diff array and returns a pretty HTML sequence. This function is mainly intended as an example from which to write ones own display functions. ### match_main(text, pattern, loc) → location Given a text to search, a pattern to search for and an expected location in the text near which to find the pattern, return the location which matches closest. 
The function will search for the best match based on both the number of character errors between the pattern and the potential match, as well as the distance between the expected location and the potential match. The following example is a classic dilemma. There are two potential matches, one is close to the expected location but contains a one character error, the other is far from the expected location but is exactly the pattern sought after: `match_main("abc12345678901234567890abbc", "abc", 26)` Which result is returned (0 or 24) is determined by the `diff_match_patch.Match_Distance` property. An exact letter match which is 'distance' characters away from the fuzzy location would score as a complete mismatch. For example, a distance of '0' requires the match be at the exact location specified, whereas a threshold of '1000' would require a perfect match to be within 800 characters of the expected location to be found using a 0.8 threshold (see below). The larger Match_Distance is, the slower match_main() may take to compute. This variable defaults to 1000. Another property is `diff_match_patch.Match_Threshold` which determines the cut-off value for a valid match. If Match_Threshold is closer to 0, the requirements for accuracy increase. If Match_Threshold is closer to 1 then it is more likely that a match will be found. The larger Match_Threshold is, the slower match_main() may take to compute. This variable defaults to 0.5. If no match is found, the function returns -1. ### patch_make(text1, text2) → patches ### patch_make(diffs) → patches ### patch_make(text1, diffs) → patches Given two texts, or an already computed list of differences, return an array of patch objects. The third form (text1, diffs) is preferred, use it if you happen to have that data available, otherwise this function will compute the missing pieces. 
### patch_toText(patches) → text Reduces an array of patch objects to a block of text which looks extremely similar to the standard GNU diff/patch format. This text may be stored or transmitted. ### patch_fromText(text) → patches Parses a block of text (which was presumably created by the patch_toText function) and returns an array of patch objects. ### patch_apply(patches, text1) → [text2, results] Applies a list of patches to text1. The first element of the return value is the newly patched text. The second element is an array of true/false values indicating which of the patches were successfully applied. [Note that this second element is not too useful since large patches may get broken up internally, resulting in a longer results list than the input with no way to figure out which patch succeeded or failed. A more informative API is in development.] The previously mentioned Match_Distance and Match_Threshold properties are used to evaluate patch application on text which does not match exactly. In addition, the `diff_match_patch.Patch_DeleteThreshold` property determines how closely the text within a major (~64 character) delete needs to match the expected text. If Patch_DeleteThreshold is closer to 0, then the deleted text must match the expected text more closely. If Patch_DeleteThreshold is closer to 1, then the deleted text may contain anything. In most use cases Patch_DeleteThreshold should just be set to the same value as Match_Threshold. ## Usage ```javascript import DiffMatchPatch from 'diff-match-patch'; const dmp = new DiffMatchPatch(); const diff = dmp.diff_main('dogs bark', 'cats bark'); // You can also use the following properties: DiffMatchPatch.DIFF_DELETE = -1; DiffMatchPatch.DIFF_INSERT = 1; DiffMatchPatch.DIFF_EQUAL = 0; ``` ## License http://www.apache.org/licenses/LICENSE-2.0
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/diff-match-patch/README.md
0.943764
0.930395
README.md
pypi
<img align="right" alt="Ajv logo" width="160" src="http://epoberezkin.github.io/ajv/images/ajv_logo.png"> # Ajv: Another JSON Schema Validator The fastest JSON Schema validator for Node.js and browser with draft 6 support. [![Build Status](https://travis-ci.org/epoberezkin/ajv.svg?branch=master)](https://travis-ci.org/epoberezkin/ajv) [![npm version](https://badge.fury.io/js/ajv.svg)](https://www.npmjs.com/package/ajv) [![npm@beta](https://img.shields.io/npm/v/ajv/beta.svg)](https://github.com/epoberezkin/ajv/tree/beta) [![npm downloads](https://img.shields.io/npm/dm/ajv.svg)](https://www.npmjs.com/package/ajv) [![Coverage Status](https://coveralls.io/repos/epoberezkin/ajv/badge.svg?branch=master&service=github)](https://coveralls.io/github/epoberezkin/ajv?branch=master) [![Greenkeeper badge](https://badges.greenkeeper.io/epoberezkin/ajv.svg)](https://greenkeeper.io/) [![Gitter](https://img.shields.io/gitter/room/ajv-validator/ajv.svg)](https://gitter.im/ajv-validator/ajv) __Please note__: Ajv [version 6](https://github.com/epoberezkin/ajv/tree/beta) with [JSON Schema draft-07](http://json-schema.org/work-in-progress) support is released. Use `npm install ajv@beta` to install. ## Using version 5 [JSON Schema draft-06](https://trac.tools.ietf.org/html/draft-wright-json-schema-validation-01) is published. [Ajv version 5.0.0](https://github.com/epoberezkin/ajv/releases/tag/5.0.0) that supports draft-06 is released. It may require either migrating your schemas or updating your code (to continue using draft-04 and v5 schemas). 
__Please note__: To use Ajv with draft-04 schemas you need to explicitly add meta-schema to the validator instance: ```javascript ajv.addMetaSchema(require('ajv/lib/refs/json-schema-draft-04.json')); ``` ## Contents - [Performance](#performance) - [Features](#features) - [Getting started](#getting-started) - [Frequently Asked Questions](https://github.com/epoberezkin/ajv/blob/master/FAQ.md) - [Using in browser](#using-in-browser) - [Command line interface](#command-line-interface) - Validation - [Keywords](#validation-keywords) - [Formats](#formats) - [Combining schemas with $ref](#ref) - [$data reference](#data-reference) - NEW: [$merge and $patch keywords](#merge-and-patch-keywords) - [Defining custom keywords](#defining-custom-keywords) - [Asynchronous schema compilation](#asynchronous-schema-compilation) - [Asynchronous validation](#asynchronous-validation) - Modifying data during validation - [Filtering data](#filtering-data) - [Assigning defaults](#assigning-defaults) - [Coercing data types](#coercing-data-types) - API - [Methods](#api) - [Options](#options) - [Validation errors](#validation-errors) - [Related packages](#related-packages) - [Packages using Ajv](#some-packages-using-ajv) - [Tests, Contributing, History, License](#tests) ## Performance Ajv generates code using [doT templates](https://github.com/olado/doT) to turn JSON schemas into super-fast validation functions that are efficient for v8 optimization. 
Currently Ajv is the fastest and the most standard compliant validator according to these benchmarks: - [json-schema-benchmark](https://github.com/ebdrup/json-schema-benchmark) - 50% faster than the second place - [jsck benchmark](https://github.com/pandastrike/jsck#benchmarks) - 20-190% faster - [z-schema benchmark](https://rawgit.com/zaggino/z-schema/master/benchmark/results.html) - [themis benchmark](https://cdn.rawgit.com/playlyfe/themis/master/benchmark/results.html) Performance of different validators by [json-schema-benchmark](https://github.com/ebdrup/json-schema-benchmark): [![performance](https://chart.googleapis.com/chart?chxt=x,y&cht=bhs&chco=76A4FB&chls=2.0&chbh=32,4,1&chs=600x416&chxl=-1:|djv|ajv|json-schema-validator-generator|jsen|is-my-json-valid|themis|z-schema|jsck|skeemas|json-schema-library|tv4&chd=t:100,98,72.1,66.8,50.1,15.1,6.1,3.8,1.2,0.7,0.2)](https://github.com/ebdrup/json-schema-benchmark/blob/master/README.md#performance) ## Features - Ajv implements full JSON Schema [draft 6](http://json-schema.org/) and draft 4 standards: - all validation keywords (see [JSON Schema validation keywords](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md)) - full support of remote refs (remote schemas have to be added with `addSchema` or compiled to be available) - support of circular references between schemas - correct string lengths for strings with unicode pairs (can be turned off) - [formats](#formats) defined by JSON Schema draft 4 standard and custom formats (can be turned off) - [validates schemas against meta-schema](#api-validateschema) - supports [browsers](#using-in-browser) and Node.js 0.10-8.x - [asynchronous loading](#asynchronous-schema-compilation) of referenced schemas during compilation - "All errors" validation mode with [option allErrors](#options) - [error messages with parameters](#validation-errors) describing error reasons to allow creating custom error messages - i18n error messages support with 
[ajv-i18n](https://github.com/epoberezkin/ajv-i18n) package - [filtering data](#filtering-data) from additional properties - [assigning defaults](#assigning-defaults) to missing properties and items - [coercing data](#coercing-data-types) to the types specified in `type` keywords - [custom keywords](#defining-custom-keywords) - draft-6 keywords `const`, `contains` and `propertyNames` - draft-6 boolean schemas (`true`/`false` as a schema to always pass/fail). - keywords `switch`, `patternRequired`, `formatMaximum` / `formatMinimum` and `formatExclusiveMaximum` / `formatExclusiveMinimum` from [JSON-schema extension proposals](https://github.com/json-schema/json-schema/wiki/v5-Proposals) with [ajv-keywords](https://github.com/epoberezkin/ajv-keywords) package - [$data reference](#data-reference) to use values from the validated data as values for the schema keywords - [asynchronous validation](#asynchronous-validation) of custom formats and keywords Currently Ajv is the only validator that passes all the tests from [JSON Schema Test Suite](https://github.com/json-schema/JSON-Schema-Test-Suite) (according to [json-schema-benchmark](https://github.com/ebdrup/json-schema-benchmark), apart from the test that requires that `1.0` is not an integer that is impossible to satisfy in JavaScript). ## Install ``` npm install ajv ``` or to install [version 6](https://github.com/epoberezkin/ajv/tree/beta): ``` npm install ajv@beta ``` ## <a name="usage"></a>Getting started Try it in the Node.js REPL: https://tonicdev.com/npm/ajv The fastest validation call: ```javascript var Ajv = require('ajv'); var ajv = new Ajv(); // options can be passed, e.g. {allErrors: true} var validate = ajv.compile(schema); var valid = validate(data); if (!valid) console.log(validate.errors); ``` or with less code ```javascript // ... var valid = ajv.validate(schema, data); if (!valid) console.log(ajv.errors); // ... ``` or ```javascript // ... 
__Please note__: every time a validation function or `ajv.validate` is called, the `errors` property is overwritten.
- migrating schemas to draft-06 (using [json-schema-migrate](https://github.com/epoberezkin/json-schema-migrate))
keywords](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#compound-keywords) - not, oneOf, anyOf, allOf With [ajv-keywords](https://github.com/epoberezkin/ajv-keywords) package Ajv also supports validation keywords from [JSON Schema extension proposals](https://github.com/json-schema/json-schema/wiki/v5-Proposals) for JSON-schema standard: - [switch](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#switch-proposed) - conditional validation with a sequence of if/then clauses - [patternRequired](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#patternrequired-proposed) - like `required` but with patterns that some property should match. - [formatMaximum, formatMinimum, formatExclusiveMaximum, formatExclusiveMinimum](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#formatmaximum--formatminimum-and-exclusiveformatmaximum--exclusiveformatminimum-proposed) - setting limits for date, time, etc. See [JSON Schema validation keywords](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md) for more details. ## Formats The following formats are supported for string validation with "format" keyword: - _date_: full-date according to [RFC3339](http://tools.ietf.org/html/rfc3339#section-5.6). - _time_: time with optional time-zone. - _date-time_: date-time from the same source (time-zone is mandatory). `date`, `time` and `date-time` validate ranges in `full` mode and only regexp in `fast` mode (see [options](#options)). - _uri_: full uri with optional protocol. - _url_: [URL record](https://url.spec.whatwg.org/#concept-url). - _uri-template_: URI template according to [RFC6570](https://tools.ietf.org/html/rfc6570) - _email_: email address. - _hostname_: host name according to [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5). - _ipv4_: IP address v4. - _ipv6_: IP address v6. - _regex_: tests whether a string is a valid regular expression by passing it to RegExp constructor. 
There are two modes of format validation: `fast` and `full`. The chosen mode affects the formats `date`, `time`, `date-time`, `uri`, `email`, and `hostname`.
Example: ```javascript var schema = { "$id": "http://example.com/schemas/schema.json", "type": "object", "properties": { "foo": { "$ref": "defs.json#/definitions/int" }, "bar": { "$ref": "defs.json#/definitions/str" } } }; var defsSchema = { "$id": "http://example.com/schemas/defs.json", "definitions": { "int": { "type": "integer" }, "str": { "type": "string" } } }; ``` Now to compile your schema you can either pass all schemas to Ajv instance: ```javascript var ajv = new Ajv({schemas: [schema, defsSchema]}); var validate = ajv.getSchema('http://example.com/schemas/schema.json'); ``` or use `addSchema` method: ```javascript var ajv = new Ajv; var validate = ajv.addSchema(defsSchema) .compile(schema); ``` See [Options](#options) and [addSchema](#api) method. __Please note__: - `$ref` is resolved as the uri-reference using schema $id as the base URI (see the example). - References can be recursive (and mutually recursive) to implement the schemas for different data structures (such as linked lists, trees, graphs, etc.). - You don't have to host your schema files at the URIs that you use as schema $id. These URIs are only used to identify the schemas, and according to JSON Schema specification validators should not expect to be able to download the schemas from these URIs. - The actual location of the schema file in the file system is not used. - You can pass the identifier of the schema as the second parameter of `addSchema` method or as a property name in `schemas` option. This identifier can be used instead of (or in addition to) schema $id. - You cannot have the same $id (or the schema identifier) used for more than one schema - the exception will be thrown. - You can implement dynamic resolution of the referenced schemas using `compileAsync` method. In this way you can store schemas in any system (files, web, database, etc.) and reference them without explicitly adding to Ajv instance. See [Asynchronous schema compilation](#asynchronous-schema-compilation). 
Examples. This schema requires that the value in property `smaller` is less than or equal to the value in the property `larger`:
not "number" for maximum keyword) the validation fails. ## $merge and $patch keywords With the package [ajv-merge-patch](https://github.com/epoberezkin/ajv-merge-patch) you can use the keywords `$merge` and `$patch` that allow extending JSON-schemas with patches using formats [JSON Merge Patch (RFC 7396)](https://tools.ietf.org/html/rfc7396) and [JSON Patch (RFC 6902)](https://tools.ietf.org/html/rfc6902). To add keywords `$merge` and `$patch` to Ajv instance use this code: ```javascript require('ajv-merge-patch')(ajv); ``` Examples. Using `$merge`: ```json { "$merge": { "source": { "type": "object", "properties": { "p": { "type": "string" } }, "additionalProperties": false }, "with": { "properties": { "q": { "type": "number" } } } } } ``` Using `$patch`: ```json { "$patch": { "source": { "type": "object", "properties": { "p": { "type": "string" } }, "additionalProperties": false }, "with": [ { "op": "add", "path": "/properties/q", "value": { "type": "number" } } ] } } ``` The schemas above are equivalent to this schema: ```json { "type": "object", "properties": { "p": { "type": "string" }, "q": { "type": "number" } }, "additionalProperties": false } ``` The properties `source` and `with` in the keywords `$merge` and `$patch` can use absolute or relative `$ref` to point to other schemas previously added to the Ajv instance or to the fragments of the current schema. See the package [ajv-merge-patch](https://github.com/epoberezkin/ajv-merge-patch) for more information. 
## Defining custom keywords The advantages of using custom keywords are: - allow creating validation scenarios that cannot be expressed using JSON Schema - simplify your schemas - help bringing a bigger part of the validation logic to your schemas - make your schemas more expressive, less verbose and closer to your application domain - implement custom data processors that modify your data (`modifying` option MUST be used in keyword definition) and/or create side effects while the data is being validated If a keyword is used only for side-effects and its validation result is pre-defined, use option `valid: true/false` in keyword definition to simplify both generated code (no error handling in case of `valid: true`) and your keyword functions (no need to return any validation result). The concerns you have to be aware of when extending JSON-schema standard with custom keywords are the portability and understanding of your schemas. You will have to support these custom keywords on other platforms and to properly document these keywords so that everybody can understand them in your schemas. You can define custom keywords with [addKeyword](#api-addkeyword) method. Keywords are defined on the `ajv` instance level - new instances will not have previously defined keywords. Ajv allows defining keywords with: - validation function - compilation function - macro function - inline compilation function that should return code (as string) that will be inlined in the currently compiled schema. Example. `range` and `exclusiveRange` keywords using compiled schema: ```javascript ajv.addKeyword('range', { type: 'number', compile: function (sch, parentSchema) { var min = sch[0]; var max = sch[1]; return parentSchema.exclusiveRange === true ? 
function (data) { return data > min && data < max; } : function (data) { return data >= min && data <= max; } } }); var schema = { "range": [2, 4], "exclusiveRange": true }; var validate = ajv.compile(schema); console.log(validate(2.01)); // true console.log(validate(3.99)); // true console.log(validate(2)); // false console.log(validate(4)); // false ``` Several custom keywords (typeof, instanceof, range and propertyNames) are defined in [ajv-keywords](https://github.com/epoberezkin/ajv-keywords) package - they can be used for your schemas and as a starting point for your own custom keywords. See [Defining custom keywords](https://github.com/epoberezkin/ajv/blob/master/CUSTOM.md) for more details. ## Asynchronous schema compilation During asynchronous compilation remote references are loaded using supplied function. See `compileAsync` [method](#api-compileAsync) and `loadSchema` [option](#options). Example: ```javascript var ajv = new Ajv({ loadSchema: loadSchema }); ajv.compileAsync(schema).then(function (validate) { var valid = validate(data); // ... }); function loadSchema(uri) { return request.json(uri).then(function (res) { if (res.statusCode >= 400) throw new Error('Loading error: ' + res.statusCode); return res.body; }); } ``` __Please note__: [Option](#options) `missingRefs` should NOT be set to `"ignore"` or `"fail"` for asynchronous compilation to work. ## Asynchronous validation Example in Node.js REPL: https://tonicdev.com/esp/ajv-asynchronous-validation You can define custom formats and keywords that perform validation asynchronously by accessing database or some other service. You should add `async: true` in the keyword or format definition (see [addFormat](#api-addformat), [addKeyword](#api-addkeyword) and [Defining custom keywords](#defining-custom-keywords)). If your schema uses asynchronous formats/keywords or refers to some schema that contains them it should have `"$async": true` keyword so that Ajv can compile it correctly. 
If you are using Ajv in other browsers or in older versions of Node.js you should use one of the available transpiling options.
All provided async modes use global Promise class. If your platform does not have Promise you should use a polyfill that defines it. Validation result will be a promise that resolves with validated data or rejects with an exception `Ajv.ValidationError` that contains the array of validation errors in `errors` property. Example: ```javascript /** * Default mode is non-transpiled generator function wrapped with `co`. * Using package ajv-async (https://github.com/epoberezkin/ajv-async) * you can auto-detect the best async mode. * In this case, without "async" and "transpile" options * (or with option {async: true}) * Ajv will choose the first supported/installed option in this order: * 1. native async function * 2. native generator function wrapped with co * 3. es7 async functions transpiled with nodent * 4. es7 async functions transpiled with regenerator */ var setupAsync = require('ajv-async'); var ajv = setupAsync(new Ajv); ajv.addKeyword('idExists', { async: true, type: 'number', validate: checkIdExists }); function checkIdExists(schema, data) { return knex(schema.table) .select('id') .where('id', data) .then(function (rows) { return !!rows.length; // true if record is found }); } var schema = { "$async": true, "properties": { "userId": { "type": "integer", "idExists": { "table": "users" } }, "postId": { "type": "integer", "idExists": { "table": "posts" } } } }; var validate = ajv.compile(schema); validate({ userId: 1, postId: 19 }) .then(function (data) { console.log('Data is valid', data); // { userId: 1, postId: 19 } }) .catch(function (err) { if (!(err instanceof Ajv.ValidationError)) throw err; // data is invalid console.log('Validation errors:', err.errors); }); ``` ### Using transpilers with asynchronous validation functions. To use a transpiler you should separately install it (or load its bundle in the browser). Ajv npm package includes minified browser bundles of regenerator and nodent in dist folder. 
#### Using nodent ```javascript var setupAsync = require('ajv-async'); var ajv = new Ajv({ /* async: 'es7', */ transpile: 'nodent' }); setupAsync(ajv); var validate = ajv.compile(schema); // transpiled es7 async function validate(data).then(successFunc).catch(errorFunc); ``` `npm install nodent` or use `nodent.min.js` from dist folder of npm package. #### Using regenerator ```javascript var setupAsync = require('ajv-async'); var ajv = new Ajv({ /* async: 'es7', */ transpile: 'regenerator' }); setupAsync(ajv); var validate = ajv.compile(schema); // transpiled es7 async function validate(data).then(successFunc).catch(errorFunc); ``` `npm install regenerator` or use `regenerator.min.js` from dist folder of npm package. #### Using other transpilers ```javascript var ajv = new Ajv({ async: 'es7', processCode: transpileFunc }); var validate = ajv.compile(schema); // transpiled es7 async function validate(data).then(successFunc).catch(errorFunc); ``` See [Options](#options). #### Comparison of async modes |mode|transpile<br>speed*|run-time<br>speed*|bundle<br>size| |---|:-:|:-:|:-:| |es7 async<br>(native)|-|0.75|-| |generators<br>(native)|-|1.0|-| |es7.nodent|1.35|1.1|215Kb| |es7.regenerator|1.0|2.7|1109Kb| |regenerator|1.0|3.2|1109Kb| \* Relative performance in Node.js 7.x — smaller is better. [nodent](https://github.com/MatAtBread/nodent) has several advantages: - much smaller browser bundle than regenerator - almost the same performance of generated code as native generators in Node.js and the latest Chrome - much better performance than native generators in other browsers - works in IE 9 (regenerator does not) ## Filtering data With [option `removeAdditional`](#options) (added by [andyscott](https://github.com/andyscott)) you can filter data during the validation. This option modifies original data. 
console.log(data); // { "foo": 0, "bar": { "baz": "abc", "additional2": 2 } }
Inserting defaults by reference can be faster (in case you have an object in `default`) and it allows you to have dynamic values in defaults, e.g. timestamp, without recompiling the schema.
Example 1 (`default` in `properties`): ```javascript var ajv = new Ajv({ useDefaults: true }); var schema = { "type": "object", "properties": { "foo": { "type": "number" }, "bar": { "type": "string", "default": "baz" } }, "required": [ "foo", "bar" ] }; var data = { "foo": 1 }; var validate = ajv.compile(schema); console.log(validate(data)); // true console.log(data); // { "foo": 1, "bar": "baz" } ``` Example 2 (`default` in `items`): ```javascript var schema = { "type": "array", "items": [ { "type": "number" }, { "type": "string", "default": "foo" } ] } var data = [ 1 ]; var validate = ajv.compile(schema); console.log(validate(data)); // true console.log(data); // [ 1, "foo" ] ``` Example 3 (inserting "defaults" by reference): ```javascript var ajv = new Ajv({ useDefaults: 'shared' }); var schema = { properties: { foo: { default: { bar: 1 } } } } var validate = ajv.compile(schema); var data = {}; console.log(validate(data)); // true console.log(data); // { foo: { bar: 1 } } data.foo.bar = 2; var data2 = {}; console.log(validate(data2)); // true console.log(data2); // { foo: { bar: 2 } } ``` `default` keywords in other cases are ignored: - not in `properties` or `items` subschemas - in schemas inside `anyOf`, `oneOf` and `not` (see [#42](https://github.com/epoberezkin/ajv/issues/42)) - in `if` subschema of `switch` keyword - in schemas generated by custom macro keywords ## Coercing data types When you are validating user inputs all your data properties are usually strings. The option `coerceTypes` allows you to have your data types coerced to the types specified in your schema `type` keywords, both to pass the validation and to use the correctly typed data afterwards. This option modifies original data. __Please note__: if you pass a scalar value to the validating function its type will be coerced and it will pass the validation, but the value of the variable you pass won't be updated because scalars are passed by value. 
Example 1: ```javascript var ajv = new Ajv({ coerceTypes: true }); var schema = { "type": "object", "properties": { "foo": { "type": "number" }, "bar": { "type": "boolean" } }, "required": [ "foo", "bar" ] }; var data = { "foo": "1", "bar": "false" }; var validate = ajv.compile(schema); console.log(validate(data)); // true console.log(data); // { "foo": 1, "bar": false } ``` Example 2 (array coercions): ```javascript var ajv = new Ajv({ coerceTypes: 'array' }); var schema = { "properties": { "foo": { "type": "array", "items": { "type": "number" } }, "bar": { "type": "boolean" } } }; var data = { "foo": "1", "bar": ["false"] }; var validate = ajv.compile(schema); console.log(validate(data)); // true console.log(data); // { "foo": [1], "bar": false } ``` The coercion rules, as you can see from the example, are different from JavaScript both to validate user input as expected and to have the coercion reversible (to correctly validate cases where different types are defined in subschemas of "anyOf" and other compound keywords). See [Coercion rules](https://github.com/epoberezkin/ajv/blob/master/COERCION.md) for details. ## API ##### new Ajv(Object options) -&gt; Object Create Ajv instance. ##### .compile(Object schema) -&gt; Function&lt;Object data&gt; Generate validating function and cache the compiled schema for future use. Validating function returns boolean and has properties `errors` with the errors from the last validation (`null` if there were no errors) and `schema` with the reference to the original schema. Unless the option `validateSchema` is false, the schema will be validated against meta-schema and if schema is invalid the error will be thrown. See [options](#options). ##### <a name="api-compileAsync"></a>.compileAsync(Object schema [, Boolean meta] [, Function callback]) -&gt; Promise Asynchronous version of `compile` method that loads missing remote schemas using asynchronous function in `options.loadSchema`. 
This function returns a Promise that resolves to a validation function. An optional callback passed to `compileAsync` will be called with 2 parameters: error (or null) and validating function. The returned promise will reject (and the callback will be called with an error) when: - missing schema can't be loaded (`loadSchema` returns a Promise that rejects). - a schema containing a missing reference is loaded, but the reference cannot be resolved. - schema (or some loaded/referenced schema) is invalid. The function compiles schema and loads the first missing schema (or meta-schema) until all missing schemas are loaded. You can asynchronously compile meta-schema by passing `true` as the second parameter. See example in [Asynchronous compilation](#asynchronous-schema-compilation). ##### .validate(Object schema|String key|String ref, data) -&gt; Boolean Validate data using passed schema (it will be compiled and cached). Instead of the schema you can use the key that was previously passed to `addSchema`, the schema id if it was present in the schema or any previously resolved reference. Validation errors will be available in the `errors` property of Ajv instance (`null` if there were no errors). __Please note__: every time this method is called the errors are overwritten so you need to copy them to another variable if you want to use them later. If the schema is asynchronous (has `$async` keyword on the top level) this method returns a Promise. See [Asynchronous validation](#asynchronous-validation). ##### .addSchema(Array&lt;Object&gt;|Object schema [, String key]) -&gt; Ajv Add schema(s) to validator instance. This method does not compile schemas (but it still validates them). Because of that dependencies can be added in any order and circular dependencies are supported. It also prevents unnecessary compilation of schemas that are containers for other schemas but not used as a whole. 
Although `addSchema` does not compile schemas, explicit compilation is not required - the schema will be compiled when it is used for the first time.
By default this method is called automatically when the schema is added, so you rarely need to use it directly. If schema doesn't have `$schema` property, it is validated against draft 6 meta-schema (option `meta` should not be false). If schema has `$schema` property, then the schema with this id (that should be previously added) is used to validate passed schema. Errors will be available at `ajv.errors`. ##### .getSchema(String key) -&gt; Function&lt;Object data&gt; Retrieve compiled schema previously added with `addSchema` by the key passed to `addSchema` or by its full reference (id). The returned validating function has `schema` property with the reference to the original schema. ##### .removeSchema([Object schema|String key|String ref|RegExp pattern]) -&gt; Ajv Remove added/cached schema. Even if schema is referenced by other schemas it can be safely removed as dependent schemas have local references. Schema can be removed using: - key passed to `addSchema` - its full reference (id) - RegExp that should match schema id or key (meta-schemas won't be removed) - actual schema object that will be stable-stringified to remove schema from cache If no parameter is passed all schemas but meta-schemas will be removed and the cache will be cleared. ##### <a name="api-addformat"></a>.addFormat(String name, String|RegExp|Function|Object format) -&gt; Ajv Add custom format to validate strings or numbers. It can also be used to replace pre-defined formats for Ajv instance. Strings are converted to RegExp. Function should return validation result as `true` or `false`. If object is passed it should have properties `validate`, `compare` and `async`: - _validate_: a string, RegExp or a function as described above. - _compare_: an optional comparison function that accepts two strings and compares them according to the format meaning. 
This function is used with keywords `formatMaximum`/`formatMinimum` (defined in [ajv-keywords](https://github.com/epoberezkin/ajv-keywords) package). It should return `1` if the first value is bigger than the second value, `-1` if it is smaller and `0` if it is equal. - _async_: an optional `true` value if `validate` is an asynchronous function; in this case it should return a promise that resolves with a value `true` or `false`. - _type_: an optional type of data that the format applies to. It can be `"string"` (default) or `"number"` (see https://github.com/epoberezkin/ajv/issues/291#issuecomment-259923858). If the type of data is different, the validation will pass. Custom formats can be also added via `formats` option. ##### <a name="api-addkeyword"></a>.addKeyword(String keyword, Object definition) -&gt; Ajv Add custom validation keyword to Ajv instance. Keyword should be different from all standard JSON schema keywords and different from previously defined keywords. There is no way to redefine keywords or to remove keyword definition from the instance. Keyword must start with a letter, `_` or `$`, and may continue with letters, numbers, `_`, `$`, or `-`. It is recommended to use an application-specific prefix for keywords to avoid current and future name collisions. Example Keywords: - `"xyz-example"`: valid, and uses prefix for the xyz project to avoid name collisions. - `"example"`: valid, but not recommended as it could collide with future versions of JSON schema etc. - `"3-example"`: invalid as numbers are not allowed to be the first character in a keyword Keyword definition is an object with the following properties: - _type_: optional string or array of strings with data type(s) that the keyword applies to. If not present, the keyword will apply to all types. 
- _validate_: validating function - _compile_: compiling function - _macro_: macro function - _inline_: compiling function that returns code (as string) - _schema_: an optional `false` value used with "validate" keyword to not pass schema - _metaSchema_: an optional meta-schema for keyword schema - _modifying_: `true` MUST be passed if keyword modifies data - _valid_: pass `true`/`false` to pre-define validation result, the result returned from validation function will be ignored. This option cannot be used with macro keywords. - _$data_: an optional `true` value to support [$data reference](#data-reference) as the value of custom keyword. The reference will be resolved at validation time. If the keyword has meta-schema it would be extended to allow $data and it will be used to validate the resolved value. Supporting $data reference requires that keyword has validating function (as the only option or in addition to compile, macro or inline function). - _async_: an optional `true` value if the validation function is asynchronous (whether it is compiled or passed in _validate_ property); in this case it should return a promise that resolves with a value `true` or `false`. This option is ignored in case of "macro" and "inline" keywords. - _errors_: an optional boolean indicating whether keyword returns errors. If this property is not set Ajv will determine if the errors were set in case of failed validation. _compile_, _macro_ and _inline_ are mutually exclusive, only one should be used at a time. _validate_ can be used separately or in addition to them to support $data reference. 
__Please note__: If the keyword is validating data type that is different from the type(s) in its definition, the validation function will not be called (and expanded macro will not be used), so there is no need to check for data type inside validation function or inside schema returned by macro function (unless you want to enforce a specific type and for some reason do not want to use a separate `type` keyword for that). In the same way as standard keywords work, if the keyword does not apply to the data type being validated, the validation of this keyword will succeed. See [Defining custom keywords](#defining-custom-keywords) for more details. ##### .getKeyword(String keyword) -&gt; Object|Boolean Returns custom keyword definition, `true` for pre-defined keywords and `false` if the keyword is unknown. ##### .removeKeyword(String keyword) -&gt; Ajv Removes custom or pre-defined keyword so you can redefine them. While this method can be used to extend pre-defined keywords, it can also be used to completely change their meaning - it may lead to unexpected results. __Please note__: schemas compiled before the keyword is removed will continue to work without changes. To recompile schemas use `removeSchema` method and compile them again. ##### .errorsText([Array&lt;Object&gt; errors [, Object options]]) -&gt; String Returns the text with all errors in a String. Options can have properties `separator` (string used to separate errors, ", " by default) and `dataVar` (the variable name that dataPaths are prefixed with, "data" by default). 
## Options Defaults: ```javascript { // validation and reporting options: $data: false, allErrors: false, verbose: false, jsonPointers: false, uniqueItems: true, unicode: true, format: 'fast', formats: {}, unknownFormats: true, schemas: {}, logger: undefined, // referenced schema options: schemaId: undefined // recommended '$id' missingRefs: true, extendRefs: 'ignore', // recommended 'fail' loadSchema: undefined, // function(uri: string): Promise {} // options to modify validated data: removeAdditional: false, useDefaults: false, coerceTypes: false, // asynchronous validation options: async: 'co*', transpile: undefined, // requires ajv-async package // advanced options: meta: true, validateSchema: true, addUsedSchema: true, inlineRefs: true, passContext: false, loopRequired: Infinity, ownProperties: false, multipleOfPrecision: false, errorDataPath: 'object', messages: true, sourceCode: false, processCode: undefined, // function (str: string): string {} cache: new Cache, serialize: undefined } ``` ##### Validation and reporting options - _$data_: support [$data references](#data-reference). Draft 6 meta-schema that is added by default will be extended to allow them. If you want to use another meta-schema you need to use $dataMetaSchema method to add support for $data reference. See [API](#api). - _allErrors_: check all rules collecting all errors. Default is to return after the first error. - _verbose_: include the reference to the part of the schema (`schema` and `parentSchema`) and validated data in errors (false by default). - _jsonPointers_: set `dataPath` property of errors using [JSON Pointers](https://tools.ietf.org/html/rfc6901) instead of JavaScript property access notation. - _uniqueItems_: validate `uniqueItems` keyword (true by default). - _unicode_: calculate correct length of strings with unicode pairs (true by default). 
Pass `false` to use `.length` of strings that is faster, but gives "incorrect" lengths of strings with unicode pairs - each unicode pair is counted as two characters. - _format_: formats validation mode ('fast' by default). Pass 'full' for more correct and slow validation or `false` not to validate formats at all. E.g., 25:00:00 and 2015/14/33 will be invalid time and date in 'full' mode but it will be valid in 'fast' mode. - _formats_: an object with custom formats. Keys and values will be passed to `addFormat` method. - _unknownFormats_: handling of unknown formats. Option values: - `true` (default) - if an unknown format is encountered the exception is thrown during schema compilation. If `format` keyword value is [$data reference](#data-reference) and it is unknown the validation will fail. - `[String]` - an array of unknown format names that will be ignored. This option can be used to allow usage of third party schemas with format(s) for which you don't have definitions, but still fail if another unknown format is used. If `format` keyword value is [$data reference](#data-reference) and it is not in this array the validation will fail. - `"ignore"` - to log warning during schema compilation and always pass validation (the default behaviour in versions before 5.0.0). This option is not recommended, as it allows to mistype format name and it won't be validated without any error message. This behaviour is required by JSON-schema specification. - _schemas_: an array or object of schemas that will be added to the instance. In case you pass the array the schemas must have IDs in them. When the object is passed the method `addSchema(value, key)` will be called for each schema in this object. - _logger_: sets the logging method. Default is the global `console` object that should have methods `log`, `warn` and `error`. Option values: - custom logger - it should have methods `log`, `warn` and `error`. If any of these methods is missing an exception will be thrown. 
- `false` - logging is disabled. ##### Referenced schema options - _schemaId_: this option defines which keywords are used as schema URI. Option value: - `"$id"` (recommended) - only use `$id` keyword as schema URI (as specified in JSON Schema draft-06), ignore `id` keyword (if it is present a warning will be logged). - `"id"` - only use `id` keyword as schema URI (as specified in JSON Schema draft-04), ignore `$id` keyword (if it is present a warning will be logged). - `undefined` (default) - use both `$id` and `id` keywords as schema URI. If both are present (in the same schema object) and different the exception will be thrown during schema compilation. - _missingRefs_: handling of missing referenced schemas. Option values: - `true` (default) - if the reference cannot be resolved during compilation the exception is thrown. The thrown error has properties `missingRef` (with hash fragment) and `missingSchema` (without it). Both properties are resolved relative to the current base id (usually schema id, unless it was substituted). - `"ignore"` - to log error during compilation and always pass validation. - `"fail"` - to log error and successfully compile schema but fail validation if this rule is checked. - _extendRefs_: validation of other keywords when `$ref` is present in the schema. Option values: - `"ignore"` (default) - when `$ref` is used other keywords are ignored (as per [JSON Reference](https://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03#section-3) standard). A warning will be logged during the schema compilation. - `"fail"` (recommended) - if other validation keywords are used together with `$ref` the exception will be thrown when the schema is compiled. This option is recommended to make sure schema has no keywords that are ignored, which can be confusing. - `true` - validate all keywords in the schemas with `$ref` (the default behaviour in versions before 5.0.0). 
- _loadSchema_: asynchronous function that will be used to load remote schemas when `compileAsync` [method](#api-compileAsync) is used and some reference is missing (option `missingRefs` should NOT be 'fail' or 'ignore'). This function should accept remote schema uri as a parameter and return a Promise that resolves to a schema. See example in [Asynchronous compilation](#asynchronous-schema-compilation). ##### Options to modify validated data - _removeAdditional_: remove additional properties - see example in [Filtering data](#filtering-data). This option is not used if schema is added with `addMetaSchema` method. Option values: - `false` (default) - not to remove additional properties - `"all"` - all additional properties are removed, regardless of `additionalProperties` keyword in schema (and no validation is made for them). - `true` - only additional properties with `additionalProperties` keyword equal to `false` are removed. - `"failing"` - additional properties that fail schema validation will be removed (where `additionalProperties` keyword is `false` or schema). - _useDefaults_: replace missing properties and items with the values from corresponding `default` keywords. Default behaviour is to ignore `default` keywords. This option is not used if schema is added with `addMetaSchema` method. See examples in [Assigning defaults](#assigning-defaults). Option values: - `false` (default) - do not use defaults - `true` - insert defaults by value (safer and slower, object literal is used). - `"shared"` - insert defaults by reference (faster). If the default is an object, it will be shared by all instances of validated data. If you modify the inserted default in the validated data, it will be modified in the schema as well. - _coerceTypes_: change data type of data to match `type` keyword. See the example in [Coercing data types](#coercing-data-types) and [coercion rules](https://github.com/epoberezkin/ajv/blob/master/COERCION.md). 
Option values: - `false` (default) - no type coercion. - `true` - coerce scalar data types. - `"array"` - in addition to coercions between scalar types, coerce scalar data to an array with one element and vice versa (as required by the schema). ##### Asynchronous validation options - _async_: determines how Ajv compiles asynchronous schemas (see [Asynchronous validation](#asynchronous-validation)) to functions. Option values: - `"*"` / `"co*"` (default) - compile to generator function ("co*" - wrapped with `co.wrap`). If generators are not supported and you don't provide `processCode` option (or `transpile` option if you use [ajv-async](https://github.com/epoberezkin/ajv-async) package), the exception will be thrown when async schema is compiled. - `"es7"` - compile to es7 async function. Unless your platform supports them you need to provide `processCode` or `transpile` option. According to [compatibility table](http://kangax.github.io/compat-table/es7/) async functions are supported by: - Firefox 52, - Chrome 55, - Node.js 7 (with `--harmony-async-await`), - MS Edge 13 (with flag). - `undefined`/`true` - auto-detect async mode. It requires [ajv-async](https://github.com/epoberezkin/ajv-async) package. If `transpile` option is not passed, ajv-async will choose the first of supported/installed async/transpile modes in this order: - "es7" (native async functions), - "co*" (native generators with co.wrap), - "es7"/"nodent", - "co*"/"regenerator" during the creation of the Ajv instance. If none of the options is available the exception will be thrown. - _transpile_: Requires [ajv-async](https://github.com/epoberezkin/ajv-async) package. It determines whether Ajv transpiles compiled asynchronous validation function. Option values: - `"nodent"` - transpile with [nodent](https://github.com/MatAtBread/nodent). If nodent is not installed, the exception will be thrown. nodent can only transpile es7 async functions; it will enforce this mode. 
- `"regenerator"` - transpile with [regenerator](https://github.com/facebook/regenerator). If regenerator is not installed, the exception will be thrown. - a function - this function should accept the code of validation function as a string and return transpiled code. This option allows you to use any other transpiler you prefer. If you are passing a function, you can simply pass it to `processCode` option without using ajv-async. ##### Advanced options - _meta_: add [meta-schema](http://json-schema.org/documentation.html) so it can be used by other schemas (true by default). If an object is passed, it will be used as the default meta-schema for schemas that have no `$schema` keyword. This default meta-schema MUST have `$schema` keyword. - _validateSchema_: validate added/compiled schemas against meta-schema (true by default). `$schema` property in the schema can either be http://json-schema.org/schema or http://json-schema.org/draft-04/schema or absent (draft-4 meta-schema will be used) or can be a reference to the schema previously added with `addMetaSchema` method. Option values: - `true` (default) - if the validation fails, throw the exception. - `"log"` - if the validation fails, log error. - `false` - skip schema validation. - _addUsedSchema_: by default methods `compile` and `validate` add schemas to the instance if they have `$id` (or `id`) property that doesn't start with "#". If `$id` is present and it is not unique the exception will be thrown. Set this option to `false` to skip adding schemas to the instance and the `$id` uniqueness check when these methods are used. This option does not affect `addSchema` method. - _inlineRefs_: Affects compilation of referenced schemas. Option values: - `true` (default) - the referenced schemas that don't have refs in them are inlined, regardless of their size - that substantially improves performance at the cost of the bigger size of compiled schema functions. 
- `false` - to not inline referenced schemas (they will be compiled as separate functions). - integer number - to limit the maximum number of keywords of the schema that will be inlined. - _passContext_: pass validation context to custom keyword functions. If this option is `true` and you pass some context to the compiled validation function with `validate.call(context, data)`, the `context` will be available as `this` in your custom keywords. By default `this` is Ajv instance. - _loopRequired_: by default `required` keyword is compiled into a single expression (or a sequence of statements in `allErrors` mode). In case of a very large number of properties in this keyword it may result in a very big validation function. Pass integer to set the number of properties above which `required` keyword will be validated in a loop - smaller validation function size but also worse performance. - _ownProperties_: by default Ajv iterates over all enumerable object properties; when this option is `true` only own enumerable object properties (i.e. found directly on the object rather than on its prototype) are iterated. Contributed by @mbroadst. - _multipleOfPrecision_: by default `multipleOf` keyword is validated by comparing the result of division with parseInt() of that result. It works for dividers that are bigger than 1. For small dividers such as 0.01 the result of the division is usually not integer (even when it should be integer, see issue [#84](https://github.com/epoberezkin/ajv/issues/84)). If you need to use fractional dividers set this option to some positive integer N to have `multipleOf` validated using this formula: `Math.abs(Math.round(division) - division) < 1e-N` (it is slower but allows for float arithmetics deviations). - _errorDataPath_: set `dataPath` to point to 'object' (default) or to 'property' when validating keywords `required`, `additionalProperties` and `dependencies`. - _messages_: Include human-readable messages in errors. `true` by default. 
`false` can be passed when custom messages are used (e.g. with [ajv-i18n](https://github.com/epoberezkin/ajv-i18n)). - _sourceCode_: add `sourceCode` property to validating function (for debugging; this code can be different from the result of toString call). - _processCode_: an optional function to process generated code before it is passed to Function constructor. It can be used to either beautify (the validating function is generated without line-breaks) or to transpile code. Starting from version 5.0.0 this option replaced options: - `beautify` that formatted the generated function using [js-beautify](https://github.com/beautify-web/js-beautify). If you want to beautify the generated code pass `require('js-beautify').js_beautify`. - `transpile` that transpiled asynchronous validation function. You can still use `transpile` option with [ajv-async](https://github.com/epoberezkin/ajv-async) package. See [Asynchronous validation](#asynchronous-validation) for more information. - _cache_: an optional instance of cache to store compiled schemas using stable-stringified schema as a key. For example, set-associative cache [sacjs](https://github.com/epoberezkin/sacjs) can be used. If not passed then a simple hash is used which is good enough for the common use case (a limited number of statically defined schemas). Cache should have methods `put(key, value)`, `get(key)`, `del(key)` and `clear()`. - _serialize_: an optional function to serialize schema to cache key. Pass `false` to use schema itself as a key (e.g., if WeakMap used as a cache). By default [fast-json-stable-stringify](https://github.com/epoberezkin/fast-json-stable-stringify) is used. ## Validation errors In case of validation failure, Ajv assigns the array of errors to `errors` property of validation function (or to `errors` property of Ajv instance when `validate` or `validateSchema` methods were called). 
In case of [asynchronous validation](#asynchronous-validation), the returned promise is rejected with exception `Ajv.ValidationError` that has `errors` property. ### Error objects Each error is an object with the following properties: - _keyword_: validation keyword. - _dataPath_: the path to the part of the data that was validated. By default `dataPath` uses JavaScript property access notation (e.g., `".prop[1].subProp"`). When the option `jsonPointers` is true (see [Options](#options)) `dataPath` will be set using JSON pointer standard (e.g., `"/prop/1/subProp"`). - _schemaPath_: the path (JSON-pointer as a URI fragment) to the schema of the keyword that failed validation. - _params_: the object with the additional information about error that can be used to create custom error messages (e.g., using [ajv-i18n](https://github.com/epoberezkin/ajv-i18n) package). See below for parameters set by all keywords. - _message_: the standard error message (can be excluded with option `messages` set to false). - _schema_: the schema of the keyword (added with `verbose` option). - _parentSchema_: the schema containing the keyword (added with `verbose` option) - _data_: the data validated by the keyword (added with `verbose` option). __Please note__: `propertyNames` keyword schema validation errors have an additional property `propertyName`, `dataPath` points to the object. After schema validation for each property name, if it is invalid an additional error is added with the property `keyword` equal to `"propertyNames"`. ### Error parameters Properties of `params` object in errors depend on the keyword that failed validation. - `maxItems`, `minItems`, `maxLength`, `minLength`, `maxProperties`, `minProperties` - property `limit` (number, the schema of the keyword). - `additionalItems` - property `limit` (the maximum number of allowed items in case when `items` keyword is an array of schemas and `additionalItems` is false). 
- `additionalProperties` - property `additionalProperty` (the property not used in `properties` and `patternProperties` keywords). - `dependencies` - properties: - `property` (dependent property), - `missingProperty` (required missing dependency - only the first one is reported currently) - `deps` (required dependencies, comma separated list as a string), - `depsCount` (the number of required dependencies). - `format` - property `format` (the schema of the keyword). - `maximum`, `minimum` - properties: - `limit` (number, the schema of the keyword), - `exclusive` (boolean, the schema of `exclusiveMaximum` or `exclusiveMinimum`), - `comparison` (string, comparison operation to compare the data to the limit, with the data on the left and the limit on the right; can be "<", "<=", ">", ">=") - `multipleOf` - property `multipleOf` (the schema of the keyword) - `pattern` - property `pattern` (the schema of the keyword) - `required` - property `missingProperty` (required property that is missing). - `propertyNames` - property `propertyName` (an invalid property name). - `patternRequired` (in ajv-keywords) - property `missingPattern` (required pattern that did not match any property). - `type` - property `type` (required type(s), a string, can be a comma-separated list) - `uniqueItems` - properties `i` and `j` (indices of duplicate items). - `enum` - property `allowedValues` pointing to the array of values (the schema of the keyword). - `$ref` - property `ref` with the referenced schema URI. - custom keywords (in case keyword definition doesn't create errors) - property `keyword` (the keyword name). 
## Related packages - [ajv-async](https://github.com/epoberezkin/ajv-async) - configure async validation mode - [ajv-cli](https://github.com/jessedc/ajv-cli) - command line interface - [ajv-errors](https://github.com/epoberezkin/ajv-errors) - custom error messages - [ajv-i18n](https://github.com/epoberezkin/ajv-i18n) - internationalised error messages - [ajv-istanbul](https://github.com/epoberezkin/ajv-istanbul) - instrument generated validation code to measure test coverage of your schemas - [ajv-keywords](https://github.com/epoberezkin/ajv-keywords) - custom validation keywords (if/then/else, select, typeof, etc.) - [ajv-merge-patch](https://github.com/epoberezkin/ajv-merge-patch) - keywords $merge and $patch - [ajv-pack](https://github.com/epoberezkin/ajv-pack) - produces a compact module exporting validation functions ## Some packages using Ajv - [webpack](https://github.com/webpack/webpack) - a module bundler. Its main purpose is to bundle JavaScript files for usage in a browser - [jsonscript-js](https://github.com/JSONScript/jsonscript-js) - the interpreter for [JSONScript](http://www.jsonscript.org) - scripted processing of existing endpoints and services - [osprey-method-handler](https://github.com/mulesoft-labs/osprey-method-handler) - Express middleware for validating requests and responses based on a RAML method object, used in [osprey](https://github.com/mulesoft/osprey) - validating API proxy generated from a RAML definition - [har-validator](https://github.com/ahmadnassri/har-validator) - HTTP Archive (HAR) validator - [jsoneditor](https://github.com/josdejong/jsoneditor) - a web-based tool to view, edit, format, and validate JSON http://jsoneditoronline.org - [JSON Schema Lint](https://github.com/nickcmaynard/jsonschemalint) - a web tool to validate JSON/YAML document against a single JSON-schema http://jsonschemalint.com - [objection](https://github.com/vincit/objection.js) - SQL-friendly ORM for Node.js - [table](https://github.com/gajus/table) - 
formats data into a string table - [ripple-lib](https://github.com/ripple/ripple-lib) - a JavaScript API for interacting with [Ripple](https://ripple.com) in Node.js and the browser - [restbase](https://github.com/wikimedia/restbase) - distributed storage with REST API & dispatcher for backend services built to provide a low-latency & high-throughput API for Wikipedia / Wikimedia content - [hippie-swagger](https://github.com/CacheControl/hippie-swagger) - [Hippie](https://github.com/vesln/hippie) wrapper that provides end to end API testing with swagger validation - [react-form-controlled](https://github.com/seeden/react-form-controlled) - React controlled form components with validation - [rabbitmq-schema](https://github.com/tjmehta/rabbitmq-schema) - a schema definition module for RabbitMQ graphs and messages - [@query/schema](https://www.npmjs.com/package/@query/schema) - stream filtering with a URI-safe query syntax parsing to JSON Schema - [chai-ajv-json-schema](https://github.com/peon374/chai-ajv-json-schema) - chai plugin to use JSON-schema with expect in mocha tests - [grunt-jsonschema-ajv](https://github.com/SignpostMarv/grunt-jsonschema-ajv) - Grunt plugin for validating files against JSON Schema - [extract-text-webpack-plugin](https://github.com/webpack-contrib/extract-text-webpack-plugin) - extract text from bundle into a file - [electron-builder](https://github.com/electron-userland/electron-builder) - a solution to package and build a ready for distribution Electron app - [addons-linter](https://github.com/mozilla/addons-linter) - Mozilla Add-ons Linter - [gh-pages-generator](https://github.com/epoberezkin/gh-pages-generator) - multi-page site generator converting markdown files to GitHub pages ## Tests ``` npm install git submodule update --init npm test ``` ## Contributing All validation functions are generated using doT templates in [dot](https://github.com/epoberezkin/ajv/tree/master/lib/dot) folder. 
Templates are precompiled so doT is not a run-time dependency. `npm run build` - compiles templates to [dotjs](https://github.com/epoberezkin/ajv/tree/master/lib/dotjs) folder. `npm run watch` - automatically compiles templates when files in dot folder change Please see [Contributing guidelines](https://github.com/epoberezkin/ajv/blob/master/CONTRIBUTING.md) ## Changes history See https://github.com/epoberezkin/ajv/releases __Please note__: [Changes in version 5.0.0](https://github.com/epoberezkin/ajv/releases/tag/5.0.0). [Changes in version 4.6.0](https://github.com/epoberezkin/ajv/releases/tag/4.6.0). [Changes in version 4.0.0](https://github.com/epoberezkin/ajv/releases/tag/4.0.0). [Changes in version 3.0.0](https://github.com/epoberezkin/ajv/releases/tag/3.0.0). [Changes in version 2.0.0](https://github.com/epoberezkin/ajv/releases/tag/2.0.0). ## License [MIT](https://github.com/epoberezkin/ajv/blob/master/LICENSE)
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/ajv/README.md
0.518546
0.878366
README.md
pypi
semver(1) -- The semantic versioner for npm =========================================== ## Install ```bash npm install --save semver ``` ## Usage As a node module: ```js const semver = require('semver') semver.valid('1.2.3') // '1.2.3' semver.valid('a.b.c') // null semver.clean(' =v1.2.3 ') // '1.2.3' semver.satisfies('1.2.3', '1.x || >=2.5.0 || 5.0.0 - 7.2.3') // true semver.gt('1.2.3', '9.8.7') // false semver.lt('1.2.3', '9.8.7') // true semver.valid(semver.coerce('v2')) // '2.0.0' semver.valid(semver.coerce('42.6.7.9.3-alpha')) // '42.6.7' ``` As a command-line utility: ``` $ semver -h A JavaScript implementation of the http://semver.org/ specification Copyright Isaac Z. Schlueter Usage: semver [options] <version> [<version> [...]] Prints valid versions sorted by SemVer precedence Options: -r --range <range> Print versions that match the specified range. -i --increment [<level>] Increment a version by the specified level. Level can be one of: major, minor, patch, premajor, preminor, prepatch, or prerelease. Default level is 'patch'. Only one version may be specified. --preid <identifier> Identifier to be used to prefix premajor, preminor, prepatch or prerelease version increments. -l --loose Interpret versions and ranges loosely -p --include-prerelease Always include prerelease versions in range matching -c --coerce Coerce a string into SemVer if possible (does not imply --loose) Program exits successfully if any valid version satisfies all supplied ranges, and prints all satisfying versions. If no satisfying versions are found, then exits failure. Versions are printed in ascending order, so supplying multiple versions to the utility will just sort them. ``` ## Versions A "version" is described by the `v2.0.0` specification found at <http://semver.org/>. A leading `"="` or `"v"` character is stripped off and ignored. ## Ranges A `version range` is a set of `comparators` which specify versions that satisfy the range. 
A `comparator` is composed of an `operator` and a `version`. The set of primitive `operators` is: * `<` Less than * `<=` Less than or equal to * `>` Greater than * `>=` Greater than or equal to * `=` Equal. If no operator is specified, then equality is assumed, so this operator is optional, but MAY be included. For example, the comparator `>=1.2.7` would match the versions `1.2.7`, `1.2.8`, `2.5.3`, and `1.3.9`, but not the versions `1.2.6` or `1.1.0`. Comparators can be joined by whitespace to form a `comparator set`, which is satisfied by the **intersection** of all of the comparators it includes. A range is composed of one or more comparator sets, joined by `||`. A version matches a range if and only if every comparator in at least one of the `||`-separated comparator sets is satisfied by the version. For example, the range `>=1.2.7 <1.3.0` would match the versions `1.2.7`, `1.2.8`, and `1.2.99`, but not the versions `1.2.6`, `1.3.0`, or `1.1.0`. The range `1.2.7 || >=1.2.9 <2.0.0` would match the versions `1.2.7`, `1.2.9`, and `1.4.6`, but not the versions `1.2.8` or `2.0.0`. ### Prerelease Tags If a version has a prerelease tag (for example, `1.2.3-alpha.3`) then it will only be allowed to satisfy comparator sets if at least one comparator with the same `[major, minor, patch]` tuple also has a prerelease tag. For example, the range `>1.2.3-alpha.3` would be allowed to match the version `1.2.3-alpha.7`, but it would *not* be satisfied by `3.4.5-alpha.9`, even though `3.4.5-alpha.9` is technically "greater than" `1.2.3-alpha.3` according to the SemVer sort rules. The version range only accepts prerelease tags on the `1.2.3` version. The version `3.4.5` *would* satisfy the range, because it does not have a prerelease flag, and `3.4.5` is greater than `1.2.3-alpha.7`. The purpose for this behavior is twofold. 
First, prerelease versions frequently are updated very quickly, and contain many breaking changes that are (by the author's design) not yet fit for public consumption. Therefore, by default, they are excluded from range matching semantics. Second, a user who has opted into using a prerelease version has clearly indicated the intent to use *that specific* set of alpha/beta/rc versions. By including a prerelease tag in the range, the user is indicating that they are aware of the risk. However, it is still not appropriate to assume that they have opted into taking a similar risk on the *next* set of prerelease versions. #### Prerelease Identifiers The method `.inc` takes an additional `identifier` string argument that will append the value of the string as a prerelease identifier: ```javascript semver.inc('1.2.3', 'prerelease', 'beta') // '1.2.4-beta.0' ``` command-line example: ```bash $ semver 1.2.3 -i prerelease --preid beta 1.2.4-beta.0 ``` Which then can be used to increment further: ```bash $ semver 1.2.4-beta.0 -i prerelease 1.2.4-beta.1 ``` ### Advanced Range Syntax Advanced range syntax desugars to primitive comparators in deterministic ways. Advanced ranges may be combined in the same way as primitive comparators using white space or `||`. #### Hyphen Ranges `X.Y.Z - A.B.C` Specifies an inclusive set. * `1.2.3 - 2.3.4` := `>=1.2.3 <=2.3.4` If a partial version is provided as the first version in the inclusive range, then the missing pieces are replaced with zeroes. * `1.2 - 2.3.4` := `>=1.2.0 <=2.3.4` If a partial version is provided as the second version in the inclusive range, then all versions that start with the supplied parts of the tuple are accepted, but nothing that would be greater than the provided tuple parts. * `1.2.3 - 2.3` := `>=1.2.3 <2.4.0` * `1.2.3 - 2` := `>=1.2.3 <3.0.0` #### X-Ranges `1.2.x` `1.X` `1.2.*` `*` Any of `X`, `x`, or `*` may be used to "stand in" for one of the numeric values in the `[major, minor, patch]` tuple. 
* `*` := `>=0.0.0` (Any version satisfies) * `1.x` := `>=1.0.0 <2.0.0` (Matching major version) * `1.2.x` := `>=1.2.0 <1.3.0` (Matching major and minor versions) A partial version range is treated as an X-Range, so the special character is in fact optional. * `""` (empty string) := `*` := `>=0.0.0` * `1` := `1.x.x` := `>=1.0.0 <2.0.0` * `1.2` := `1.2.x` := `>=1.2.0 <1.3.0` #### Tilde Ranges `~1.2.3` `~1.2` `~1` Allows patch-level changes if a minor version is specified on the comparator. Allows minor-level changes if not. * `~1.2.3` := `>=1.2.3 <1.(2+1).0` := `>=1.2.3 <1.3.0` * `~1.2` := `>=1.2.0 <1.(2+1).0` := `>=1.2.0 <1.3.0` (Same as `1.2.x`) * `~1` := `>=1.0.0 <(1+1).0.0` := `>=1.0.0 <2.0.0` (Same as `1.x`) * `~0.2.3` := `>=0.2.3 <0.(2+1).0` := `>=0.2.3 <0.3.0` * `~0.2` := `>=0.2.0 <0.(2+1).0` := `>=0.2.0 <0.3.0` (Same as `0.2.x`) * `~0` := `>=0.0.0 <(0+1).0.0` := `>=0.0.0 <1.0.0` (Same as `0.x`) * `~1.2.3-beta.2` := `>=1.2.3-beta.2 <1.3.0` Note that prereleases in the `1.2.3` version will be allowed, if they are greater than or equal to `beta.2`. So, `1.2.3-beta.4` would be allowed, but `1.2.4-beta.2` would not, because it is a prerelease of a different `[major, minor, patch]` tuple. #### Caret Ranges `^1.2.3` `^0.2.5` `^0.0.4` Allows changes that do not modify the left-most non-zero digit in the `[major, minor, patch]` tuple. In other words, this allows patch and minor updates for versions `1.0.0` and above, patch updates for versions `0.X >=0.1.0`, and *no* updates for versions `0.0.X`. Many authors treat a `0.x` version as if the `x` were the major "breaking-change" indicator. Caret ranges are ideal when an author may make breaking changes between `0.2.4` and `0.3.0` releases, which is a common practice. However, it presumes that there will *not* be breaking changes between `0.2.4` and `0.2.5`. It allows for changes that are presumed to be additive (but non-breaking), according to commonly observed practices. 
* `^1.2.3` := `>=1.2.3 <2.0.0` * `^0.2.3` := `>=0.2.3 <0.3.0` * `^0.0.3` := `>=0.0.3 <0.0.4` * `^1.2.3-beta.2` := `>=1.2.3-beta.2 <2.0.0` Note that prereleases in the `1.2.3` version will be allowed, if they are greater than or equal to `beta.2`. So, `1.2.3-beta.4` would be allowed, but `1.2.4-beta.2` would not, because it is a prerelease of a different `[major, minor, patch]` tuple. * `^0.0.3-beta` := `>=0.0.3-beta <0.0.4` Note that prereleases in the `0.0.3` version *only* will be allowed, if they are greater than or equal to `beta`. So, `0.0.3-pr.2` would be allowed. When parsing caret ranges, a missing `patch` value desugars to the number `0`, but will allow flexibility within that value, even if the major and minor versions are both `0`. * `^1.2.x` := `>=1.2.0 <2.0.0` * `^0.0.x` := `>=0.0.0 <0.1.0` * `^0.0` := `>=0.0.0 <0.1.0` A missing `minor` and `patch` values will desugar to zero, but also allow flexibility within those values, even if the major version is zero. * `^1.x` := `>=1.0.0 <2.0.0` * `^0.x` := `>=0.0.0 <1.0.0` ### Range Grammar Putting all this together, here is a Backus-Naur grammar for ranges, for the benefit of parser authors: ```bnf range-set ::= range ( logical-or range ) * logical-or ::= ( ' ' ) * '||' ( ' ' ) * range ::= hyphen | simple ( ' ' simple ) * | '' hyphen ::= partial ' - ' partial simple ::= primitive | partial | tilde | caret primitive ::= ( '<' | '>' | '>=' | '<=' | '=' ) partial partial ::= xr ( '.' xr ( '.' xr qualifier ? )? )? xr ::= 'x' | 'X' | '*' | nr nr ::= '0' | ['1'-'9'] ( ['0'-'9'] ) * tilde ::= '~' partial caret ::= '^' partial qualifier ::= ( '-' pre )? ( '+' build )? pre ::= parts build ::= parts parts ::= part ( '.' part ) * part ::= nr | [-0-9A-Za-z]+ ``` ## Functions All methods and classes take a final `options` object argument. All options in this object are `false` by default. The options supported are: - `loose` Be more forgiving about not-quite-valid semver strings. 
(Any resulting output will always be 100% strict compliant, of course.) For backwards compatibility reasons, if the `options` argument is a boolean value instead of an object, it is interpreted to be the `loose` param. - `includePrerelease` Set to suppress the [default behavior](https://github.com/npm/node-semver#prerelease-tags) of excluding prerelease tagged versions from ranges unless they are explicitly opted into. Strict-mode Comparators and Ranges will be strict about the SemVer strings that they parse. * `valid(v)`: Return the parsed version, or null if it's not valid. * `inc(v, release)`: Return the version incremented by the release type (`major`, `premajor`, `minor`, `preminor`, `patch`, `prepatch`, or `prerelease`), or null if it's not valid * `premajor` in one call will bump the version up to the next major version and down to a prerelease of that major version. `preminor`, and `prepatch` work the same way. * If called from a non-prerelease version, the `prerelease` will work the same as `prepatch`. It increments the patch version, then makes a prerelease. If the input version is already a prerelease it simply increments it. * `prerelease(v)`: Returns an array of prerelease components, or null if none exist. Example: `prerelease('1.2.3-alpha.1') -> ['alpha', 1]` * `major(v)`: Return the major version number. * `minor(v)`: Return the minor version number. * `patch(v)`: Return the patch version number. * `intersects(r1, r2, loose)`: Return true if the two supplied ranges or comparators intersect. ### Comparison * `gt(v1, v2)`: `v1 > v2` * `gte(v1, v2)`: `v1 >= v2` * `lt(v1, v2)`: `v1 < v2` * `lte(v1, v2)`: `v1 <= v2` * `eq(v1, v2)`: `v1 == v2` This is true if they're logically equivalent, even if they're not the exact same string. You already know how to compare strings. * `neq(v1, v2)`: `v1 != v2` The opposite of `eq`. * `cmp(v1, comparator, v2)`: Pass in a comparison string, and it'll call the corresponding function above. 
`"==="` and `"!=="` do simple string comparison, but are included for completeness. Throws if an invalid comparison string is provided. * `compare(v1, v2)`: Return `0` if `v1 == v2`, or `1` if `v1` is greater, or `-1` if `v2` is greater. Sorts in ascending order if passed to `Array.sort()`. * `rcompare(v1, v2)`: The reverse of compare. Sorts an array of versions in descending order when passed to `Array.sort()`. * `diff(v1, v2)`: Returns difference between two versions by the release type (`major`, `premajor`, `minor`, `preminor`, `patch`, `prepatch`, or `prerelease`), or null if the versions are the same. ### Comparators * `intersects(comparator)`: Return true if the comparators intersect ### Ranges * `validRange(range)`: Return the valid range or null if it's not valid * `satisfies(version, range)`: Return true if the version satisfies the range. * `maxSatisfying(versions, range)`: Return the highest version in the list that satisfies the range, or `null` if none of them do. * `minSatisfying(versions, range)`: Return the lowest version in the list that satisfies the range, or `null` if none of them do. * `gtr(version, range)`: Return `true` if version is greater than all the versions possible in the range. * `ltr(version, range)`: Return `true` if version is less than all the versions possible in the range. * `outside(version, range, hilo)`: Return true if the version is outside the bounds of the range in either the high or low direction. The `hilo` argument must be either the string `'>'` or `'<'`. (This is the function called by `gtr` and `ltr`.) * `intersects(range)`: Return true if any of the ranges comparators intersect Note that, since ranges may be non-contiguous, a version might not be greater than a range, less than a range, *or* satisfy a range! 
For example, the range `1.2 <1.2.9 || >2.0.0` would have a hole from `1.2.9` until `2.0.0`, so the version `1.2.10` would not be greater than the range (because `2.0.1` satisfies, which is higher), nor less than the range (since `1.2.8` satisfies, which is lower), and it also does not satisfy the range. If you want to know if a version satisfies or does not satisfy a range, use the `satisfies(version, range)` function. ### Coercion * `coerce(version)`: Coerces a string to semver if possible This aims to provide a very forgiving translation of a non-semver string to semver. It looks for the first digit in a string, and consumes all remaining characters which satisfy at least a partial semver (e.g., `1`, `1.2`, `1.2.3`) up to the max permitted length (256 characters). Longer versions are simply truncated (`4.6.3.9.2-alpha2` becomes `4.6.3`). All surrounding text is simply ignored (`v3.4 replaces v3.3.1` becomes `3.4.0`). Only text which lacks digits will fail coercion (`version one` is not valid). The maximum length for any semver component considered for coercion is 16 characters; longer components will be ignored (`10000000000000000.4.7.4` becomes `4.7.4`). The maximum value for any semver component is `Integer.MAX_SAFE_INTEGER || (2**53 - 1)`; higher value components are invalid (`9999999999999999.4.7.4` is likely invalid).
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/semver/README.md
0.602296
0.786991
README.md
pypi
declare module "safe-buffer" { export class Buffer { length: number write(string: string, offset?: number, length?: number, encoding?: string): number; toString(encoding?: string, start?: number, end?: number): string; toJSON(): { type: 'Buffer', data: any[] }; equals(otherBuffer: Buffer): boolean; compare(otherBuffer: Buffer, targetStart?: number, targetEnd?: number, sourceStart?: number, sourceEnd?: number): number; copy(targetBuffer: Buffer, targetStart?: number, sourceStart?: number, sourceEnd?: number): number; slice(start?: number, end?: number): Buffer; writeUIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; writeUIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; writeIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; writeIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number; readUIntLE(offset: number, byteLength: number, noAssert?: boolean): number; readUIntBE(offset: number, byteLength: number, noAssert?: boolean): number; readIntLE(offset: number, byteLength: number, noAssert?: boolean): number; readIntBE(offset: number, byteLength: number, noAssert?: boolean): number; readUInt8(offset: number, noAssert?: boolean): number; readUInt16LE(offset: number, noAssert?: boolean): number; readUInt16BE(offset: number, noAssert?: boolean): number; readUInt32LE(offset: number, noAssert?: boolean): number; readUInt32BE(offset: number, noAssert?: boolean): number; readInt8(offset: number, noAssert?: boolean): number; readInt16LE(offset: number, noAssert?: boolean): number; readInt16BE(offset: number, noAssert?: boolean): number; readInt32LE(offset: number, noAssert?: boolean): number; readInt32BE(offset: number, noAssert?: boolean): number; readFloatLE(offset: number, noAssert?: boolean): number; readFloatBE(offset: number, noAssert?: boolean): number; readDoubleLE(offset: number, noAssert?: boolean): number; readDoubleBE(offset: 
number, noAssert?: boolean): number; swap16(): Buffer; swap32(): Buffer; swap64(): Buffer; writeUInt8(value: number, offset: number, noAssert?: boolean): number; writeUInt16LE(value: number, offset: number, noAssert?: boolean): number; writeUInt16BE(value: number, offset: number, noAssert?: boolean): number; writeUInt32LE(value: number, offset: number, noAssert?: boolean): number; writeUInt32BE(value: number, offset: number, noAssert?: boolean): number; writeInt8(value: number, offset: number, noAssert?: boolean): number; writeInt16LE(value: number, offset: number, noAssert?: boolean): number; writeInt16BE(value: number, offset: number, noAssert?: boolean): number; writeInt32LE(value: number, offset: number, noAssert?: boolean): number; writeInt32BE(value: number, offset: number, noAssert?: boolean): number; writeFloatLE(value: number, offset: number, noAssert?: boolean): number; writeFloatBE(value: number, offset: number, noAssert?: boolean): number; writeDoubleLE(value: number, offset: number, noAssert?: boolean): number; writeDoubleBE(value: number, offset: number, noAssert?: boolean): number; fill(value: any, offset?: number, end?: number): this; indexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number; lastIndexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number; includes(value: string | number | Buffer, byteOffset?: number, encoding?: string): boolean; /** * Allocates a new buffer containing the given {str}. * * @param str String to store in buffer. * @param encoding encoding to use, optional. Default is 'utf8' */ constructor (str: string, encoding?: string); /** * Allocates a new buffer of {size} octets. * * @param size count of octets to allocate. */ constructor (size: number); /** * Allocates a new buffer containing the given {array} of octets. * * @param array The octets to store. 
*/ constructor (array: Uint8Array); /** * Produces a Buffer backed by the same allocated memory as * the given {ArrayBuffer}. * * * @param arrayBuffer The ArrayBuffer with which to share memory. */ constructor (arrayBuffer: ArrayBuffer); /** * Allocates a new buffer containing the given {array} of octets. * * @param array The octets to store. */ constructor (array: any[]); /** * Copies the passed {buffer} data onto a new {Buffer} instance. * * @param buffer The buffer to copy. */ constructor (buffer: Buffer); prototype: Buffer; /** * Allocates a new Buffer using an {array} of octets. * * @param array */ static from(array: any[]): Buffer; /** * When passed a reference to the .buffer property of a TypedArray instance, * the newly created Buffer will share the same allocated memory as the TypedArray. * The optional {byteOffset} and {length} arguments specify a memory range * within the {arrayBuffer} that will be shared by the Buffer. * * @param arrayBuffer The .buffer property of a TypedArray or a new ArrayBuffer() * @param byteOffset * @param length */ static from(arrayBuffer: ArrayBuffer, byteOffset?: number, length?: number): Buffer; /** * Copies the passed {buffer} data onto a new Buffer instance. * * @param buffer */ static from(buffer: Buffer): Buffer; /** * Creates a new Buffer containing the given JavaScript string {str}. * If provided, the {encoding} parameter identifies the character encoding. * If not provided, {encoding} defaults to 'utf8'. * * @param str */ static from(str: string, encoding?: string): Buffer; /** * Returns true if {obj} is a Buffer * * @param obj object to test. */ static isBuffer(obj: any): obj is Buffer; /** * Returns true if {encoding} is a valid encoding argument. * Valid string encodings in Node 0.12: 'ascii'|'utf8'|'utf16le'|'ucs2'(alias of 'utf16le')|'base64'|'binary'(deprecated)|'hex' * * @param encoding string to test. */ static isEncoding(encoding: string): boolean; /** * Gives the actual byte length of a string. 
encoding defaults to 'utf8'. * This is not the same as String.prototype.length since that returns the number of characters in a string. * * @param string string to test. * @param encoding encoding used to evaluate (defaults to 'utf8') */ static byteLength(string: string, encoding?: string): number; /** * Returns a buffer which is the result of concatenating all the buffers in the list together. * * If the list has no items, or if the totalLength is 0, then it returns a zero-length buffer. * If the list has exactly one item, then the first item of the list is returned. * If the list has more than one item, then a new Buffer is created. * * @param list An array of Buffer objects to concatenate * @param totalLength Total length of the buffers when concatenated. * If totalLength is not provided, it is read from the buffers in the list. However, this adds an additional loop to the function, so it is faster to provide the length explicitly. */ static concat(list: Buffer[], totalLength?: number): Buffer; /** * The same as buf1.compare(buf2). */ static compare(buf1: Buffer, buf2: Buffer): number; /** * Allocates a new buffer of {size} octets. * * @param size count of octets to allocate. * @param fill if specified, buffer will be initialized by calling buf.fill(fill). * If parameter is omitted, buffer will be filled with zeros. * @param encoding encoding used for call to buf.fill while initalizing */ static alloc(size: number, fill?: string | Buffer | number, encoding?: string): Buffer; /** * Allocates a new buffer of {size} octets, leaving memory not initialized, so the contents * of the newly created Buffer are unknown and may contain sensitive data. * * @param size count of octets to allocate */ static allocUnsafe(size: number): Buffer; /** * Allocates a new non-pooled buffer of {size} octets, leaving memory not initialized, so the contents * of the newly created Buffer are unknown and may contain sensitive data. 
* * @param size count of octets to allocate */ static allocUnsafeSlow(size: number): Buffer; } }
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/safe-buffer/index.d.ts
0.871064
0.517388
index.d.ts
pypi
# qs <sup>[![Version Badge][2]][1]</sup> [![Build Status][3]][4] [![dependency status][5]][6] [![dev dependency status][7]][8] [![License][license-image]][license-url] [![Downloads][downloads-image]][downloads-url] [![npm badge][11]][1] A querystring parsing and stringifying library with some added security. Lead Maintainer: [Jordan Harband](https://github.com/ljharb) The **qs** module was originally created and maintained by [TJ Holowaychuk](https://github.com/visionmedia/node-querystring). ## Usage ```javascript var qs = require('qs'); var assert = require('assert'); var obj = qs.parse('a=c'); assert.deepEqual(obj, { a: 'c' }); var str = qs.stringify(obj); assert.equal(str, 'a=c'); ``` ### Parsing Objects [](#preventEval) ```javascript qs.parse(string, [options]); ``` **qs** allows you to create nested objects within your query strings, by surrounding the name of sub-keys with square brackets `[]`. For example, the string `'foo[bar]=baz'` converts to: ```javascript assert.deepEqual(qs.parse('foo[bar]=baz'), { foo: { bar: 'baz' } }); ``` When using the `plainObjects` option the parsed value is returned as a null object, created via `Object.create(null)` and as such you should be aware that prototype methods will not exist on it and a user may set those names to whatever value they like: ```javascript var nullObject = qs.parse('a[hasOwnProperty]=b', { plainObjects: true }); assert.deepEqual(nullObject, { a: { hasOwnProperty: 'b' } }); ``` By default parameters that would overwrite properties on the object prototype are ignored, if you wish to keep the data from those fields either use `plainObjects` as mentioned above, or set `allowPrototypes` to `true` which will allow user input to overwrite those properties. *WARNING* It is generally a bad idea to enable this option as it can cause problems when attempting to use the properties that have been overwritten. Always be careful with this option. 
```javascript var protoObject = qs.parse('a[hasOwnProperty]=b', { allowPrototypes: true }); assert.deepEqual(protoObject, { a: { hasOwnProperty: 'b' } }); ``` URI encoded strings work too: ```javascript assert.deepEqual(qs.parse('a%5Bb%5D=c'), { a: { b: 'c' } }); ``` You can also nest your objects, like `'foo[bar][baz]=foobarbaz'`: ```javascript assert.deepEqual(qs.parse('foo[bar][baz]=foobarbaz'), { foo: { bar: { baz: 'foobarbaz' } } }); ``` By default, when nesting objects **qs** will only parse up to 5 children deep. This means if you attempt to parse a string like `'a[b][c][d][e][f][g][h][i]=j'` your resulting object will be: ```javascript var expected = { a: { b: { c: { d: { e: { f: { '[g][h][i]': 'j' } } } } } } }; var string = 'a[b][c][d][e][f][g][h][i]=j'; assert.deepEqual(qs.parse(string), expected); ``` This depth can be overridden by passing a `depth` option to `qs.parse(string, [options])`: ```javascript var deep = qs.parse('a[b][c][d][e][f][g][h][i]=j', { depth: 1 }); assert.deepEqual(deep, { a: { b: { '[c][d][e][f][g][h][i]': 'j' } } }); ``` The depth limit helps mitigate abuse when **qs** is used to parse user input, and it is recommended to keep it a reasonably small number. For similar reasons, by default **qs** will only parse up to 1000 parameters. 
This can be overridden by passing a `parameterLimit` option: ```javascript var limited = qs.parse('a=b&c=d', { parameterLimit: 1 }); assert.deepEqual(limited, { a: 'b' }); ``` To bypass the leading question mark, use `ignoreQueryPrefix`: ```javascript var prefixed = qs.parse('?a=b&c=d', { ignoreQueryPrefix: true }); assert.deepEqual(prefixed, { a: 'b', c: 'd' }); ``` An optional delimiter can also be passed: ```javascript var delimited = qs.parse('a=b;c=d', { delimiter: ';' }); assert.deepEqual(delimited, { a: 'b', c: 'd' }); ``` Delimiters can be a regular expression too: ```javascript var regexed = qs.parse('a=b;c=d,e=f', { delimiter: /[;,]/ }); assert.deepEqual(regexed, { a: 'b', c: 'd', e: 'f' }); ``` Option `allowDots` can be used to enable dot notation: ```javascript var withDots = qs.parse('a.b=c', { allowDots: true }); assert.deepEqual(withDots, { a: { b: 'c' } }); ``` ### Parsing Arrays **qs** can also parse arrays using a similar `[]` notation: ```javascript var withArray = qs.parse('a[]=b&a[]=c'); assert.deepEqual(withArray, { a: ['b', 'c'] }); ``` You may specify an index as well: ```javascript var withIndexes = qs.parse('a[1]=c&a[0]=b'); assert.deepEqual(withIndexes, { a: ['b', 'c'] }); ``` Note that the only difference between an index in an array and a key in an object is that the value between the brackets must be a number to create an array. 
When creating arrays with specific indices, **qs** will compact a sparse array to only the existing values preserving their order: ```javascript var noSparse = qs.parse('a[1]=b&a[15]=c'); assert.deepEqual(noSparse, { a: ['b', 'c'] }); ``` Note that an empty string is also a value, and will be preserved: ```javascript var withEmptyString = qs.parse('a[]=&a[]=b'); assert.deepEqual(withEmptyString, { a: ['', 'b'] }); var withIndexedEmptyString = qs.parse('a[0]=b&a[1]=&a[2]=c'); assert.deepEqual(withIndexedEmptyString, { a: ['b', '', 'c'] }); ``` **qs** will also limit specifying indices in an array to a maximum index of `20`. Any array members with an index of greater than `20` will instead be converted to an object with the index as the key: ```javascript var withMaxIndex = qs.parse('a[100]=b'); assert.deepEqual(withMaxIndex, { a: { '100': 'b' } }); ``` This limit can be overridden by passing an `arrayLimit` option: ```javascript var withArrayLimit = qs.parse('a[1]=b', { arrayLimit: 0 }); assert.deepEqual(withArrayLimit, { a: { '1': 'b' } }); ``` To disable array parsing entirely, set `parseArrays` to `false`. ```javascript var noParsingArrays = qs.parse('a[]=b', { parseArrays: false }); assert.deepEqual(noParsingArrays, { a: { '0': 'b' } }); ``` If you mix notations, **qs** will merge the two items into an object: ```javascript var mixedNotation = qs.parse('a[0]=b&a[b]=c'); assert.deepEqual(mixedNotation, { a: { '0': 'b', b: 'c' } }); ``` You can also create arrays of objects: ```javascript var arraysOfObjects = qs.parse('a[][b]=c'); assert.deepEqual(arraysOfObjects, { a: [{ b: 'c' }] }); ``` ### Stringifying [](#preventEval) ```javascript qs.stringify(object, [options]); ``` When stringifying, **qs** by default URI encodes output. 
Objects are stringified as you would expect: ```javascript assert.equal(qs.stringify({ a: 'b' }), 'a=b'); assert.equal(qs.stringify({ a: { b: 'c' } }), 'a%5Bb%5D=c'); ``` This encoding can be disabled by setting the `encode` option to `false`: ```javascript var unencoded = qs.stringify({ a: { b: 'c' } }, { encode: false }); assert.equal(unencoded, 'a[b]=c'); ``` Encoding can be disabled for keys by setting the `encodeValuesOnly` option to `true`: ```javascript var encodedValues = qs.stringify( { a: 'b', c: ['d', 'e=f'], f: [['g'], ['h']] }, { encodeValuesOnly: true } ); assert.equal(encodedValues,'a=b&c[0]=d&c[1]=e%3Df&f[0][0]=g&f[1][0]=h'); ``` This encoding can also be replaced by a custom encoding method set as `encoder` option: ```javascript var encoded = qs.stringify({ a: { b: 'c' } }, { encoder: function (str) { // Passed in values `a`, `b`, `c` return // Return encoded string }}) ``` _(Note: the `encoder` option does not apply if `encode` is `false`)_ Analogue to the `encoder` there is a `decoder` option for `parse` to override decoding of properties and values: ```javascript var decoded = qs.parse('x=z', { decoder: function (str) { // Passed in values `x`, `z` return // Return decoded string }}) ``` Examples beyond this point will be shown as though the output is not URI encoded for clarity. Please note that the return values in these cases *will* be URI encoded during real usage. 
When arrays are stringified, by default they are given explicit indices: ```javascript qs.stringify({ a: ['b', 'c', 'd'] }); // 'a[0]=b&a[1]=c&a[2]=d' ``` You may override this by setting the `indices` option to `false`: ```javascript qs.stringify({ a: ['b', 'c', 'd'] }, { indices: false }); // 'a=b&a=c&a=d' ``` You may use the `arrayFormat` option to specify the format of the output array: ```javascript qs.stringify({ a: ['b', 'c'] }, { arrayFormat: 'indices' }) // 'a[0]=b&a[1]=c' qs.stringify({ a: ['b', 'c'] }, { arrayFormat: 'brackets' }) // 'a[]=b&a[]=c' qs.stringify({ a: ['b', 'c'] }, { arrayFormat: 'repeat' }) // 'a=b&a=c' ``` When objects are stringified, by default they use bracket notation: ```javascript qs.stringify({ a: { b: { c: 'd', e: 'f' } } }); // 'a[b][c]=d&a[b][e]=f' ``` You may override this to use dot notation by setting the `allowDots` option to `true`: ```javascript qs.stringify({ a: { b: { c: 'd', e: 'f' } } }, { allowDots: true }); // 'a.b.c=d&a.b.e=f' ``` Empty strings and null values will omit the value, but the equals sign (=) remains in place: ```javascript assert.equal(qs.stringify({ a: '' }), 'a='); ``` Key with no values (such as an empty object or array) will return nothing: ```javascript assert.equal(qs.stringify({ a: [] }), ''); assert.equal(qs.stringify({ a: {} }), ''); assert.equal(qs.stringify({ a: [{}] }), ''); assert.equal(qs.stringify({ a: { b: []} }), ''); assert.equal(qs.stringify({ a: { b: {}} }), ''); ``` Properties that are set to `undefined` will be omitted entirely: ```javascript assert.equal(qs.stringify({ a: null, b: undefined }), 'a='); ``` The query string may optionally be prepended with a question mark: ```javascript assert.equal(qs.stringify({ a: 'b', c: 'd' }, { addQueryPrefix: true }), '?a=b&c=d'); ``` The delimiter may be overridden with stringify as well: ```javascript assert.equal(qs.stringify({ a: 'b', c: 'd' }, { delimiter: ';' }), 'a=b;c=d'); ``` If you only want to override the serialization of `Date` 
objects, you can provide a `serializeDate` option: ```javascript var date = new Date(7); assert.equal(qs.stringify({ a: date }), 'a=1970-01-01T00:00:00.007Z'.replace(/:/g, '%3A')); assert.equal( qs.stringify({ a: date }, { serializeDate: function (d) { return d.getTime(); } }), 'a=7' ); ``` You may use the `sort` option to affect the order of parameter keys: ```javascript function alphabeticalSort(a, b) { return a.localeCompare(b); } assert.equal(qs.stringify({ a: 'c', z: 'y', b : 'f' }, { sort: alphabeticalSort }), 'a=c&b=f&z=y'); ``` Finally, you can use the `filter` option to restrict which keys will be included in the stringified output. If you pass a function, it will be called for each key to obtain the replacement value. Otherwise, if you pass an array, it will be used to select properties and array indices for stringification: ```javascript function filterFunc(prefix, value) { if (prefix == 'b') { // Return an `undefined` value to omit a property. return; } if (prefix == 'e[f]') { return value.getTime(); } if (prefix == 'e[g][0]') { return value * 2; } return value; } qs.stringify({ a: 'b', c: 'd', e: { f: new Date(123), g: [2] } }, { filter: filterFunc }); // 'a=b&c=d&e[f]=123&e[g][0]=4' qs.stringify({ a: 'b', c: 'd', e: 'f' }, { filter: ['a', 'e'] }); // 'a=b&e=f' qs.stringify({ a: ['b', 'c', 'd'], e: 'f' }, { filter: ['a', 0, 2] }); // 'a[0]=b&a[2]=d' ``` ### Handling of `null` values By default, `null` values are treated like empty strings: ```javascript var withNull = qs.stringify({ a: null, b: '' }); assert.equal(withNull, 'a=&b='); ``` Parsing does not distinguish between parameters with and without equal signs. Both are converted to empty strings. ```javascript var equalsInsensitive = qs.parse('a&b='); assert.deepEqual(equalsInsensitive, { a: '', b: '' }); ``` To distinguish between `null` values and empty strings use the `strictNullHandling` flag. 
In the result string the `null` values have no `=` sign: ```javascript var strictNull = qs.stringify({ a: null, b: '' }, { strictNullHandling: true }); assert.equal(strictNull, 'a&b='); ``` To parse values without `=` back to `null` use the `strictNullHandling` flag: ```javascript var parsedStrictNull = qs.parse('a&b=', { strictNullHandling: true }); assert.deepEqual(parsedStrictNull, { a: null, b: '' }); ``` To completely skip rendering keys with `null` values, use the `skipNulls` flag: ```javascript var nullsSkipped = qs.stringify({ a: 'b', c: null}, { skipNulls: true }); assert.equal(nullsSkipped, 'a=b'); ``` ### Dealing with special character sets By default the encoding and decoding of characters is done in `utf-8`. If you wish to encode querystrings to a different character set (i.e. [Shift JIS](https://en.wikipedia.org/wiki/Shift_JIS)) you can use the [`qs-iconv`](https://github.com/martinheidegger/qs-iconv) library: ```javascript var encoder = require('qs-iconv/encoder')('shift_jis'); var shiftJISEncoded = qs.stringify({ a: 'こんにちは!' }, { encoder: encoder }); assert.equal(shiftJISEncoded, 'a=%82%B1%82%F1%82%C9%82%BF%82%CD%81I'); ``` This also works for decoding of query strings: ```javascript var decoder = require('qs-iconv/decoder')('shift_jis'); var obj = qs.parse('a=%82%B1%82%F1%82%C9%82%BF%82%CD%81I', { decoder: decoder }); assert.deepEqual(obj, { a: 'こんにちは!' }); ``` ### RFC 3986 and RFC 1738 space encoding RFC3986 used as default option and encodes ' ' to *%20* which is backward compatible. In the same time, output can be stringified as per RFC1738 with ' ' equal to '+'. 
``` assert.equal(qs.stringify({ a: 'b c' }), 'a=b%20c'); assert.equal(qs.stringify({ a: 'b c' }, { format : 'RFC3986' }), 'a=b%20c'); assert.equal(qs.stringify({ a: 'b c' }, { format : 'RFC1738' }), 'a=b+c'); ``` [1]: https://npmjs.org/package/qs [2]: http://versionbadg.es/ljharb/qs.svg [3]: https://api.travis-ci.org/ljharb/qs.svg [4]: https://travis-ci.org/ljharb/qs [5]: https://david-dm.org/ljharb/qs.svg [6]: https://david-dm.org/ljharb/qs [7]: https://david-dm.org/ljharb/qs/dev-status.svg [8]: https://david-dm.org/ljharb/qs?type=dev [9]: https://ci.testling.com/ljharb/qs.png [10]: https://ci.testling.com/ljharb/qs [11]: https://nodei.co/npm/qs.png?downloads=true&stars=true [license-image]: http://img.shields.io/npm/l/qs.svg [license-url]: LICENSE [downloads-image]: http://img.shields.io/npm/dm/qs.svg [downloads-url]: http://npm-stat.com/charts.html?package=qs
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/qs/README.md
0.401805
0.926802
README.md
pypi
# safer-buffer [![travis][travis-image]][travis-url] [![npm][npm-image]][npm-url] [![javascript style guide][standard-image]][standard-url] [![Security Responsible Disclosure][security-image]][security-url] [travis-image]: https://travis-ci.org/ChALkeR/safer-buffer.svg?branch=master [travis-url]: https://travis-ci.org/ChALkeR/safer-buffer [npm-image]: https://img.shields.io/npm/v/safer-buffer.svg [npm-url]: https://npmjs.org/package/safer-buffer [standard-image]: https://img.shields.io/badge/code_style-standard-brightgreen.svg [standard-url]: https://standardjs.com [security-image]: https://img.shields.io/badge/Security-Responsible%20Disclosure-green.svg [security-url]: https://github.com/nodejs/security-wg/blob/master/processes/responsible_disclosure_template.md Modern Buffer API polyfill without footguns, working on Node.js from 0.8 to current. ## How to use? First, port all `Buffer()` and `new Buffer()` calls to `Buffer.alloc()` and `Buffer.from()` API. Then, to achieve compatibility with outdated Node.js versions (`<4.5.0` and 5.x `<5.9.0`), use `const Buffer = require('safer-buffer').Buffer` in all files where you make calls to the new Buffer API. _Use `var` instead of `const` if you need that for your Node.js version range support._ Also, see the [porting Buffer](https://github.com/ChALkeR/safer-buffer/blob/master/Porting-Buffer.md) guide. ## Do I need it? Hopefully, not — dropping support for outdated Node.js versions should be fine nowadays, and that is the recommended path forward. You _do_ need to port to the `Buffer.alloc()` and `Buffer.from()` though. See the [porting guide](https://github.com/ChALkeR/safer-buffer/blob/master/Porting-Buffer.md) for a better description. ## Why not [safe-buffer](https://npmjs.com/safe-buffer)? 
_In short: while `safe-buffer` serves as a polyfill for the new API, it allows old API usage and itself contains footguns._ `safe-buffer` could be used safely to get the new API while still keeping support for older Node.js versions (like this module), but while analyzing ecosystem usage of the old Buffer API I found out that `safe-buffer` is itself causing problems in some cases. For example, consider the following snippet: ```console $ cat example.unsafe.js console.log(Buffer(20)) $ ./node-v6.13.0-linux-x64/bin/node example.unsafe.js <Buffer 0a 00 00 00 00 00 00 00 28 13 de 02 00 00 00 00 05 00 00 00> $ standard example.unsafe.js standard: Use JavaScript Standard Style (https://standardjs.com) /home/chalker/repo/safer-buffer/example.unsafe.js:2:13: 'Buffer()' was deprecated since v6. Use 'Buffer.alloc()' or 'Buffer.from()' (use 'https://www.npmjs.com/package/safe-buffer' for '<4.5.0') instead. ``` This is allocates and writes to console an uninitialized chunk of memory. [standard](https://www.npmjs.com/package/standard) linter (among others) catch that and warn people to avoid using unsafe API. Let's now throw in `safe-buffer`! ```console $ cat example.safe-buffer.js const Buffer = require('safe-buffer').Buffer console.log(Buffer(20)) $ standard example.safe-buffer.js $ ./node-v6.13.0-linux-x64/bin/node example.safe-buffer.js <Buffer 08 00 00 00 00 00 00 00 28 58 01 82 fe 7f 00 00 00 00 00 00> ``` See the problem? Adding in `safe-buffer` _magically removes the lint warning_, but the behavior remains identiсal to what we had before, and when launched on Node.js 6.x LTS — this dumps out chunks of uninitialized memory. _And this code will still emit runtime warnings on Node.js 10.x and above._ That was done by design. I first considered changing `safe-buffer`, prohibiting old API usage or emitting warnings on it, but that significantly diverges from `safe-buffer` design. 
After some discussion, it was decided to move my approach into a separate package, and _this is that separate package_. This footgun is not imaginary — I observed top-downloaded packages doing that kind of thing, «fixing» the lint warning by blindly including `safe-buffer` without any actual changes. Also in some cases, even if the API _was_ migrated to use of safe Buffer API — a random pull request can bring unsafe Buffer API usage back to the codebase by adding new calls — and that could go unnoticed even if you have a linter prohibiting that (becase of the reason stated above), and even pass CI. _I also observed that being done in popular packages._ Some examples: * [webdriverio](https://github.com/webdriverio/webdriverio/commit/05cbd3167c12e4930f09ef7cf93b127ba4effae4#diff-124380949022817b90b622871837d56cR31) (a module with 548 759 downloads/month), * [websocket-stream](https://github.com/maxogden/websocket-stream/commit/c9312bd24d08271687d76da0fe3c83493871cf61) (218 288 d/m, fix in [maxogden/websocket-stream#142](https://github.com/maxogden/websocket-stream/pull/142)), * [node-serialport](https://github.com/node-serialport/node-serialport/commit/e8d9d2b16c664224920ce1c895199b1ce2def48c) (113 138 d/m, fix in [node-serialport/node-serialport#1510](https://github.com/node-serialport/node-serialport/pull/1510)), * [karma](https://github.com/karma-runner/karma/commit/3d94b8cf18c695104ca195334dc75ff054c74eec) (3 973 193 d/m, fix in [karma-runner/karma#2947](https://github.com/karma-runner/karma/pull/2947)), * [spdy-transport](https://github.com/spdy-http2/spdy-transport/commit/5375ac33f4a62a4f65bcfc2827447d42a5dbe8b1) (5 970 727 d/m, fix in [spdy-http2/spdy-transport#53](https://github.com/spdy-http2/spdy-transport/pull/53)). * And there are a lot more over the ecosystem. 
I filed a PR at [mysticatea/eslint-plugin-node#110](https://github.com/mysticatea/eslint-plugin-node/pull/110) to partially fix that (for cases when that lint rule is used), but it is a semver-major change for linter rules and presets, so it would take significant time for that to reach actual setups. _It also hasn't been released yet (2018-03-20)._ Also, `safer-buffer` discourages the usage of `.allocUnsafe()`, which is often done by a mistake. It still supports it with an explicit concern barier, by placing it under `require('safer-buffer/dangereous')`. ## But isn't throwing bad? Not really. It's an error that could be noticed and fixed early, instead of causing havoc later like unguarded `new Buffer()` calls that end up receiving user input can do. This package affects only the files where `var Buffer = require('safer-buffer').Buffer` was done, so it is really simple to keep track of things and make sure that you don't mix old API usage with that. Also, CI should hint anything that you might have missed. New commits, if tested, won't land new usage of unsafe Buffer API this way. _Node.js 10.x also deals with that by printing a runtime depecation warning._ ### Would it affect third-party modules? No, unless you explicitly do an awful thing like monkey-patching or overriding the built-in `Buffer`. Don't do that. ### But I don't want throwing… That is also fine! Also, it could be better in some cases when you don't comprehensive enough test coverage. In that case — just don't override `Buffer` and use `var SaferBuffer = require('safer-buffer').Buffer` instead. That way, everything using `Buffer` natively would still work, but there would be two drawbacks: * `Buffer.from`/`Buffer.alloc` won't be polyfilled — use `SaferBuffer.from` and `SaferBuffer.alloc` instead. * You are still open to accidentally using the insecure deprecated API — use a linter to catch that. 
Note that using a linter to catch accidental `Buffer` constructor usage in this case is strongly recommended. `Buffer` is not overridden in this use case, so linters won't get confused. ## «Without footguns»? Well, it is still possible to do _some_ things with `Buffer` API, e.g. accessing `.buffer` property on older versions and duping things from there. You shouldn't do that in your code, probably. The intention is to remove the most significant footguns that affect lots of packages in the ecosystem, and to do it in the proper way. Also, this package doesn't protect against security issues affecting some Node.js versions, so for usage in your own production code, it is still recommended to update to a Node.js version [supported by upstream](https://github.com/nodejs/release#release-schedule).
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/safer-buffer/Readme.md
0.446977
0.791096
Readme.md
pypi
# Porting to the Buffer.from/Buffer.alloc API <a id="overview"></a> ## Overview - [Variant 1: Drop support for Node.js ≤ 4.4.x and 5.0.0 — 5.9.x.](#variant-1) (*recommended*) - [Variant 2: Use a polyfill](#variant-2) - [Variant 3: manual detection, with safeguards](#variant-3) ### Finding problematic bits of code using grep Just run `grep -nrE '[^a-zA-Z](Slow)?Buffer\s*\(' --exclude-dir node_modules`. It will find all the potentially unsafe places in your own code (with some considerably unlikely exceptions). ### Finding problematic bits of code using Node.js 8 If you’re using Node.js ≥ 8.0.0 (which is recommended), Node.js exposes multiple options that help with finding the relevant pieces of code: - `--trace-warnings` will make Node.js show a stack trace for this warning and other warnings that are printed by Node.js. - `--trace-deprecation` does the same thing, but only for deprecation warnings. - `--pending-deprecation` will show more types of deprecation warnings. In particular, it will show the `Buffer()` deprecation warning, even on Node.js 8. You can set these flags using an environment variable: ```console $ export NODE_OPTIONS='--trace-warnings --pending-deprecation' $ cat example.js 'use strict'; const foo = new Buffer('foo'); $ node example.js (node:7147) [DEP0005] DeprecationWarning: The Buffer() and new Buffer() constructors are not recommended for use due to security and usability concerns. Please use the new Buffer.alloc(), Buffer.allocUnsafe(), or Buffer.from() construction methods instead. at showFlaggedDeprecation (buffer.js:127:13) at new Buffer (buffer.js:148:3) at Object.<anonymous> (/path/to/example.js:2:13) [... more stack trace lines ...] 
``` ### Finding problematic bits of code using linters Eslint rules [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor) or [node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md) also find calls to deprecated `Buffer()` API. Those rules are included in some pre-sets. There is a drawback, though, that it doesn't always [work correctly](https://github.com/chalker/safer-buffer#why-not-safe-buffer) when `Buffer` is overriden e.g. with a polyfill, so recommended is a combination of this and some other method described above. <a id="variant-1"></a> ## Variant 1: Drop support for Node.js ≤ 4.4.x and 5.0.0 — 5.9.x. This is the recommended solution nowadays that would imply only minimal overhead. The Node.js 5.x release line has been unsupported since July 2016, and the Node.js 4.x release line reaches its End of Life in April 2018 (→ [Schedule](https://github.com/nodejs/Release#release-schedule)). This means that these versions of Node.js will *not* receive any updates, even in case of security issues, so using these release lines should be avoided, if at all possible. What you would do in this case is to convert all `new Buffer()` or `Buffer()` calls to use `Buffer.alloc()` or `Buffer.from()`, in the following way: - For `new Buffer(number)`, replace it with `Buffer.alloc(number)`. - For `new Buffer(string)` (or `new Buffer(string, encoding)`), replace it with `Buffer.from(string)` (or `Buffer.from(string, encoding)`). - For all other combinations of arguments (these are much rarer), also replace `new Buffer(...arguments)` with `Buffer.from(...arguments)`. Note that `Buffer.alloc()` is also _faster_ on the current Node.js versions than `new Buffer(size).fill(0)`, which is what you would otherwise need to ensure zero-filling. 
Enabling eslint rule [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor) or [node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md) is recommended to avoid accidential unsafe Buffer API usage. There is also a [JSCodeshift codemod](https://github.com/joyeecheung/node-dep-codemod#dep005) for automatically migrating Buffer constructors to `Buffer.alloc()` or `Buffer.from()`. Note that it currently only works with cases where the arguments are literals or where the constructor is invoked with two arguments. _If you currently support those older Node.js versions and dropping them would be a semver-major change for you, or if you support older branches of your packages, consider using [Variant 2](#variant-2) or [Variant 3](#variant-3) on older branches, so people using those older branches will also receive the fix. That way, you will eradicate potential issues caused by unguarded Buffer API usage and your users will not observe a runtime deprecation warning when running your code on Node.js 10._ <a id="variant-2"></a> ## Variant 2: Use a polyfill Utilize [safer-buffer](https://www.npmjs.com/package/safer-buffer) as a polyfill to support older Node.js versions. You would take exacly the same steps as in [Variant 1](#variant-1), but with a polyfill `const Buffer = require('safer-buffer').Buffer` in all files where you use the new `Buffer` api. Make sure that you do not use old `new Buffer` API — in any files where the line above is added, using old `new Buffer()` API will _throw_. It will be easy to notice that in CI, though. Alternatively, you could use [buffer-from](https://www.npmjs.com/package/buffer-from) and/or [buffer-alloc](https://www.npmjs.com/package/buffer-alloc) [ponyfills](https://ponyfill.com/) — those are great, the only downsides being 4 deps in the tree and slightly more code changes to migrate off them (as you would be using e.g. 
`Buffer.from` under a different name). If you need only `Buffer.from` polyfilled — `buffer-from` alone which comes with no extra dependencies. _Alternatively, you could use [safe-buffer](https://www.npmjs.com/package/safe-buffer) — it also provides a polyfill, but takes a different approach which has [it's drawbacks](https://github.com/chalker/safer-buffer#why-not-safe-buffer). It will allow you to also use the older `new Buffer()` API in your code, though — but that's arguably a benefit, as it is problematic, can cause issues in your code, and will start emitting runtime deprecation warnings starting with Node.js 10._ Note that in either case, it is important that you also remove all calls to the old Buffer API manually — just throwing in `safe-buffer` doesn't fix the problem by itself, it just provides a polyfill for the new API. I have seen people doing that mistake. Enabling eslint rule [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor) or [node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md) is recommended. _Don't forget to drop the polyfill usage once you drop support for Node.js < 4.5.0._ <a id="variant-3"></a> ## Variant 3 — manual detection, with safeguards This is useful if you create Buffer instances in only a few places (e.g. one), or you have your own wrapper around them. ### Buffer(0) This special case for creating empty buffers can be safely replaced with `Buffer.concat([])`, which returns the same result all the way down to Node.js 0.8.x. ### Buffer(notNumber) Before: ```js var buf = new Buffer(notNumber, encoding); ``` After: ```js var buf; if (Buffer.from && Buffer.from !== Uint8Array.from) { buf = Buffer.from(notNumber, encoding); } else { if (typeof notNumber === 'number') throw new Error('The "size" argument must be of type number.'); buf = new Buffer(notNumber, encoding); } ``` `encoding` is optional. 
Note that the `typeof notNumber` before `new Buffer` is required (for cases when `notNumber` argument is not hard-coded) and _is not caused by the deprecation of Buffer constructor_ — it's exactly _why_ the Buffer constructor is deprecated. Ecosystem packages lacking this type-check caused numereous security issues — situations when unsanitized user input could end up in the `Buffer(arg)` create problems ranging from DoS to leaking sensitive information to the attacker from the process memory. When `notNumber` argument is hardcoded (e.g. literal `"abc"` or `[0,1,2]`), the `typeof` check can be omitted. Also note that using TypeScript does not fix this problem for you — when libs written in `TypeScript` are used from JS, or when user input ends up there — it behaves exactly as pure JS, as all type checks are translation-time only and are not present in the actual JS code which TS compiles to. ### Buffer(number) For Node.js 0.10.x (and below) support: ```js var buf; if (Buffer.alloc) { buf = Buffer.alloc(number); } else { buf = new Buffer(number); buf.fill(0); } ``` Otherwise (Node.js ≥ 0.12.x): ```js const buf = Buffer.alloc ? Buffer.alloc(number) : new Buffer(number).fill(0); ``` ## Regarding Buffer.allocUnsafe Be extra cautious when using `Buffer.allocUnsafe`: * Don't use it if you don't have a good reason to * e.g. you probably won't ever see a performance difference for small buffers, in fact, those might be even faster with `Buffer.alloc()`, * if your code is not in the hot code path — you also probably won't notice a difference, * keep in mind that zero-filling minimizes the potential risks. 
* If you use it, make sure that you never return the buffer in a partially-filled state, * if you are writing to it sequentially — always truncate it to the actuall written length Errors in handling buffers allocated with `Buffer.allocUnsafe` could result in various issues, ranged from undefined behaviour of your code to sensitive data (user input, passwords, certs) leaking to the remote attacker. _Note that the same applies to `new Buffer` usage without zero-filling, depending on the Node.js version (and lacking type checks also adds DoS to the list of potential problems)._ <a id="faq"></a> ## FAQ <a id="design-flaws"></a> ### What is wrong with the `Buffer` constructor? The `Buffer` constructor could be used to create a buffer in many different ways: - `new Buffer(42)` creates a `Buffer` of 42 bytes. Before Node.js 8, this buffer contained *arbitrary memory* for performance reasons, which could include anything ranging from program source code to passwords and encryption keys. - `new Buffer('abc')` creates a `Buffer` that contains the UTF-8-encoded version of the string `'abc'`. A second argument could specify another encoding: For example, `new Buffer(string, 'base64')` could be used to convert a Base64 string into the original sequence of bytes that it represents. - There are several other combinations of arguments. This meant that, in code like `var buffer = new Buffer(foo);`, *it is not possible to tell what exactly the contents of the generated buffer are* without knowing the type of `foo`. Sometimes, the value of `foo` comes from an external source. 
For example, this function could be exposed as a service on a web server, converting a UTF-8 string into its Base64 form: ``` function stringToBase64(req, res) { // The request body should have the format of `{ string: 'foobar' }` const rawBytes = new Buffer(req.body.string) const encoded = rawBytes.toString('base64') res.end({ encoded: encoded }) } ``` Note that this code does *not* validate the type of `req.body.string`: - `req.body.string` is expected to be a string. If this is the case, all goes well. - `req.body.string` is controlled by the client that sends the request. - If `req.body.string` is the *number* `50`, the `rawBytes` would be 50 bytes: - Before Node.js 8, the content would be uninitialized - After Node.js 8, the content would be `50` bytes with the value `0` Because of the missing type check, an attacker could intentionally send a number as part of the request. Using this, they can either: - Read uninitialized memory. This **will** leak passwords, encryption keys and other kinds of sensitive information. (Information leak) - Force the program to allocate a large amount of memory. For example, when specifying `500000000` as the input value, each request will allocate 500MB of memory. This can be used to either exhaust the memory available of a program completely and make it crash, or slow it down significantly. (Denial of Service) Both of these scenarios are considered serious security issues in a real-world web server context. when using `Buffer.from(req.body.string)` instead, passing a number will always throw an exception instead, giving a controlled behaviour that can always be handled by the program. <a id="ecosystem-usage"></a> ### The `Buffer()` constructor has been deprecated for a while. Is this really an issue? Surveys of code in the `npm` ecosystem have shown that the `Buffer()` constructor is still widely used. This includes new code, and overall usage of such code has actually been *increasing*.
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/safer-buffer/Porting-Buffer.md
0.68679
0.852014
Porting-Buffer.md
pypi
JS-YAML - YAML 1.2 parser / writer for JavaScript ================================================= [![Build Status](https://travis-ci.org/nodeca/js-yaml.svg?branch=master)](https://travis-ci.org/nodeca/js-yaml) [![NPM version](https://img.shields.io/npm/v/js-yaml.svg)](https://www.npmjs.org/package/js-yaml) __[Online Demo](http://nodeca.github.com/js-yaml/)__ This is an implementation of [YAML](http://yaml.org/), a human-friendly data serialization language. Started as [PyYAML](http://pyyaml.org/) port, it was completely rewritten from scratch. Now it's very fast, and supports 1.2 spec. Installation ------------ ### YAML module for node.js ``` npm install js-yaml ``` ### CLI executable If you want to inspect your YAML files from CLI, install js-yaml globally: ``` npm install -g js-yaml ``` #### Usage ``` usage: js-yaml [-h] [-v] [-c] [-t] file Positional arguments: file File with YAML document(s) Optional arguments: -h, --help Show this help message and exit. -v, --version Show program's version number and exit. -c, --compact Display errors in compact mode -t, --trace Show stack trace on error ``` ### Bundled YAML library for browsers ``` html <!-- esprima required only for !!js/function --> <script src="esprima.js"></script> <script src="js-yaml.min.js"></script> <script type="text/javascript"> var doc = jsyaml.load('greeting: hello\nname: world'); </script> ``` Browser support was done mostly for the online demo. If you find any errors - feel free to send pull requests with fixes. Also note, that IE and other old browsers needs [es5-shims](https://github.com/kriskowal/es5-shim) to operate. Notes: 1. We have no resources to support browserified version. Don't expect it to be well tested. Don't expect fast fixes if something goes wrong there. 2. `!!js/function` in browser bundle will not work by default. If you really need it - load `esprima` parser first (via amd or directly). 3. 
`!!bin` in browser will return `Array`, because browsers do not support node.js `Buffer` and adding Buffer shims is completely useless on practice. API --- Here we cover the most 'useful' methods. If you need advanced details (creating your own tags), see [wiki](https://github.com/nodeca/js-yaml/wiki) and [examples](https://github.com/nodeca/js-yaml/tree/master/examples) for more info. ``` javascript yaml = require('js-yaml'); fs = require('fs'); // Get document, or throw exception on error try { var doc = yaml.safeLoad(fs.readFileSync('/home/ixti/example.yml', 'utf8')); console.log(doc); } catch (e) { console.log(e); } ``` ### safeLoad (string [ , options ]) **Recommended loading way.** Parses `string` as single YAML document. Returns a JavaScript object or throws `YAMLException` on error. By default, does not support regexps, functions and undefined. This method is safe for untrusted data. options: - `filename` _(default: null)_ - string to be used as a file path in error/warning messages. - `onWarning` _(default: null)_ - function to call on warning messages. Loader will throw on warnings if this function is not provided. - `schema` _(default: `DEFAULT_SAFE_SCHEMA`)_ - specifies a schema to use. - `FAILSAFE_SCHEMA` - only strings, arrays and plain objects: http://www.yaml.org/spec/1.2/spec.html#id2802346 - `JSON_SCHEMA` - all JSON-supported types: http://www.yaml.org/spec/1.2/spec.html#id2803231 - `CORE_SCHEMA` - same as `JSON_SCHEMA`: http://www.yaml.org/spec/1.2/spec.html#id2804923 - `DEFAULT_SAFE_SCHEMA` - all supported YAML types, without unsafe ones (`!!js/undefined`, `!!js/regexp` and `!!js/function`): http://yaml.org/type/ - `DEFAULT_FULL_SCHEMA` - all supported YAML types. - `json` _(default: false)_ - compatibility with JSON.parse behaviour. If true, then duplicate keys in a mapping will override values rather than throwing an error. NOTE: This function **does not** understand multi-document sources, it throws exception on those. 
NOTE: JS-YAML **does not** support schema-specific tag resolution restrictions. So, the JSON schema is not as strictly defined in the YAML specification. It allows numbers in any notation, use `Null` and `NULL` as `null`, etc. The core schema also has no such restrictions. It allows binary notation for integers. ### load (string [ , options ]) **Use with care with untrusted sources**. The same as `safeLoad()` but uses `DEFAULT_FULL_SCHEMA` by default - adds some JavaScript-specific types: `!!js/function`, `!!js/regexp` and `!!js/undefined`. For untrusted sources, you must additionally validate object structure to avoid injections: ``` javascript var untrusted_code = '"toString": !<tag:yaml.org,2002:js/function> "function (){very_evil_thing();}"'; // I'm just converting that string, what could possibly go wrong? require('js-yaml').load(untrusted_code) + '' ``` ### safeLoadAll (string [, iterator] [, options ]) Same as `safeLoad()`, but understands multi-document sources. Applies `iterator` to each document if specified, or returns array of documents. ``` javascript var yaml = require('js-yaml'); yaml.safeLoadAll(data, function (doc) { console.log(doc); }); ``` ### loadAll (string [, iterator] [ , options ]) Same as `safeLoadAll()` but uses `DEFAULT_FULL_SCHEMA` by default. ### safeDump (object [ , options ]) Serializes `object` as a YAML document. Uses `DEFAULT_SAFE_SCHEMA`, so it will throw an exception if you try to dump regexps or functions. However, you can disable exceptions by setting the `skipInvalid` option to `true`. options: - `indent` _(default: 2)_ - indentation width to use (in spaces). - `skipInvalid` _(default: false)_ - do not throw on invalid types (like function in the safe schema) and skip pairs and single values with such types. - `flowLevel` (default: -1) - specifies level of nesting, when to switch from block to flow style for collections. -1 means block style everwhere - `styles` - "tag" => "style" map. Each tag may have own set of styles. 
- `schema` _(default: `DEFAULT_SAFE_SCHEMA`)_ specifies a schema to use. - `sortKeys` _(default: `false`)_ - if `true`, sort keys when dumping YAML. If a function, use the function to sort the keys. - `lineWidth` _(default: `80`)_ - set max line width. - `noRefs` _(default: `false`)_ - if `true`, don't convert duplicate objects into references - `noCompatMode` _(default: `false`)_ - if `true` don't try to be compatible with older yaml versions. Currently: don't quote "yes", "no" and so on, as required for YAML 1.1 - `condenseFlow` _(default: `false`)_ - if `true` flow sequences will be condensed, omitting the space between `a, b`. Eg. `'[a,b]'`, and omitting the space between `key: value` and quoting the key. Eg. `'{"a":b}'` Can be useful when using yaml for pretty URL query params as spaces are %-encoded. The following table show availlable styles (e.g. "canonical", "binary"...) available for each tag (.e.g. !!null, !!int ...). Yaml output is shown on the right side after `=>` (default setting) or `->`: ``` none !!null "canonical" -> "~" "lowercase" => "null" "uppercase" -> "NULL" "camelcase" -> "Null" !!int "binary" -> "0b1", "0b101010", "0b1110001111010" "octal" -> "01", "052", "016172" "decimal" => "1", "42", "7290" "hexadecimal" -> "0x1", "0x2A", "0x1C7A" !!bool "lowercase" => "true", "false" "uppercase" -> "TRUE", "FALSE" "camelcase" -> "True", "False" !!float "lowercase" => ".nan", '.inf' "uppercase" -> ".NAN", '.INF' "camelcase" -> ".NaN", '.Inf' ``` Example: ``` javascript safeDump (object, { 'styles': { '!!null': 'canonical' // dump null as ~ }, 'sortKeys': true // sort object keys }); ``` ### dump (object [ , options ]) Same as `safeDump()` but without limits (uses `DEFAULT_FULL_SCHEMA` by default). Supported YAML types -------------------- The list of standard YAML tags and corresponding JavaScipt types. See also [YAML tag discussion](http://pyyaml.org/wiki/YAMLTagDiscussion) and [YAML types repository](http://yaml.org/type/). 
``` !!null '' # null !!bool 'yes' # bool !!int '3...' # number !!float '3.14...' # number !!binary '...base64...' # buffer !!timestamp 'YYYY-...' # date !!omap [ ... ] # array of key-value pairs !!pairs [ ... ] # array or array pairs !!set { ... } # array of objects with given keys and null values !!str '...' # string !!seq [ ... ] # array !!map { ... } # object ``` **JavaScript-specific tags** ``` !!js/regexp /pattern/gim # RegExp !!js/undefined '' # Undefined !!js/function 'function () {...}' # Function ``` Caveats ------- Note, that you use arrays or objects as key in JS-YAML. JS does not allow objects or arrays as keys, and stringifies (by calling `toString()` method) them at the moment of adding them. ``` yaml --- ? [ foo, bar ] : - baz ? { foo: bar } : - baz - baz ``` ``` javascript { "foo,bar": ["baz"], "[object Object]": ["baz", "baz"] } ``` Also, reading of properties on implicit block mapping keys is not supported yet. So, the following YAML document cannot be loaded. ``` yaml &anchor foo: foo: bar *anchor: duplicate key baz: bat *anchor: duplicate key ``` Breaking changes in 2.x.x -> 3.x.x ---------------------------------- If you have not used __custom__ tags or loader classes and not loaded yaml files via `require()`, no changes are needed. Just upgrade the library. Otherwise, you should: 1. Replace all occurrences of `require('xxxx.yml')` by `fs.readFileSync()` + `yaml.safeLoad()`. 2. rewrite your custom tags constructors and custom loader classes, to conform the new API. See [examples](https://github.com/nodeca/js-yaml/tree/master/examples) and [wiki](https://github.com/nodeca/js-yaml/wiki) for details. License ------- View the [LICENSE](https://github.com/nodeca/js-yaml/blob/master/LICENSE) file (MIT).
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/js-yaml/README.md
0.771585
0.839306
README.md
pypi
import board import neopixel import time class NeoPixelStrip(): """A lightweight wrapper around the NeoPixel LED Strip Usage: led_strip = NeoPixelStrip() led_strip.on(255, 0, 0) # R, G, B time.sleep(1) led_strip.off() """ # Singleton Object Pattern __instance = None def __new__(cls, led_pin=board.D18, num_leds=8, brightness=1.0): if NeoPixelStrip.__instance is None: NeoPixelStrip.__instance = object.__new__(cls) NeoPixelStrip.__instance.__class_init__(led_pin, num_leds, brightness) return NeoPixelStrip.__instance @classmethod def __class_init__(cls, led_pin=board.D18, num_leds=8, brightness=1.0): """NeoPixelStrip Initialization""" cls.led_pin = led_pin cls.num_leds = num_leds cls.brightness = brightness cls.strip = neopixel.NeoPixel(cls.led_pin, cls.num_leds, brightness=cls.brightness) def __init__(self): self.current_rgb = (0, 0, 0) def get_rgb(self): """Return the current color of the LEDs""" return self.current_rgb[0], self.current_rgb[1], self.current_rgb[2] def on(self, red, green, blue): """Turn all the LEDs to one color Args: red: red value (0-255) green: green value (0-255) blue: blue value (0-255) """ self.strip.fill((red, green, blue)) self.current_rgb = (red, green, blue) def off(self): """Turn all the LEDs off""" self.strip.fill((0, 0, 0)) def cleanup(self): """Method that's called when class is destroyed""" print('Cleanup...') self.off() self.strip.deinit() class CommandBlink: def __init__(self, function): self.led_strip = NeoPixelStrip() self.function = function def __call__(self, *args, **kwargs): # Turn LEDs blue self.led_strip.on(0, 0, 255) # Call the function self.function(*args, **kwargs) # Turn LEDs Off time.sleep(0.1) # Quick sleep in case function didn't take long self.led_strip.off() def test(): """Test for the NeoPixelStrip class""" # Create the class led_strip = NeoPixelStrip() # Test the CommandBlink Decorator @CommandBlink def foo(a): print(a) foo(0.5) time.sleep(1.0) for r, g, b in zip(range(128, 0, -1), range(0, 128), range(0, 128)): 
led_strip.on(r, g, b) time.sleep(0.01) # Yellow led_strip.on(128, 128, 0) time.sleep(1.0) # Orange led_strip.on(255, 128, 0) time.sleep(1.0) # Red led_strip.on(255, 0, 0) time.sleep(1.0) # Green led_strip.on(0, 255, 0) time.sleep(1.0) # Turn off led strip led_strip.off() led_strip.cleanup() if __name__ == '__main__': # Run the test test()
/robot_kit-0.1.5.tar.gz/robot_kit-0.1.5/robot_kit/leds.py
0.762866
0.183265
leds.py
pypi
import time import RPi.GPIO as GPIO class Ultrasonic: """Ultrasonic Sensor Class""" def __init__(self): GPIO.setwarnings(False) self.trigger_pin = 27 self.echo_pin = 22 self.time_distance_factor = 0.000058 # Time to Centimeters conversion self.timeout_distance = 1000 # Distance to return when echo timeout occurs GPIO.setmode(GPIO.BCM) GPIO.setup(self.trigger_pin, GPIO.OUT) GPIO.setup(self.echo_pin, GPIO.IN) def get_distance(self): distance_readings = [] for i in range(5): self._send_trigger_pulse() pulse_len = self._wait_for_echo() # Check for timeout if pulse_len == -1: return self.timeout_distance else: # Convert pulse time to distance distance_readings.append(pulse_len/self.time_distance_factor) min_distance = min(distance_readings) return min_distance def _send_trigger_pulse(self): """Internal Method""" GPIO.output(self.trigger_pin, 1) time.sleep(0.00015) GPIO.output(self.trigger_pin, 0) def _wait_for_echo(self, max_samples=5000): """Internal Method""" # First we wait for the ON/1 reading (which may timeout) for i in range(max_samples): if GPIO.input(self.echo_pin) == 1: # Now we time how long it takes to get the OFF/0 reading start = time.time() while GPIO.input(self.echo_pin) != 0: time.sleep(0.00001) # 10 microseconds total_time = time.time() - start return total_time # Return the Timeout value return -1 def test(): """Test for the Ultrasonic class""" import time distance_sensor = Ultrasonic() # Simply show the distance a bunch of times for i in range(50): print(distance_sensor.get_distance()) time.sleep(0.5) if __name__ == '__main__': # Run the test test()
/robot_kit-0.1.5.tar.gz/robot_kit-0.1.5/robot_kit/ultrasonic.py
0.538983
0.222721
ultrasonic.py
pypi
from robot_kit.PCA9685 import PCA9685 import time class Wheels: """A lightweight wrapper around the four servo motors driven by a PCA9685 chip Usage: wheels = Wheels() # General Movement wheels.all(1.0) # Move all wheels forward at full speed wheels.all(-0.5) # Move all wheels backward at half speed wheels.stop() # Stop all wheels # You can also do individual wheels for testing purposes wheels.left_front(1.0) # Turn the left front wheel full speed forward wheels.right_rear(-0.5) # Turn the right rear wheel half speed backward # Note: When using a 'low speed' (anything less then 0.3 or so the motors don't seem to respond well. I'm assuming this is because their might be some flaws in the example code that drives the PCA9685/PWM device """ def __init__(self, address=0x40): """Wheels Initialization""" self.address = address self.pwm = PCA9685(0x40, debug=True) self.pwm.setPWMFreq(50) # So these channels are taken from example code # FIXME: We should find out the logic/why of these channels self._wheel_channels = { 'left_front': [1, 0], 'left_rear': [2, 3], 'right_front': [7, 6], 'right_rear': [5, 4] } def all(self, speed): """Move all of the wheels at the given speed""" for wheel in ['left_front', 'left_rear', 'right_front', 'right_rear']: self._set_wheel_speed(wheel, speed) def stop(self): """Stop ALL of the wheels""" for wheel in ['left_front', 'left_rear', 'right_front', 'right_rear']: self._set_wheel_speed(wheel, 0.0) # Note: From this point on are testing methods, in normal operation # you probably shouldn't turn/operation/stop an individual wheel def left_front(self, speed): """Testing: Turn the left front wheel a certain speed Args: speed: float (range -1.0 to 1.0) """ self._set_wheel_speed('left_front', speed) def left_rear(self, speed): """Testing: Turn the left rear wheel a certain speed Args: speed: float (range -1.0 to 1.0) """ self._set_wheel_speed('left_rear', speed) def right_front(self, speed): """Testing: Turn the right front wheel a certain speed 
Args: speed: float (range -1.0 to 1.0) """ self._set_wheel_speed('right_front', speed) def right_rear(self, speed): """Testing: Turn the right rear wheel a certain speed Args: speed: float (range -1.0 to 1.0) """ self._set_wheel_speed('right_rear', speed) def test_wheels(self): """Testing: A method to test ALL wheels individually""" for wheel in ['left_front', 'left_rear', 'right_front', 'right_rear']: self.test_wheel(wheel) def test_wheel(self, wheel): """Helper method to test an individual wheel""" # Forward quarter speed and stop self._set_wheel_speed(wheel, 0.25) time.sleep(0.5) self._set_wheel_speed(wheel, 0.0) time.sleep(0.5) # Backward quarter speed and stop self._set_wheel_speed(wheel, -0.25) time.sleep(0.5) self._set_wheel_speed(wheel, 0.0) time.sleep(0.5) @staticmethod def _convert_range(value): """Internal: Convert our -1 to 1 range to the 12bit range of the PWM 'duty'""" # Check input range if value > 1.0: print('Value {:f} clamped to 1.0'.format(value)) value = 1.0 if value < -1.0: print('Value {:f} clamped to -1.0'.format(value)) value = -1.0 # Convert to 12bit (4096-1) range return int(value*4095) def _set_wheel_speed(self, wheel, speed): """Internal: This is an internal method to reduce copy/paste code""" # Look up wheel PWM channels channels = self._wheel_channels[wheel] # Convert speed to duty load duty = self._convert_range(speed) # Based on positive/negative value of duty we set one channel to 0 and one channel to duty value if duty > 0: self.pwm.setMotorPwm(channels[0], 0) self.pwm.setMotorPwm(channels[1], duty) else: self.pwm.setMotorPwm(channels[1], 0) self.pwm.setMotorPwm(channels[0], abs(duty)) def cleanup(self): """Method that's called when class is destroyed""" print('Cleanup...') def test(): """Test for the Wheels class""" wheels = Wheels() wheels.test_wheels() wheels.all(0.25) time.sleep(1.0) wheels.stop() time.sleep(1.0) wheels.cleanup() if __name__ == '__main__': # Run the test test()
/robot_kit-0.1.5.tar.gz/robot_kit-0.1.5/robot_kit/wheels.py
0.749821
0.715188
wheels.py
pypi
from robot_kit.wheels import Wheels import time class Vehicle: """A lightweight wrapper around the four servo motors driven by a PCA9685 chip Usage: vehicle = Vehicle() # General Movement vehicle.forward(1.0) # Go forward at full speed vehicle.backward(0.5) # Go backward at half speed vehicle.stop() # Stop the vehicle vehicle.turn_left(1.0) # Turn left at full speed vehicle.turn_right(0.5) # Turn right at half speed # Note: When using a 'low speed' (anything less then 0.3 or so the motors don't seem to respond well. I'm assuming this is because their might be some flaws in the example code that drives the PCA9685/PWM device """ def __init__(self): """Vehicle Initialization""" self.chip_address = 0x40 self.wheels = Wheels(address=self.chip_address) def forward(self, speed): """Move the vehicle forward at the given speed""" self.wheels.all(speed) def backward(self, speed): """Move the vehicle forward at the given speed""" self.wheels.all(-speed) def stop(self): """Stop ALL of the vehicle""" self.wheels.all(0.0) def turn_left(self, speed): """Turn left using alternate directions on vehicle""" self.wheels.left_front(-speed) self.wheels.left_rear(-speed) self.wheels.right_front(speed) self.wheels.right_rear(speed) def turn_right(self, speed): """Turn left using alternate directions on vehicle""" self.wheels.left_front(speed) self.wheels.left_rear(speed) self.wheels.right_front(-speed) self.wheels.right_rear(-speed) def cleanup(self): """Method that's called when class is destroyed""" print('Cleanup...') def test(): """Test for the Vehicle class""" vehicle = Vehicle() vehicle.forward(0.5) time.sleep(0.5) vehicle.backward(0.5) time.sleep(0.5) vehicle.turn_left(0.5) time.sleep(0.5) vehicle.turn_right(0.5) time.sleep(0.5) vehicle.stop() vehicle.cleanup() if __name__ == '__main__': # Run the test test()
/robot_kit-0.1.5.tar.gz/robot_kit-0.1.5/robot_kit/vehicle.py
0.846387
0.673661
vehicle.py
pypi
from PyQt5.QtCore import QThread, QMutex, QMutexLocker import icub_models import os import re from pathlib import Path import numpy as np import time import idyntree.swig as idyn from idyntree.visualize import MeshcatVisualizer from robot_log_visualizer.utils.utils import PeriodicThreadState class MeshcatProvider(QThread): def __init__(self, signal_provider, period): QThread.__init__(self) self._state = PeriodicThreadState.pause self.state_lock = QMutex() self._period = period self.meshcat_visualizer = MeshcatVisualizer() self._signal_provider = signal_provider self.custom_model_path = "" self.custom_package_dir = "" self.env_list = ["GAZEBO_MODEL_PATH", "ROS_PACKAGE_PATH", "AMENT_PREFIX_PATH"] @property def state(self): locker = QMutexLocker(self.state_lock) value = self._state return value @state.setter def state(self, new_state: PeriodicThreadState): locker = QMutexLocker(self.state_lock) self._state = new_state def load_model(self, considered_joints, model_name): def get_model_path_from_envs(env_list): return [ Path(f) if (env != "AMENT_PREFIX_PATH") else Path(f) / "share" for env in env_list if os.getenv(env) is not None for f in os.getenv(env).split(os.pathsep) ] def check_if_model_exist(folder_path, model): path = folder_path / Path(model) return path.is_dir() model_loader = idyn.ModelLoader() if self.custom_model_path: model_loader.loadReducedModelFromFile( self.custom_model_path, considered_joints, "urdf", [self.custom_package_dir], ) else: model_found_in_env_folders = False for folder in get_model_path_from_envs(self.env_list): if check_if_model_exist(folder, model_name): folder_model_path = folder / Path(model_name) model_filenames = [ folder_model_path / Path(f) for f in os.listdir(folder_model_path.absolute()) if re.search("[a-zA-Z0-9_]*\.urdf", f) ] if model_filenames: model_found_in_env_folders = True self.custom_model_path = str(model_filenames[0]) break if not model_found_in_env_folders: self.custom_model_path = 
str(icub_models.get_model_file(model_name)) model_loader.loadReducedModelFromFile( self.custom_model_path, considered_joints ) if not model_loader.isValid(): return False self.meshcat_visualizer.load_model( model_loader.model(), model_name="robot", color=0.8 ) return True def run(self): base_rotation = np.eye(3) base_position = np.array([0.0, 0.0, 0.0]) while True: start = time.time() if self.state == PeriodicThreadState.running: # These are the robot measured joint positions in radians joints = self._signal_provider.data[self._signal_provider.root_name][ "joints_state" ]["positions"]["data"] self.meshcat_visualizer.set_multibody_system_state( base_position, base_rotation, joint_value=joints[self._signal_provider.index, :], model_name="robot", ) sleep_time = self._period - (time.time() - start) if sleep_time > 0: time.sleep(sleep_time) if self.state == PeriodicThreadState.closed: return
/robot_log_visualizer-0.6.0-py3-none-any.whl/robot_log_visualizer/robot_visualizer/meshcat_provider.py
0.719581
0.175397
meshcat_provider.py
pypi
# PyQt from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar from matplotlib.figure import Figure import matplotlib.animation as animation class MatplotlibViewerCanvas(FigureCanvas): """ Inherits from FigureCanvasQTAgg in order to integrate with PyQt. """ def __init__(self, parent, signal_provider, period): # create a new figure self.fig = Figure(dpi=100) # call FigureCanvas constructor FigureCanvas.__init__(self, self.fig) # set the parent of this FigureCanvas self.setParent(parent) # set signal provider self.signal_provider = signal_provider # setup the plot and the animations self.index = 0 # add plot to the figure self.axes = self.fig.add_subplot() # set axes labels self.axes.set_xlabel("time [s]") self.axes.set_ylabel("value") self.axes.grid(True) # start the vertical line animation (self.vertical_line,) = self.axes.plot([], [], "-", lw=1, c="k") self.periond_in_ms = int(period * 1000) # active paths self.active_paths = {} self.vertical_line_anim = animation.FuncAnimation( self.fig, self.update_vertical_line, init_func=self.init_vertical_line, interval=self.periond_in_ms, blit=True, ) # add plot toolbar from matplotlib self.toolbar = NavigationToolbar(self, self) def quit_animation(self): # https://stackoverflow.com/questions/32280140/cannot-delete-matplotlib-animation-funcanimation-objects # this is to close the event associated to the animation # this is required with matplotlib 3.1.2 but not with 3.5.1. 
# However this code will run with both version of matplotlib if self.vertical_line_anim: self.vertical_line_anim._stop() def update_plots(self, paths, legends): for path, legend in zip(paths, legends): path_string = "/".join(path) legend_string = "/".join(legend[1:]) if path_string not in self.active_paths.keys(): data = self.signal_provider.data for key in path[:-1]: data = data[key] try: datapoints = data["data"][:, int(path[-1])] except IndexError: # This happens in the case the variable is a scalar. datapoints = data["data"][:] timestamps = data["timestamps"] - self.signal_provider.initial_time (self.active_paths[path_string],) = self.axes.plot( timestamps, datapoints, label=legend_string ) paths_to_be_canceled = [] for active_path in self.active_paths.keys(): path = active_path.split("/") if path not in paths: paths_to_be_canceled.append(active_path) for path in paths_to_be_canceled: self.active_paths[path].remove() self.active_paths.pop(path) self.axes.set_xlim( 0, self.signal_provider.end_time - self.signal_provider.initial_time ) # Since a new plot has been added/removed we delete the old animation and we create a new one # TODO: this part could be optimized self.vertical_line_anim._stop() self.axes.legend() self.vertical_line_anim = animation.FuncAnimation( self.fig, self.update_vertical_line, init_func=self.init_vertical_line, interval=self.periond_in_ms, blit=True, ) def update_index(self, index): self.index = index def init_vertical_line(self): self.vertical_line.set_data([], []) return self.vertical_line, *(self.active_paths.values()) def update_vertical_line(self, _): """ Update the vertical line """ current_time = self.signal_provider.current_time # Draw vertical line at current index self.vertical_line.set_data([current_time, current_time], self.axes.get_ylim()) return self.vertical_line, *(self.active_paths.values())
/robot_log_visualizer-0.6.0-py3-none-any.whl/robot_log_visualizer/plotter/matplotlib_viewer_canvas.py
0.748352
0.417034
matplotlib_viewer_canvas.py
pypi
class RidexxCommands: """ side file runner """ GLOBAL = {} @staticmethod def add_selection(driver, element, value): # TODO return SeleniumIDERunner.do_nothing() @staticmethod def answer_on_next_prompt(driver, element, value): alert = driver.switchTo().alert() alert.accept() @staticmethod def assert_alert(driver, element, value): alert = driver.switchTo().alert() alert.accept() @staticmethod def assert_checked(driver, element, value): assert element.is_selected() @staticmethod def assert_not_checked(driver, element, value): assert not element.is_selected() @staticmethod def assert_confirmation(driver, element, value): alert = driver.switchTo().alert() assert value == alert.getText() @staticmethod def assert_editable(driver, element, value): element.is_enabled() assert element.get_attribute("readonly") @staticmethod def assert_not_editable(driver, element, value): element.is_enabled() assert not element.get_attribute("readonly") @staticmethod def assert_element_present(driver, element, value): assert element.length > 0 @staticmethod def assert_element_not_present(driver, element, value): assert element.length == 0 @staticmethod def assert_prompt(driver, element, value): assert value == driver.switchTo().alert().getText() @staticmethod def assert_selected_value(driver, element, value): assert element.getAttribute("value") == value @staticmethod def assert_not_selected_value(driver, element, value): assert element.getAttribute("value") != value @staticmethod def assert_text(driver, element, value): assert element.get_text() == value @staticmethod def assert_title(driver, element, value): assert driver.get_title() == value @staticmethod def assert_value(driver, element, value): assert element.get_value() == value @staticmethod def choose_cancel_on_next_confirmation(driver, element, value): driver.switchTo().alert().dismiss() @staticmethod def choose_cancel_on_next_prompt(driver, element, value): alert = driver.switchTo().alert() alert.sendKeys(value) alert.accept() 
@staticmethod def choose_ok_on_next_confirmation(driver, element, value): driver.switchTo().alert().accept() @staticmethod def click(driver, element, value): element.click() @staticmethod def click_at(driver, element, value): element.click() @staticmethod def double_click_at(driver, element, value): driver.actions().doubleClick(element).perform() @staticmethod def drag_and_drop_to_object(driver, element, value): to_object = driver.findElement(value) driver.actions().dragAndDrop(element, to_object).perform() @staticmethod def echo(driver, element, value): print(value) @staticmethod def edit_content(driver, element, value): driver.executeScript("if(arguments[0].contentEditable === 'true') {arguments[0].innerHTML = '{}'}".format(value)) @staticmethod def mouse_down_at(driver, element, value): driver.actions().mouseDown(element).perform() @staticmethod def mouse_move_at(driver, element, value): driver.actions().mouseDown(element).perform() @staticmethod def mouse_out(driver, element, value): size = element.size offsetx = (size['width'] / 2) + 1 offsety = (size['height'] / 2) + 1 driver.actions().move_to_element(element).move_by_offset(offsetx, offsety) # driver.actions().mouseOut(element).perform() @staticmethod def mouse_over(driver, element, value): driver.actions().move_to_element(element).perform() @staticmethod def mouse_up_at(driver, element, value): driver.actions().release(element).perform() @staticmethod def open(driver, element, value): driver.get(element) @staticmethod def pause(driver, element, value): from time import sleep sleep(value) @staticmethod def remove_selection(driver, element, value): element.click() @staticmethod def run_script(driver, element, value): driver.execute_script(value) @staticmethod def select(driver, element, value): element.option.click() @staticmethod def select_frame(driver, element, value): driver.switchTo().frame(value) @staticmethod def select_window(driver, element, value): driver.switchTo().window(value) @staticmethod def 
send_keys(driver, element, value): element.send_keys(value) @staticmethod def set_speed(driver, element, value): SeleniumIDERunner.do_nothing() @staticmethod def store(driver, element, value): # driver.execute_script("var {} = '{}';".format(element, value)) SeleniumIDERunner.GLOBAL[element] = value @staticmethod def store_text(driver, element, value): SeleniumIDERunner.GLOBAL[value] = element.get_text() @staticmethod def store_title(driver, element, value): SeleniumIDERunner.GLOBAL[value] = element.get_text() @staticmethod def submit(driver, element, value): element.submit() @staticmethod def type(driver, element, value): element.send_keys(value) @staticmethod def verify_checked(driver, element, value): assert element.is_selected(value) @staticmethod def verify_not_checked(driver, element, value): assert not element.is_selected(value) @staticmethod def verify_editable(driver, element, value): assert element.is_enabled() or element.get_attribute("readonly") @staticmethod def verify_not_editable(driver, element, value): assert not (element.is_enabled() or element.get_attribute("readonly")) @staticmethod def verify_element_present(driver, element, value): assert element is not None @staticmethod def verify_element_not_present(driver, element, value): assert element is None @staticmethod def verify_selected_value(driver, element, value): assert element.get_attribute("value") == value @staticmethod def verify_not_selected_value(driver, element, value): assert element.get_attribute("value") != value @staticmethod def verify_text(driver, element, value): assert element.get_text() == value @staticmethod def verify_title(driver, element, value): assert element.get_title() == value @staticmethod def verify_value(driver, element, value): assert element.get_attribute("value") == value @staticmethod def webdriver_answer_on_next_prompt(driver, element, value): SeleniumIDERunner.answer_on_next_prompt(driver, element, value) @staticmethod def 
webdriver_choose_cancel_on_next_confirmation(driver, element, value): SeleniumIDERunner.answer_on_next_prompt(driver, element, value) @staticmethod def webdriver_choose_cancel_on_next_prompt(driver, element, value): SeleniumIDERunner.choose_cancel_on_next_prompt(driver, element, value) @staticmethod def webdriver_choose_ok_on_next_confirmation(driver, element, value): SeleniumIDERunner.choose_ok_on_next_confirmation(driver, element, value)
/robot_md_launcher-0.2.tar.gz/robot_md_launcher-0.2/robot_md_launcher/ridexx_commands.py
0.420243
0.593197
ridexx_commands.py
pypi
import logging, enum import networkx as nx import io import kgprim.core as gr class Joint: ''' A placeholder for a joint of a mechanism. A joint only has a name and a kind, the latter being one of the values defined in `JointKind`. A joint always connects two and only two `Link`s. ''' def __init__(self, name, kind): self.name = name self.kind = kind def __eq__(self, rhs): return isinstance(rhs, Joint) and\ self.name==rhs.name and self.kind==rhs.kind def __hash__(self): return 31 * hash(self.name) + 97 * hash(self.kind) def __str__(self): return self.name def __repr__(self): return self.name class JointKind(enum.Enum): prismatic="prismatic", revolute ="revolute" class Link(gr.RigidBody): ''' A placeholder for a rigid link of a mechanism. A `Link` is just a named instance of `kgprim.gr.RigidBody`. ''' def __init__(self, name): super().__init__(name) def __eq__(self, rhs): return isinstance(rhs, Link) and self.name==rhs.name def __hash__(self): return 47 * hash(self.name) def __str__(self): return self.name class KPair: ''' A Kinematic-Pair, that is a pair of links connected by a joint ''' def __init__(self, joint, link1, link2): self.joint = joint self.link1 = link1 self.link2 = link2 class Robot: ''' The connectivity model of an articulated robot. ''' def __init__(self, name, links, joints, pairs): self.log = logging.getLogger('robot') self._name = name self.links = links # by-name map self.joints= joints # by-name map # A map from joint to links in the pair self.pairs = {kp.joint: (kp.link1, kp.link2) for kp in pairs} self.nB = len(self.links) # number of bodies self.nJ = len(self.joints) # number of joints self.nLoopJ = self.nJ - (self.nB - 1) # number of loop joints # The connectivity graph. 
# Note that it can be used as a map bewteen link-pairs to joint self.graph = nx.Graph() self.graph.add_edges_from( [ (p.link1, p.link2, {'joint': p.joint}) for p in pairs ] ) self.loops = nx.cycle_basis(self.graph) self._checks() @property def name(self) : return self._name def hasLoops(self): return self.nLoopJ > 0 def linkPairToJoint(self, link1, link2): ''' The `Joint` connecting the two given links, `None` if the links are not part of a pair. ''' if not self.graph.has_edge(link1, link2) : return None return self.graph[link1][link2]['joint'] def jointToLinkPair(self, joint): ''' The `KPair` object whose joint is the given joint ''' return self.pairs[joint] def path(self, link1, link2): return nx.shortest_path(self.graph, link1, link2) def __str__(self): text = 'Robot ' + self.name + '\n' for l in self.links.values(): text += ' ' + l.name + '\n' text += '\n' for ed in self.graph.edges(data=True): text += ed[2]['joint'].name + ' between ' + ed[0].name + ' and ' + ed[1].name + '\n' return text def _checks(self): if not nx.connected.is_connected( self.graph ) : self.log.error('The robot graph is not connected') else : self.log.debug("OK, the robot graph is connected") def fromDict(data): ''' Create a connectivity model from input data in a dictionary. The dictionary is expected to have the following format: - a key 'name' with the robot name - a key 'links' which is a list of robot link names - a key 'joints' which is a list of dictionaries, each having the entries 'name' and 'kind' which are both strings - a key 'pairs' which is a list of dictionaries, each having three entries: 'joint', 'link1', 'link2', each having a name as the value This format is the same as the YAML connectivity model format used in this, project. See the sample models in the repository. 
''' rname = data['name'] links = { name:Link(name) for name in data['links'] } joints= { j['name']:Joint(j['name'], JointKind[j['kind']]) for j in data['joints'] } pairs = [ KPair( joints[ p['joint'] ], links [ p['link1'] ], links [ p['link2'] ] ) for p in data['pairs'] ] return Robot(rname, links, joints, pairs) def graphToString(graph): text = '' for link in graph.nodes() : text += link.name + ' ' return text
/robot-model-tools-0.1.0.tar.gz/robot-model-tools-0.1.0/src/robmodel/connectivity.py
0.859487
0.510313
connectivity.py
pypi
import networkx as nx class TreeUtils: ''' Helper object for the inspection of the tree structure of a robot model. This class works only with an ordered connectivity model with tree topology. An ordered connectivity model is an instance of `robmodel.ordering.Robot`. With an ordered kinematic tree (i.e. a model with a numbering scheme for the links) additional parentship relations can be established among the robot links. This class exposes such relations. ''' def __init__(self, rob): ''' The `rob` argument must be a kinematic tree model with a numbering scheme. ''' if not hasattr(rob, 'dgraph') : raise RuntimeError('''Tree-utils can only be constructed with an ordered robot model, i.e. one whose treeModel graph is directed''') if rob.hasLoops() : raise RuntimeError("Kinematic loops detected, TreeUtils only accepts kinematic trees") self.robot = rob self.parentToChild = rob.dgraph self.childToParent = nx.reverse_view(self.parentToChild) def connectingJoint(self, l1, l2): ''' The joint joining the two given links, None if there is no such a joint. ''' return self.robot.linkPairToJoint(l1, l2) def parent(self, l): ''' The parent link of the given link ''' if l == self.robot.base : return None return self.childToParent.neighbors(l).__next__() def children(self, l): ''' The list of children of the given link ''' return self.parentToChild.neighbors(l) def isLeaf(self, l): ''' Return True if the given link has no children ''' return len(self.parentToChild._succ[l]) == 0 def supportingJoint(self, l): ''' The joint for which the given link is the successor. None if the link is the robot base. ''' if l == self.robot.base : return None return self.connectingJoint(l, self.parent(l)) def ancestorOf(self, possibleAncestor, target): ''' Tells whether a link is an ancestor of another link. Any link is an ancestor/descendant of itself. Returns True if `target` belongs to a kinematic subtree rooted at `possibleAncestor`, False otherwise. 
''' if ((possibleAncestor == None) or (target == None)): return False if self.robot.linkNum(possibleAncestor) > self.robot.linkNum(target) : return False return possibleAncestor==target or self.ancestorOf(possibleAncestor, self.parent(target)) def lowestCommonAncestor(self, l1, l2): ''' The lowest common ancestor of the two given links. The lowest common ancestor is the deepest (i.e. farthest from the base) link which is an ancestor of both the arguments. ''' if ((l1 == None) or (l2 == None)): return None lca = None current1 = l1 current2 = l2 while lca == None : if self.ancestorOf(current1, l2): lca = current1 else: current1 = self.parent(current1) #// cant be null, as that implies current1 is the base, and the base is always an ancestor if self.ancestorOf(current2, l1): lca = current2 else: current2 = self.parent(current2) return lca
/robot-model-tools-0.1.0.tar.gz/robot-model-tools-0.1.0/src/robmodel/treeutils.py
0.841142
0.71561
treeutils.py
pypi
import logging

import networkx as nx

import robmodel.connectivity

log = logging.getLogger(__name__)

class Robot(robmodel.connectivity.Robot):
    '''
    The composition of a basic robot model with a numbering scheme for the links.

    A numeric code for each link and joint is required. The codes for the
    links must be integers from 0 to NB-1, in sequence, where NB is the
    number of links. Tree joints do not need an explicit code, because they
    get the same code as the successor link. Loop joints, on the other hand,
    do need to be given a code by the user. Joint codes range from 1 to NJ,
    where NJ is the number of joints.

    In conclusion, a code for each link and each loop joint is required.
    This amounts to a number of codes equal to the total number of joints
    plus 1 (for the robot base link).
    '''

    def __init__(self, robotm, numbering):
        '''
        Arguments:
        - `robotm`: a connectivity model, typically an instance of
           `robmodel.connectivity.Robot`
        - `numbering`: a dictionary with two keys, "robot" and "nums". The
           value of "robot" must be the same as the connectivity model name.
           The value of "nums" must be another dictionary with link names and
           loop joint names as keys, and numeric codes as values.

        Raises RuntimeError/ValueError if the two arguments are inconsistent.
        '''
        if 'robot' not in numbering :
            raise RuntimeError("Expected key 'robot' in the numbering-scheme dictionary")
        if robotm.name != numbering['robot'] :
            # bug fix: `numbering` is a dict, so the original attribute
            # access `numbering.robot` raised AttributeError instead of
            # the intended ValueError
            raise ValueError('''The given numbering scheme and robot model do not match ({0} vs {1})'''
                  .format(numbering['robot'], robotm.name))
        nums = numbering['nums']
        if not self._consistency(robotm, nums) :
            raise RuntimeError("Inconsistent arguments")

        self.connectivity = robotm
        self.itemNameToCode = nums
        self.codeToLink  = {nums[l.name] : l for l in robotm.links.values() }
        self.codeToJoint = {}
        self.jointToCode = {}
        for joint in robotm.joints.values() :
            # If there is an entry for a joint, it means that was selected as
            # a loop joint
            if joint.name in nums:
                self.jointToCode[ joint ] = nums[joint.name]
                self.codeToJoint[ nums[joint.name] ] = joint
            else :
                # Otherwise it is a tree joint, and it gets the highest code
                # among the codes of the two links of its pair
                code = max( [nums[l.name] for l in robotm.jointToLinkPair(joint)] )
                self.jointToCode[ joint ] = code
                self.codeToJoint[ code ] = joint

        dGraph = nx.DiGraph()
        dGraph.add_nodes_from( robotm.graph ) # links

        # Add directed edges starting from the link with the lower ID
        self.orderedPairs = {}
        for joint in robotm.joints.values() :
            kp = robotm.jointToLinkPair( joint )
            if nums[kp[0].name] < nums[kp[1].name]:
                dGraph.add_edge( kp[0], kp[1], joint=joint)
                self.orderedPairs[joint] = (kp[0], kp[1])
            else:
                dGraph.add_edge( kp[1], kp[0], joint=joint)
                self.orderedPairs[joint] = (kp[1], kp[0])
        self.dgraph = dGraph

    @property
    def base(self):
        '''The robot base, i.e. the link whose numeric code is 0.'''
        return self.codeToLink[0]

    def linkNum(self, link):
        '''The numeric code of the given link.'''
        return self.itemNameToCode[link.name]

    def jointNum(self, joint):
        '''The numeric code of the given joint.'''
        return self.jointToCode[joint]

    def predecessor(self, joint):
        '''The link of the joint's pair with the lower numeric code.'''
        return self.orderedPairs[joint][0]

    def successor(self, joint):
        '''The link of the joint's pair with the higher numeric code.'''
        return self.orderedPairs[joint][1]

    def __getattr__(self, name):
        # Delegate attributes not found on this instance to the wrapped
        # connectivity model (__getattr__ only fires for missing names).
        return getattr(self.connectivity, name)

    def _consistency(self, connectivity, nums):
        '''
        Validate the numbering scheme `nums` against the connectivity model.

        Returns False for fatal problems (wrong number of entries, a link
        without a code). Duplicate, out-of-range or missing IDs only
        produce warnings.
        '''
        links = connectivity.links
        if len(nums) != (connectivity.nJ + 1) :
            log.error("Wrong number of entries in the numbering scheme (found {0}, expected {1})"
                       .format(len(nums), connectivity.nJ+1) )
            return False
        for link_name in links.keys() :
            if link_name not in nums :
                log.error("Link '{l}' not found in the numbering scheme".format(l=link_name))
                return False
        # NOTE(review): loop-joint names legitimately appear in `nums`, so
        # this warning also fires for them — TODO confirm intended behavior
        for name in nums.keys() :
            if name not in links :
                # bug fix: the original referenced the undefined name
                # `robotm` here, raising NameError whenever this fired
                log.warning("Name '{l}' (referenced in the numbering scheme) is not a link of robot '{r}'"
                             .format(l=name, r=connectivity.name))

        # Check for duplicate or missing IDs (renamed locals so as not to
        # shadow the builtins `max` and `id`)
        max_id = len(links) - 1
        ids = sorted( nums.values() )
        prev = None   # sentinel, never equal to a numeric code
        for code in ids :
            if code == prev :
                log.warning("Duplicate link ID {0}".format(code))
            if code > max_id :
                log.warning("Link ID {0} out of range".format(code))
            prev = code
        for i in range(0, max_id+1) :
            if i not in ids :
                log.warning("Missing link ID {0}".format(i))
        return True

    def __str__(self):
        text = 'Robot ' + self.name + '\n'
        for code in range(0, len(self.links)) :
            text += "Link {0} (# {1})\n".format(self.codeToLink[code].name, code)
        text += '\n'
        for joint in self.joints.values() :
            lp = self.jointToLinkPair(joint)
            text += "Joint {0} (# {1}) between {2} and {3}\n".format(
                joint.name, self.jointNum(joint), lp[0].name, lp[1].name)
        return text


def numberingSchemeFromDict(data):
    '''
    Normalize a dictionary into the numbering-scheme format expected by
    `Robot.__init__`, keeping only the 'robot' and 'nums' entries.
    '''
    return { 'robot' : data['robot'], 'nums' : data['nums'] }
/robot-model-tools-0.1.0.tar.gz/robot-model-tools-0.1.0/src/robmodel/ordering.py
0.762513
0.650401
ordering.py
pypi