repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
moonso/vcf_parser
|
vcf_parser/parser.py
|
cli
|
python
|
def cli(variant_file, vep, split):
    """Parses a vcf file.

    Usage:
        parser infile.vcf
    If pipe:
        parser -
    """
    from datetime import datetime
    from pprint import pprint as pp
    # A '-' argument means the vcf is piped on stdin
    if variant_file == '-':
        my_parser = VCFParser(fsock=sys.stdin, split_variants=split)
    else:
        my_parser = VCFParser(infile=variant_file, split_variants=split)
    start = datetime.now()
    nr_of_variants = 0
    # Echo the reconstructed header section before the variants
    for header_line in my_parser.metadata.print_header():
        print(header_line)
    # enumerate keeps the running count in sync with what is printed
    for nr_of_variants, variant in enumerate(my_parser, 1):
        pp(variant)
    print('Number of variants: %s' % nr_of_variants)
|
Parses a vcf file.\n
\n
Usage:\n
parser infile.vcf\n
If pipe:\n
parser -
|
train
|
https://github.com/moonso/vcf_parser/blob/8e2b6724e31995e0d43af501f25974310c6b843b/vcf_parser/parser.py#L284-L305
|
[
"def print_header(self):\n \"\"\"Returns a list with the header lines if proper format\"\"\"\n lines_to_print = []\n lines_to_print.append('##fileformat='+self.fileformat)\n if self.filedate:\n lines_to_print.append('##fileformat='+self.fileformat)\n\n for filt in self.filter_dict:\n lines_to_print.append(self.filter_dict[filt])\n for form in self.format_dict:\n lines_to_print.append(self.format_dict[form])\n for info in self.info_dict:\n lines_to_print.append(self.info_dict[info])\n for contig in self.contig_dict:\n lines_to_print.append(self.contig_dict[contig])\n for alt in self.alt_dict:\n lines_to_print.append(self.alt_dict[alt])\n for other in self.other_dict:\n lines_to_print.append(self.other_dict[other])\n lines_to_print.append('#'+ '\\t'.join(self.header))\n return lines_to_print\n"
] |
#!/usr/bin/env python
# encoding: utf-8
"""
vcf_parser.py
Parse a vcf file.
Includes a header class for storing information about the headers.
Create variant objects and a dictionary with individuals that have a dictionary with genotypes for each variant.
Thanks to PyVCF for heaader parser and more...:
Copyright (c) 2011-2012, Population Genetics Technologies Ltd, All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the Population Genetics Technologies Ltd nor the names of
its contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Copyright (c) 2011 John Dougherty
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Created by Måns Magnusson on 2013-01-17.
Copyright (c) 2013 __MyCompanyName__. All rights reserved.
"""
from __future__ import print_function
import sys
import os
import gzip
import re
import pkg_resources
import click
import locale
import logging
from codecs import open, getreader
from vcf_parser import (Genotype, HeaderParser)
from vcf_parser.utils import (format_variant, split_variants)
#### Parser: ####
class VCFParser(object):
    """Parse a vcf file or stream and iterate over its variants.

    The header section is consumed on construction and exposed through
    ``self.metadata``. Iterating the parser yields one dictionary per
    variant line (or one per alternative allele when ``split_variants``
    is True). When constructed without infile/fsock, variants can be
    added manually with :meth:`add_variant` and iterated afterwards.
    """

    def __init__(self, infile=None, fsock=None, split_variants=False,
                 check_info=False, allele_symbol='0', fileformat=None):
        """Set up the parser and consume the header section.

        Args:
            infile (str): Path to a '.vcf' or '.vcf.gz' file.
            fsock (file): An open stream (e.g. sys.stdin) with vcf data.
            split_variants (bool): If multiallelic calls should be split
                into one variant per alternative allele.
            check_info (bool): If INFO entries should be validated against
                the header definitions.
            allele_symbol (str): Symbol used when splitting genotype calls.
            fileformat (str): Mandatory when building a vcf manually
                (i.e. when neither infile nor fsock is given).

        Raises:
            IOError: If the file ending is unsupported, the input does not
                start with a metadata line, or fileformat is missing when
                no input is given.
        """
        super(VCFParser, self).__init__()
        self.logger = logging.getLogger(__name__)
        self.vcf = None
        self.logger.debug("Set self.vcf to:{0}".format(self.vcf))
        # True until __iter__ has emitted the first (buffered) variant line
        self.beginning = True
        self.infile = infile
        self.fsock = fsock
        self.split_variants = split_variants
        self.logger.info("Split variants = {0}".format(self.split_variants))
        self.fileformat = fileformat
        self.check_info = check_info
        self.logger.info("check info = {0}".format(self.check_info))
        self.allele_symbol = allele_symbol
        self.logger.info("Allele symbol = {0}".format(self.allele_symbol))
        self.logger.info("Initializing HeaderParser")
        self.metadata = HeaderParser()
        # These are the individuals described in the header
        self.individuals = []
        # This is the header line of the vcf
        self.header = []
        # If there are no file or stream the user can add variants manually.
        # These will be added to self.variants
        self.variants = []
        if (fsock or infile):
            if fsock:
                if not infile and hasattr(fsock, 'name'):
                    self.logger.info("Reading vcf form stdin")
                    if sys.version_info < (3, 0):
                        # Python 2 needs an explicit utf-8 reader on stdin
                        self.logger.info("Using codecs to read stdin")
                        sys.stdin = getreader('utf-8')(fsock)
                self.vcf = sys.stdin
            else:
                self.logger.info("Reading vcf form file {0}".format(infile))
                file_name, file_extension = os.path.splitext(infile)
                if file_extension == '.gz':
                    self.logger.debug("Vcf is zipped")
                    self.vcf = getreader('utf-8')(gzip.open(infile), errors='replace')
                elif file_extension == '.vcf':
                    self.vcf = open(infile, mode='r', encoding='utf-8', errors='replace')
                else:
                    raise IOError("File is not in a supported format!\n"
                                  " Or use correct ending(.vcf or .vcf.gz)")
            self.logger.debug("Reading first line.")
            # Buffer the first line; __iter__ treats it as a special case
            self.next_line = self.vcf.readline().rstrip()
            self.current_line = self.next_line
            # First line is always a metadata line
            if not self.next_line.startswith('#'):
                raise IOError("VCF files allways have to start with a metadata line.")
            self.metadata.parse_meta_data(self.next_line)
            # Parse the metadata lines ('##' = metadata, '#' = header line)
            while self.next_line.startswith('#'):
                if self.next_line.startswith('##'):
                    self.metadata.parse_meta_data(self.next_line)
                elif self.next_line.startswith('#'):
                    self.metadata.parse_header_line(self.next_line)
                self.next_line = self.vcf.readline().rstrip()
            self.individuals = self.metadata.individuals
            self.logger.info("Setting self.individuals to {0}".format(
                self.individuals
            ))
            self.header = self.metadata.header
            self.vep_header = self.metadata.vep_columns
        else:
            if not self.fileformat:
                raise IOError("Please initialize with a fileformat.")
            else:
                self.metadata.fileformat = self.fileformat

    def add_variant(self, chrom, pos, rs_id, ref, alt, qual, filt, info, form=None, genotypes=[]):
        """
        Add a variant to the parser.

        This function is for building a vcf. It takes the relevant parameters
        and make a vcf variant in the proper format.

        Args:
            chrom, pos, rs_id, ref, alt, qual, filt, info (str): The first
                eight vcf columns of the variant.
            form (str): The FORMAT column, if genotype columns are given.
            genotypes (list): One genotype string per individual.
        """
        variant_info = [chrom, pos, rs_id, ref, alt, qual, filt, info]
        if form:
            variant_info.append(form)
        for individual in genotypes:
            variant_info.append(individual)
        variant_line = '\t'.join(variant_info)
        variant = format_variant(
            line=variant_line,
            header_parser=self.metadata,
            check_info=self.check_info
        )
        if not (self.split_variants and len(variant['ALT'].split(',')) > 1):
            self.variants.append(variant)
        # If multiple alternative and split_variants we must split the variant
        else:
            for splitted_variant in split_variants(
                    variant_dict=variant,
                    header_parser=self.metadata,
                    allele_symbol=self.allele_symbol):
                self.variants.append(splitted_variant)

    def __iter__(self):
        """Yield one variant dictionary per variant line (or per allele).

        Raises:
            SyntaxError: If no fileformat is defined in the metadata.
        """
        if not self.metadata.fileformat:
            raise SyntaxError("Vcf must have fileformat defined")
        if self.vcf:
            # We need to treat the first case as an exception because the
            # first variant line was already buffered during header parsing
            if self.beginning:
                variants = []
                if self.next_line:
                    first_variant = format_variant(
                        line=self.next_line,
                        header_parser=self.metadata,
                        check_info=self.check_info
                    )
                    if not (self.split_variants and len(first_variant['ALT'].split(',')) > 1):
                        variants.append(first_variant)
                    else:
                        for splitted_variant in split_variants(
                                variant_dict=first_variant,
                                header_parser=self.metadata,
                                allele_symbol=self.allele_symbol):
                            variants.append(splitted_variant)
                for variant in variants:
                    yield variant
                self.beginning = False
            for line in self.vcf:
                line = line.rstrip()
                # These are the variant(s) found in one line of the vcf
                # If there are multiple alternatives and self.split_variants
                # There can be more than one variant in one line
                variants = []
                if not line.startswith('#') and len(line.split('\t')) >= 8:
                    variant = format_variant(
                        line=line,
                        header_parser=self.metadata,
                        check_info=self.check_info
                    )
                    if not (self.split_variants and len(variant['ALT'].split(',')) > 1):
                        variants.append(variant)
                    else:
                        for splitted_variant in split_variants(
                                variant_dict=variant,
                                header_parser=self.metadata,
                                allele_symbol=self.allele_symbol):
                            variants.append(splitted_variant)
                for variant in variants:
                    yield variant
        else:
            # No input file/stream: iterate manually added variants
            for variant in self.variants:
                yield variant

    def __repr__(self):
        """Return a short constructor-style representation."""
        return "Parser(infile={0},fsock={1},split_variants={2})".format(
            self.infile, self.fsock, self.split_variants
        )
@click.command()
@click.argument('variant_file',
type=click.Path(),
metavar='<vcf_file> or -'
)
@click.option('--vep',
is_flag=True,
help='If variants are annotated with the Variant Effect Predictor.'
)
@click.option('-s' ,'--split',
is_flag=True,
help='Split the variants with multiallelic calls.'
)
# print('Time to parse: %s' % str(datetime.now()-start))
# pp(my_parser.metadata.extra_info)
if __name__ == '__main__':
cli()
|
moonso/vcf_parser
|
vcf_parser/parser.py
|
VCFParser.add_variant
|
python
|
def add_variant(self, chrom, pos, rs_id, ref, alt, qual, filt, info, form=None, genotypes=None):
    """
    Add a variant to the parser.

    This function is for building a vcf. It takes the relevant parameters
    and make a vcf variant in the proper format.

    Args:
        chrom, pos, rs_id, ref, alt, qual, filt, info (str): The first
            eight vcf columns of the variant.
        form (str): The FORMAT column, if genotype columns are given.
        genotypes (list): One genotype string per individual, or None.
    """
    variant_info = [chrom, pos, rs_id, ref, alt, qual, filt, info]
    if form:
        variant_info.append(form)
    # Default was a mutable [] (shared across calls); use None sentinel
    # instead — behavior for callers is unchanged.
    variant_info.extend(genotypes or [])
    variant_line = '\t'.join(variant_info)
    variant = format_variant(
        line=variant_line,
        header_parser=self.metadata,
        check_info=self.check_info
    )
    if not (self.split_variants and len(variant['ALT'].split(',')) > 1):
        self.variants.append(variant)
    # If multiple alternative and split_variants we must split the variant
    else:
        for splitted_variant in split_variants(
                variant_dict=variant,
                header_parser=self.metadata,
                allele_symbol=self.allele_symbol):
            self.variants.append(splitted_variant)
|
Add a variant to the parser.
This function is for building a vcf. It takes the relevant parameters
and make a vcf variant in the proper format.
|
train
|
https://github.com/moonso/vcf_parser/blob/8e2b6724e31995e0d43af501f25974310c6b843b/vcf_parser/parser.py#L173-L202
|
[
"def format_variant(line, header_parser, check_info=False):\n \"\"\"\n Yield the variant in the right format. \n\n If the variants should be splitted on alternative alles one variant \n for each alternative will be yielded.\n\n Arguments:\n line (str): A string that represents a variant line in the vcf format\n header_parser (HeaderParser): A HeaderParser object\n check_info (bool): If the info fields should be checked\n\n Yields:\n variant (dict): A dictionary with the variant information. The number\n of variants yielded depends on if split variant is used\n and how many alternatives there are\n\n \"\"\"\n logger = getLogger(__name__)\n\n individuals = []\n\n vcf_header = header_parser.header\n\n individuals = header_parser.individuals\n\n variant_line = line.rstrip().split('\\t')\n\n logger.debug(\"Checking if variant line is malformed\")\n if len(vcf_header) != len(variant_line):\n raise SyntaxError(\"One of the variant lines is malformed: {0}\".format(\n line\n ))\n\n variant = dict(zip(vcf_header, variant_line))\n\n\n # A dictionary with the vep information\n variant['vep_info'] = {}\n # A dictionary with the genetic models (family ids as keys)\n variant['genetic_models'] = {}\n # A dictionary with genotype objects (individual ids as keys)\n variant['genotypes'] = {}\n # A dictionary with the compounds (family ids as keys)\n variant['compound_variants'] = {}\n # A dictionary with the rank scores (family ids as keys)\n variant['rank_scores'] = {}\n\n variant['individual_scores'] = {}\n\n alternatives = variant['ALT'].split(',')\n\n info_dict = build_info_dict(variant.get('INFO', ''))\n\n #For testing\n\n # Check that the entry is on the proper format_\n if check_info:\n for info in info_dict:\n annotation = info_dict[info]\n extra_info = header_parser.extra_info.get(info, None)\n\n if not extra_info:\n raise SyntaxError(\"The INFO field {0} is not specified in vcf\"\\\n \" header. 
{1}\".format(info, line))\n try:\n check_info_annotation(annotation, info, extra_info, alternatives, individuals)\n except SyntaxError as e:\n logger.critical(e)\n logger.info(\"Line:{0}\".format(line))\n raise e\n\n variant['info_dict'] = info_dict\n\n #################### Some fields require special parsing ###########################\n\n ##### VEP ANNOTATIONS #####\n if 'CSQ' in info_dict:\n vep_columns = header_parser.vep_columns\n variant['vep_info'] = build_vep_annotation(\n info_dict['CSQ'], \n variant['REF'], \n alternatives,\n vep_columns\n )\n\n ##### GENMOD ANNOTATIONS #####\n\n if 'GeneticModels' in info_dict:\n variant['genetic_models'] = build_models_dict(\n info_dict['GeneticModels'])\n\n if 'Compounds' in info_dict:\n variant['compound_variants'] = build_compounds_dict(\n info_dict['Compounds'])\n\n if 'RankScore' in info_dict:\n variant['rank_scores'] = build_rank_score_dict(\n info_dict['RankScore'])\n\n if 'IndividualRankScore' in info_dict:\n variant['individual_scores'] = build_rank_score_dict(\n info_dict['IndividualRankScore'])\n\n ##### GENOTYPE ANNOTATIONS #####\n\n gt_format = variant.get('FORMAT', '').split(':')\n\n genotype_dict = {}\n for individual in individuals:\n gt_info = variant[individual].split(':')\n gt_call = dict(zip(gt_format, gt_info))\n\n #Create a genotype object for this individual\n genotype_dict[individual] = Genotype(**gt_call)\n\n variant['genotypes'] = genotype_dict\n\n variant['variant_id'] = '_'.join(\n [\n variant['CHROM'],\n variant['POS'],\n variant['REF'],\n alternatives[0]\n ]\n )\n\n return variant\n",
"def split_variants(variant_dict, header_parser, allele_symbol='0'):\n \"\"\"\n Checks if there are multiple alternative alleles and splitts the \n variant.\n If there are multiple alternatives the info fields, vep annotations \n and genotype calls will be splitted in the correct way\n\n Args:\n variant_dict: a dictionary with the variant information\n\n Yields:\n variant: A variant dictionary with the splitted information for each\n alternative\n \"\"\"\n logger = getLogger(__name__)\n logger.info(\"Allele symbol {0}\".format(allele_symbol))\n alternatives = variant_dict['ALT'].split(',')\n reference = variant_dict['REF']\n number_of_values = 1\n # Go through each of the alternative alleles:\n for alternative_number, alternative in enumerate(alternatives):\n variant = {}\n info_dict = OrderedDict()\n # This is a dict on the form {ALT:[{vep_info_dict}]}\n vep_dict = {}\n genotype_dict = {}\n variant['CHROM'] = variant_dict['CHROM']\n variant['POS'] = variant_dict['POS']\n try:\n # There will not allways be one rsID for each alternative\n variant['ID'] = variant_dict['ID'].split(';')[alternative_number]\n # If only one id is present for multiple alleles they all get the same ID\n except IndexError:\n variant['ID'] = variant_dict['ID']\n\n variant['REF'] = variant_dict['REF']\n variant['ALT'] = alternative\n variant['QUAL'] = variant_dict['QUAL']\n variant['FILTER'] = variant_dict['FILTER']\n\n\n if 'FORMAT' in variant_dict:\n gt_format = variant_dict['FORMAT']\n variant['FORMAT'] = gt_format\n\n for info in variant_dict['info_dict']:\n if info and info != '.':\n # Check if the info field have one entry per allele:\n number_of_values = header_parser.extra_info[info]['Number']\n\n if info == 'CSQ':\n vep_dict[alternative] = variant_dict['vep_info'][alternative]\n if vep_dict[alternative]:\n info_dict['CSQ'] = [\n build_vep_string(\n vep_dict[alternative], \n header_parser.vep_columns\n )\n ]\n # If there is one value per allele we need to split it in\n # the proper 
way\n elif number_of_values == 'A':\n try:\n # When we split the alleles we only want to annotate with the correct number\n info_dict[info] = [variant_dict['info_dict'][info][alternative_number]]\n except IndexError:\n # If there is only one annotation we choose that one\n info_dict[info] = [variant_dict['info_dict'][info][0]]\n # Choose the right vep info from the old variant\n elif number_of_values == 'R':\n reference_value = variant_dict['info_dict'][info][0]\n new_info = [reference_value]\n try:\n # When we split the alleles we only want to annotate with the correct number\n allele_value = variant_dict['info_dict'][info][alternative_number + 1]\n new_info.append(allele_value)\n info_dict[info] = new_info\n except IndexError:\n # If annotation is missing we keep the original annotation\n info_dict[info] = variant_dict['info_dict'][info]\n\n else:\n info_dict[info] = variant_dict['info_dict'][info]\n\n else:\n info_dict[info] = []\n\n variant['INFO'] = build_info_string(info_dict)\n\n for individual in variant_dict['genotypes']:\n new_genotype = split_genotype(\n variant_dict[individual], \n variant['FORMAT'], \n alternative_number, \n allele_symbol\n )\n\n variant[individual] = new_genotype\n genotype_dict[individual] = Genotype(**dict(zip(gt_format.split(':'), variant[individual].split(':'))))\n\n variant['info_dict'] = info_dict\n variant['vep_info'] = vep_dict\n variant['genotypes'] = genotype_dict\n variant['variant_id'] = '_'.join([variant['CHROM'],\n variant['POS'],\n variant['REF'],\n alternative])\n yield variant\n"
] |
class VCFParser(object):
    """Parse a vcf file or stream and iterate over its variants.

    The header section is consumed on construction and exposed through
    ``self.metadata``. Iterating the parser yields one dictionary per
    variant line (or one per alternative allele when ``split_variants``
    is True). When constructed without infile/fsock, variants can be
    added manually with :meth:`add_variant` and iterated afterwards.
    """

    def __init__(self, infile=None, fsock=None, split_variants=False,
                 check_info=False, allele_symbol='0', fileformat=None):
        """Set up the parser and consume the header section.

        Args:
            infile (str): Path to a '.vcf' or '.vcf.gz' file.
            fsock (file): An open stream (e.g. sys.stdin) with vcf data.
            split_variants (bool): If multiallelic calls should be split
                into one variant per alternative allele.
            check_info (bool): If INFO entries should be validated against
                the header definitions.
            allele_symbol (str): Symbol used when splitting genotype calls.
            fileformat (str): Mandatory when building a vcf manually
                (i.e. when neither infile nor fsock is given).

        Raises:
            IOError: If the file ending is unsupported, the input does not
                start with a metadata line, or fileformat is missing when
                no input is given.
        """
        super(VCFParser, self).__init__()
        self.logger = logging.getLogger(__name__)
        self.vcf = None
        self.logger.debug("Set self.vcf to:{0}".format(self.vcf))
        # True until __iter__ has emitted the first (buffered) variant line
        self.beginning = True
        self.infile = infile
        self.fsock = fsock
        self.split_variants = split_variants
        self.logger.info("Split variants = {0}".format(self.split_variants))
        self.fileformat = fileformat
        self.check_info = check_info
        self.logger.info("check info = {0}".format(self.check_info))
        self.allele_symbol = allele_symbol
        self.logger.info("Allele symbol = {0}".format(self.allele_symbol))
        self.logger.info("Initializing HeaderParser")
        self.metadata = HeaderParser()
        # These are the individuals described in the header
        self.individuals = []
        # This is the header line of the vcf
        self.header = []
        # If there are no file or stream the user can add variants manually.
        # These will be added to self.variants
        self.variants = []
        if (fsock or infile):
            if fsock:
                if not infile and hasattr(fsock, 'name'):
                    self.logger.info("Reading vcf form stdin")
                    if sys.version_info < (3, 0):
                        # Python 2 needs an explicit utf-8 reader on stdin
                        self.logger.info("Using codecs to read stdin")
                        sys.stdin = getreader('utf-8')(fsock)
                self.vcf = sys.stdin
            else:
                self.logger.info("Reading vcf form file {0}".format(infile))
                file_name, file_extension = os.path.splitext(infile)
                if file_extension == '.gz':
                    self.logger.debug("Vcf is zipped")
                    self.vcf = getreader('utf-8')(gzip.open(infile), errors='replace')
                elif file_extension == '.vcf':
                    self.vcf = open(infile, mode='r', encoding='utf-8', errors='replace')
                else:
                    raise IOError("File is not in a supported format!\n"
                                  " Or use correct ending(.vcf or .vcf.gz)")
            self.logger.debug("Reading first line.")
            # Buffer the first line; __iter__ treats it as a special case
            self.next_line = self.vcf.readline().rstrip()
            self.current_line = self.next_line
            # First line is always a metadata line
            if not self.next_line.startswith('#'):
                raise IOError("VCF files allways have to start with a metadata line.")
            self.metadata.parse_meta_data(self.next_line)
            # Parse the metadata lines ('##' = metadata, '#' = header line)
            while self.next_line.startswith('#'):
                if self.next_line.startswith('##'):
                    self.metadata.parse_meta_data(self.next_line)
                elif self.next_line.startswith('#'):
                    self.metadata.parse_header_line(self.next_line)
                self.next_line = self.vcf.readline().rstrip()
            self.individuals = self.metadata.individuals
            self.logger.info("Setting self.individuals to {0}".format(
                self.individuals
            ))
            self.header = self.metadata.header
            self.vep_header = self.metadata.vep_columns
        else:
            if not self.fileformat:
                raise IOError("Please initialize with a fileformat.")
            else:
                self.metadata.fileformat = self.fileformat

    def add_variant(self, chrom, pos, rs_id, ref, alt, qual, filt, info, form=None, genotypes=[]):
        """
        Add a variant to the parser.

        This function is for building a vcf. It takes the relevant parameters
        and make a vcf variant in the proper format.

        Args:
            chrom, pos, rs_id, ref, alt, qual, filt, info (str): The first
                eight vcf columns of the variant.
            form (str): The FORMAT column, if genotype columns are given.
            genotypes (list): One genotype string per individual.
        """
        variant_info = [chrom, pos, rs_id, ref, alt, qual, filt, info]
        if form:
            variant_info.append(form)
        for individual in genotypes:
            variant_info.append(individual)
        variant_line = '\t'.join(variant_info)
        variant = format_variant(
            line=variant_line,
            header_parser=self.metadata,
            check_info=self.check_info
        )
        if not (self.split_variants and len(variant['ALT'].split(',')) > 1):
            self.variants.append(variant)
        # If multiple alternative and split_variants we must split the variant
        else:
            for splitted_variant in split_variants(
                    variant_dict=variant,
                    header_parser=self.metadata,
                    allele_symbol=self.allele_symbol):
                self.variants.append(splitted_variant)

    def __iter__(self):
        """Yield one variant dictionary per variant line (or per allele).

        Raises:
            SyntaxError: If no fileformat is defined in the metadata.
        """
        if not self.metadata.fileformat:
            raise SyntaxError("Vcf must have fileformat defined")
        if self.vcf:
            # We need to treat the first case as an exception because the
            # first variant line was already buffered during header parsing
            if self.beginning:
                variants = []
                if self.next_line:
                    first_variant = format_variant(
                        line=self.next_line,
                        header_parser=self.metadata,
                        check_info=self.check_info
                    )
                    if not (self.split_variants and len(first_variant['ALT'].split(',')) > 1):
                        variants.append(first_variant)
                    else:
                        for splitted_variant in split_variants(
                                variant_dict=first_variant,
                                header_parser=self.metadata,
                                allele_symbol=self.allele_symbol):
                            variants.append(splitted_variant)
                for variant in variants:
                    yield variant
                self.beginning = False
            for line in self.vcf:
                line = line.rstrip()
                # These are the variant(s) found in one line of the vcf
                # If there are multiple alternatives and self.split_variants
                # There can be more than one variant in one line
                variants = []
                if not line.startswith('#') and len(line.split('\t')) >= 8:
                    variant = format_variant(
                        line=line,
                        header_parser=self.metadata,
                        check_info=self.check_info
                    )
                    if not (self.split_variants and len(variant['ALT'].split(',')) > 1):
                        variants.append(variant)
                    else:
                        for splitted_variant in split_variants(
                                variant_dict=variant,
                                header_parser=self.metadata,
                                allele_symbol=self.allele_symbol):
                            variants.append(splitted_variant)
                for variant in variants:
                    yield variant
        else:
            # No input file/stream: iterate manually added variants
            for variant in self.variants:
                yield variant

    def __repr__(self):
        """Return a short constructor-style representation."""
        return "Parser(infile={0},fsock={1},split_variants={2})".format(
            self.infile, self.fsock, self.split_variants
        )
|
moonso/vcf_parser
|
vcf_parser/utils/build_vep.py
|
build_vep_string
|
python
|
def build_vep_string(vep_info, vep_columns):
    """Build a vep formatted annotation string.

    Args:
        vep_info (list): Vep annotation dictionaries, one per entry.
        vep_columns (list): Vep column names from the vcf header.

    Returns:
        str: Values joined by '|' within an entry and ',' between entries.

    Raises:
        SyntaxError: If an annotation lacks one of the vep columns.
    """
    logger = getLogger(__name__)
    logger.debug("Building vep string from {0}".format(vep_info))
    logger.debug("Found vep headers {0}".format(vep_columns))
    annotation_strings = []
    for annotation in vep_info:
        try:
            # Order the values according to the header's vep columns
            ordered_values = [annotation[column] for column in vep_columns]
        except KeyError:
            raise SyntaxError("Vep entry does not correspond to vep headers")
        annotation_strings.append('|'.join(ordered_values))
    return ','.join(annotation_strings)
|
Build a vep string formatted string.
Take a list with vep annotations and build a new vep string
Args:
vep_info (list): A list with vep annotation dictionaries
vep_columns (list): A list with the vep column names found in the
header of the vcf
Returns:
string: A string with the proper vep annotations
|
train
|
https://github.com/moonso/vcf_parser/blob/8e2b6724e31995e0d43af501f25974310c6b843b/vcf_parser/utils/build_vep.py#L3-L31
| null |
from logging import getLogger
def build_vep_string(vep_info, vep_columns):
    """
    Build a vep string formatted string.

    Take a list with vep annotations and build a new vep string

    Args:
        vep_info (list): A list with vep annotation dictionaries
        vep_columns (list): A list with the vep column names found in the
                            header of the vcf

    Returns:
        string: A string with the proper vep annotations

    Raises:
        SyntaxError: If an annotation is missing one of the vep columns.
    """
    logger = getLogger(__name__)
    logger.debug("Building vep string from {0}".format(vep_info))
    logger.debug("Found vep headers {0}".format(vep_columns))
    vep_strings = []
    for vep_annotation in vep_info:
        try:
            # Order the values according to the header's vep columns
            vep_info_list = [
                vep_annotation[vep_key] for vep_key in vep_columns
            ]
        except KeyError:
            raise SyntaxError("Vep entry does not correspond to vep headers")
        # Fields within one annotation are '|'-separated
        vep_strings.append('|'.join(vep_info_list))
    # Separate annotation entries with ','
    return ','.join(vep_strings)
def build_vep_annotation(csq_info, reference, alternatives, vep_columns):
    """
    Build a dictionary with the vep information from the vep annotation.

    Vep names indel alleles differently from the vcf: an insertion (and a
    multi-base deletion) is written without its leading base, and a single
    base deletion is written as '-'. This function maps the vep allele
    names back to the vcf alternatives and groups the annotations per
    vcf alternative.

    Args:
        csq_info (list): The raw vep annotation strings from the vcf line.
        reference (str): The vcf reference allele.
        alternatives (list): The vcf formatted alternative alleles.
        vep_columns (list): The vep column names from the vcf header.

    Returns:
        dict: Vcf alternatives as keys, each mapped to a list of vep
        annotation dictionaries.

    Raises:
        SyntaxError: If an annotation's column count does not match the
            header's vep columns.
    """
    logger = getLogger(__name__)
    # Keys are vcf formatted alternatives, values lists of vep annotations
    vep_dict = {}
    # Maps vep allele spelling -> vcf alternative spelling
    vep_to_vcf = {}
    number_of_deletions = sum(
        1 for alternative in alternatives if len(alternative) < len(reference)
    )
    logger.debug("Number of deletions found: {0}".format(number_of_deletions))
    for alternative in alternatives:
        vep_dict[alternative] = []
        if len(alternative) == len(reference):
            # Substitution: vep uses the allele unchanged
            vep_to_vcf[alternative] = alternative
        elif len(alternative) == 1:
            # Single base deletion: vep writes '-'
            vep_to_vcf['-'] = alternative
        else:
            # Otherwise vep drops the leading base
            vep_to_vcf[alternative[1:]] = alternative
    for vep_annotation in csq_info:
        logger.debug("Parsing vep annotation: {0}".format(vep_annotation))
        splitted_vep = vep_annotation.split('|')
        if len(splitted_vep) != len(vep_columns):
            raise SyntaxError("Csq info for variant does not match csq info in "
                              "header. {0}, {1}".format(
                                  '|'.join(splitted_vep), '|'.join(vep_columns)))
        vep_info = dict(zip(vep_columns, splitted_vep))
        vep_allele = vep_info.get('Allele', None)
        # Without an allele we cannot tell which alternative is annotated
        if vep_allele:
            # Unknown vep alleles fall back to their own spelling
            vcf_allele = vep_to_vcf.get(vep_allele, vep_allele)
            vep_dict.setdefault(vcf_allele, []).append(vep_info)
        else:
            logger.warning("No allele found in vep annotation! Skipping annotation")
    return vep_dict
|
moonso/vcf_parser
|
vcf_parser/utils/build_vep.py
|
build_vep_annotation
|
python
|
def build_vep_annotation(csq_info, reference, alternatives, vep_columns):
    """
    Build a dictionary with the vep information from the vep annotation.

    Vep names indel alleles differently from the vcf: an insertion (and a
    multi-base deletion) is written without its leading base, and a single
    base deletion is written as '-'. This function maps the vep allele
    names back to the vcf alternatives and groups the annotations per
    vcf alternative.

    Args:
        csq_info (list): The raw vep annotation strings from the vcf line.
        reference (str): The vcf reference allele.
        alternatives (list): The vcf formatted alternative alleles.
        vep_columns (list): The vep column names from the vcf header.

    Returns:
        vep_dict (dict): Vcf alternatives as keys, each mapped to a list
                         of vep annotation dictionaries.

    Raises:
        SyntaxError: If an annotation's column count does not match the
            header's vep columns.
    """
    logger = getLogger(__name__)
    # The keys in the vep dict are the vcf formatted alternatives, values are the
    # dictionaries with vep annotations
    vep_dict = {}
    # If we have several alternatives we need to check what types of
    # alternatives we have
    vep_to_vcf = {}
    number_of_deletions = 0
    for alternative in alternatives:
        if len(alternative) < len(reference):
            number_of_deletions += 1
    logger.debug("Number of deletions found: {0}".format(number_of_deletions))
    for alternative in alternatives:
        # We store the annotations with keys from the vcf alternatives
        vep_dict[alternative] = []
        # If substitution, reference and alternative have the same length
        if len(alternative) == len(reference):
            vep_to_vcf[alternative] = alternative
        # If deletion, alternative is shorter than the reference
        else:
            # If there is a deletion then the alternative will be '-' in vep entry
            if len(alternative) == 1:
                vep_to_vcf['-'] = alternative
            else:
                vep_to_vcf[alternative[1:]] = alternative
    for vep_annotation in csq_info:
        logger.debug("Parsing vep annotation: {0}".format(vep_annotation))
        splitted_vep = vep_annotation.split('|')
        if len(splitted_vep) != len(vep_columns):
            raise SyntaxError("Csq info for variant does not match csq info in "\
                "header. {0}, {1}".format(
                    '|'.join(splitted_vep), '|'.join(vep_columns)))
        # Build the vep dict:
        vep_info = dict(zip(vep_columns, splitted_vep))
        # If no allele is found we can not determine what allele
        if vep_info.get('Allele', None):
            vep_allele = vep_info['Allele']
            try:
                vcf_allele = vep_to_vcf[vep_allele]
            except KeyError as e:
                # Unknown vep alleles fall back to their own spelling
                vcf_allele = vep_allele
            if vcf_allele in vep_dict:
                vep_dict[vcf_allele].append(vep_info)
            else:
                vep_dict[vcf_allele] = [vep_info]
        else:
            logger.warning("No allele found in vep annotation! Skipping annotation")
    return vep_dict
|
Build a dictionary with the vep information from the vep annotation.
Indels are handled different by vep depending on the number of
alternative alleles there is for a variant.
If only one alternative:
Insertion: vep represents the alternative by removing the first
base from the vcf alternative.
Deletion: vep represents the alternative with '-'
If there are several alternatives:
Insertion:
vep represents the alternative by removing the first
base from the vcf alternative(Like above).
Deletion:
If there are multiple alternative deletions vep represents them by
removing the first base from the vcf alternative.
If the vcf line looks like:
1 970549 . TGGG TG,TGG
vep annotation for alternatives will be: G,GG
Args:
csq_info (list): A list with the raw vep annotations from the vcf line.
reference (str): A string that represents the vcf reference
alternatives (list): A list of strings that represents the vcf formated
alternatives
vep_columns (list): A list of strings that represents the vep comluns
defined in the vcf header.
Returns:
vep_dict (dict): A dictionary with the alternative alleles (in vcf form)
as keys and a list of annotations for each alternative
alleles.
One key named 'gene_ids',
value is a set with the genes found.
|
train
|
https://github.com/moonso/vcf_parser/blob/8e2b6724e31995e0d43af501f25974310c6b843b/vcf_parser/utils/build_vep.py#L33-L134
| null |
from logging import getLogger
def build_vep_string(vep_info, vep_columns):
    """
    Build a vep formatted annotation string.

    Each annotation dictionary is serialized as a '|'-separated list of
    values ordered by ``vep_columns``; the serialized annotations are then
    joined with ','.

    Args:
        vep_info (list): A list with vep annotation dictionaries
        vep_columns (list): A list with the vep column names found in the
                            header of the vcf

    Returns:
        string: A string with the proper vep annotations

    Raises:
        SyntaxError: If an annotation is missing one of the vep columns.
    """
    logger = getLogger(__name__)
    logger.debug("Building vep string from {0}".format(vep_info))
    logger.debug("Found vep headers {0}".format(vep_columns))

    annotation_strings = []
    for annotation in vep_info:
        values = []
        for column in vep_columns:
            try:
                values.append(annotation[column])
            except KeyError:
                raise SyntaxError("Vep entry does not correspond to vep headers")
        annotation_strings.append('|'.join(values))
    return ','.join(annotation_strings)
def build_vep_annotation(csq_info, reference, alternatives, vep_columns):
    """
    Build a dictionary with the vep information from the vep annotation.

    Indels are handled different by vep depending on the number of
    alternative alleles there is for a variant.

    If only one alternative:
        Insertion: vep represents the alternative by removing the first
        base from the vcf alternative.
        Deletion: vep represents the alternative with '-'

    If there are several alternatives:
        Insertion: vep represents the alternative by removing the first
        base from the vcf alternative (like above).
        Deletion: if there are multiple alternative deletions vep
        represents them by removing the first base from the vcf
        alternative. If the vcf line looks like:
            1   970549  .       TGGG    TG,TGG
        the vep annotation for the alternatives will be: G,GG

    Args:
        csq_info (list): A list with the raw vep annotations from the vcf line.
        reference (str): A string that represents the vcf reference
        alternatives (list): A list of strings that represents the vcf formated
                             alternatives
        vep_columns (list): A list of strings that represents the vep columns
                            defined in the vcf header.

    Returns:
        vep_dict (dict): A dictionary with the alternative alleles (in vcf
                         form) as keys and a list of annotation dicts for
                         each alternative allele.
                         NOTE(review): earlier docs also mention a
                         'gene_ids' key holding a set of genes; this
                         function does not set such a key -- confirm
                         against callers.
    """
    logger = getLogger(__name__)
    # The keys in the vep dict are the vcf formatted alternatives, values are the
    # dictionaries with vep annotations
    vep_dict = {}
    # Maps the allele spelling vep uses back to the vcf spelling, so the
    # annotations can be grouped under the correct vcf alternative.
    vep_to_vcf = {}
    number_of_deletions = 0
    for alternative in alternatives:
        # A deletion: the alternative is shorter than the reference
        if len(alternative) < len(reference):
            number_of_deletions += 1
    logger.debug("Number of deletions found: {0}".format(number_of_deletions))
    for alternative in alternatives:
        # We store the annotations with keys from the vcf alternatives
        vep_dict[alternative] = []
        # If substitution, reference and alternative have the same length
        if len(alternative) == len(reference):
            vep_to_vcf[alternative] = alternative
        # Otherwise the alternative is a deletion (shorter than the reference)
        else:
            # If there is a deletion then the alternative will be '-' in vep entry
            if len(alternative) == 1:
                vep_to_vcf['-'] = alternative
            else:
                # Vep strips the shared leading base from the allele
                vep_to_vcf[alternative[1:]] = alternative
    for vep_annotation in csq_info:
        logger.debug("Parsing vep annotation: {0}".format(vep_annotation))
        splitted_vep = vep_annotation.split('|')
        # Every annotation must have exactly one value per vep header column
        if len(splitted_vep) != len(vep_columns):
            raise SyntaxError("Csq info for variant does not match csq info in "\
                              "header. {0}, {1}".format(
                                  '|'.join(splitted_vep), '|'.join(vep_columns)))
        # Build the vep dict:
        vep_info = dict(zip(vep_columns, splitted_vep))
        # If no allele is found we can not determine what allele
        if vep_info.get('Allele', None):
            vep_allele = vep_info['Allele']
            try:
                vcf_allele = vep_to_vcf[vep_allele]
            except KeyError as e:
                # Unknown vep spelling: fall back to using it verbatim
                vcf_allele = vep_allele
            if vcf_allele in vep_dict:
                vep_dict[vcf_allele].append(vep_info)
            else:
                vep_dict[vcf_allele] = [vep_info]
        else:
            logger.warning("No allele found in vep annotation! Skipping annotation")
    return vep_dict
|
ethereum/eth-utils
|
eth_utils/currency.py
|
from_wei
|
python
|
def from_wei(number: int, unit: str) -> Union[int, decimal.Decimal]:
    """
    Takes a number of wei and converts it to any other ether unit.

    :param number: the wei amount to convert
    :param unit: target denomination name, e.g. 'ether' or 'gwei'
    :return: 0 for a zero input, otherwise a ``decimal.Decimal`` value
    :raises ValueError: if the unit is unknown or the amount is out of range
    """
    normalized_unit = unit.lower()
    if normalized_unit not in units:
        raise ValueError(
            "Unknown unit. Must be one of {0}".format("/".join(units.keys()))
        )

    if number == 0:
        return 0

    if number < MIN_WEI or number > MAX_WEI:
        raise ValueError("value must be between 1 and 2**256 - 1")

    denomination = units[normalized_unit]
    with localcontext() as ctx:
        # Very high precision so the division never rounds.
        ctx.prec = 999
        wei_amount = decimal.Decimal(value=number, context=ctx)
        converted = wei_amount / denomination

    return converted
|
Takes a number of wei and converts it to any other ether unit.
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/currency.py#L40-L62
| null |
import decimal
from decimal import localcontext
from typing import Union
from .types import is_integer, is_string
from .units import units
class denoms:
wei = int(units["wei"])
kwei = int(units["kwei"])
babbage = int(units["babbage"])
femtoether = int(units["femtoether"])
mwei = int(units["mwei"])
lovelace = int(units["lovelace"])
picoether = int(units["picoether"])
gwei = int(units["gwei"])
shannon = int(units["shannon"])
nanoether = int(units["nanoether"])
nano = int(units["nano"])
szabo = int(units["szabo"])
microether = int(units["microether"])
micro = int(units["micro"])
finney = int(units["finney"])
milliether = int(units["milliether"])
milli = int(units["milli"])
ether = int(units["ether"])
kether = int(units["kether"])
grand = int(units["grand"])
mether = int(units["mether"])
gether = int(units["gether"])
tether = int(units["tether"])
MIN_WEI = 0
MAX_WEI = 2 ** 256 - 1
def to_wei(number: int, unit: str) -> int:
    """
    Takes a number of a unit and converts it to wei.

    :param number: amount expressed in ``unit``; may be an int, a numeric
        string, a float, or a ``decimal.Decimal``
    :param unit: name of the source denomination (case-insensitive)
    :return: the equivalent integer amount of wei
    :raises ValueError: for an unknown unit or an out-of-range result
    :raises TypeError: for an unsupported ``number`` type
    """
    if unit.lower() not in units:
        raise ValueError(
            "Unknown unit. Must be one of {0}".format("/".join(units.keys()))
        )
    # Normalize the input to a Decimal; floats go through str() so the
    # Decimal reflects the printed value rather than the binary expansion.
    if is_integer(number) or is_string(number):
        d_number = decimal.Decimal(value=number)
    elif isinstance(number, float):
        d_number = decimal.Decimal(value=str(number))
    elif isinstance(number, decimal.Decimal):
        d_number = number
    else:
        raise TypeError("Unsupported type. Must be one of integer, float, or string")
    s_number = str(number)
    unit_value = units[unit.lower()]
    if d_number == 0:
        return 0
    if d_number < 1 and "." in s_number:
        # Fractional input: scale the value up (and the unit down) by the
        # number of fractional digits so the final multiply stays exact.
        with localcontext() as ctx:
            multiplier = len(s_number) - s_number.index(".") - 1
            ctx.prec = multiplier
            d_number = decimal.Decimal(value=number, context=ctx) * 10 ** multiplier
            unit_value /= 10 ** multiplier
    with localcontext() as ctx:
        # Very high precision so the multiplication does not round.
        ctx.prec = 999
        result_value = decimal.Decimal(value=d_number, context=ctx) * unit_value
    if result_value < MIN_WEI or result_value > MAX_WEI:
        raise ValueError("Resulting wei value must be between 1 and 2**256 - 1")
    return int(result_value)
|
ethereum/eth-utils
|
eth_utils/currency.py
|
to_wei
|
python
|
def to_wei(number: int, unit: str) -> int:
    """
    Takes a number of a unit and converts it to wei.
    """
    unit_name = unit.lower()
    if unit_name not in units:
        raise ValueError(
            "Unknown unit. Must be one of {0}".format("/".join(units.keys()))
        )

    # Normalize the input to a Decimal; floats go through str() so the
    # Decimal reflects the printed value rather than the binary expansion.
    if is_integer(number) or is_string(number):
        amount = decimal.Decimal(value=number)
    elif isinstance(number, float):
        amount = decimal.Decimal(value=str(number))
    elif isinstance(number, decimal.Decimal):
        amount = number
    else:
        raise TypeError("Unsupported type. Must be one of integer, float, or string")

    text_repr = str(number)
    scale = units[unit_name]

    if amount == 0:
        return 0

    if amount < 1 and "." in text_repr:
        # Fractional input: scale the value up (and the unit down) by the
        # number of fractional digits so the final multiply stays exact.
        with localcontext() as ctx:
            fraction_digits = len(text_repr) - text_repr.index(".") - 1
            ctx.prec = fraction_digits
            amount = decimal.Decimal(value=number, context=ctx) * 10 ** fraction_digits
            scale /= 10 ** fraction_digits

    with localcontext() as ctx:
        # Very high precision so the multiplication does not round.
        ctx.prec = 999
        result_value = decimal.Decimal(value=amount, context=ctx) * scale

    if result_value < MIN_WEI or result_value > MAX_WEI:
        raise ValueError("Resulting wei value must be between 1 and 2**256 - 1")
    return int(result_value)
|
Takes a number of a unit and converts it to wei.
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/currency.py#L65-L103
|
[
"def is_integer(value: Any) -> bool:\n return isinstance(value, integer_types) and not isinstance(value, bool)\n",
"def is_string(value: Any) -> bool:\n return isinstance(value, string_types)\n"
] |
import decimal
from decimal import localcontext
from typing import Union
from .types import is_integer, is_string
from .units import units
class denoms:
wei = int(units["wei"])
kwei = int(units["kwei"])
babbage = int(units["babbage"])
femtoether = int(units["femtoether"])
mwei = int(units["mwei"])
lovelace = int(units["lovelace"])
picoether = int(units["picoether"])
gwei = int(units["gwei"])
shannon = int(units["shannon"])
nanoether = int(units["nanoether"])
nano = int(units["nano"])
szabo = int(units["szabo"])
microether = int(units["microether"])
micro = int(units["micro"])
finney = int(units["finney"])
milliether = int(units["milliether"])
milli = int(units["milli"])
ether = int(units["ether"])
kether = int(units["kether"])
grand = int(units["grand"])
mether = int(units["mether"])
gether = int(units["gether"])
tether = int(units["tether"])
MIN_WEI = 0
MAX_WEI = 2 ** 256 - 1
def from_wei(number: int, unit: str) -> Union[int, decimal.Decimal]:
    """
    Takes a number of wei and converts it to any other ether unit.

    :param number: amount of wei to convert
    :param unit: name of the target denomination (case-insensitive)
    :return: 0 for a zero input, otherwise a ``decimal.Decimal``
    :raises ValueError: for an unknown unit or an out-of-range amount
    """
    if unit.lower() not in units:
        raise ValueError(
            "Unknown unit. Must be one of {0}".format("/".join(units.keys()))
        )
    if number == 0:
        return 0
    if number < MIN_WEI or number > MAX_WEI:
        # NOTE(review): the message says "between 1 and 2**256 - 1", but 0 is
        # already handled above, so this only rejects negatives and overflow.
        raise ValueError("value must be between 1 and 2**256 - 1")
    unit_value = units[unit.lower()]
    with localcontext() as ctx:
        # Very high precision so the division below does not round.
        ctx.prec = 999
        d_number = decimal.Decimal(value=number, context=ctx)
        result_value = d_number / unit_value
    return result_value
|
ethereum/eth-utils
|
eth_utils/conversions.py
|
to_hex
|
python
|
def to_hex(
    primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> HexStr:
    """
    Auto converts any supported value into its hex representation.

    Trims leading zeros, as defined in:
    https://github.com/ethereum/wiki/wiki/JSON-RPC#hex-value-encoding
    """
    if hexstr is not None:
        return HexStr(add_0x_prefix(hexstr.lower()))

    if text is not None:
        return HexStr(encode_hex(text.encode("utf-8")))

    if is_boolean(primitive):
        # Booleans get the minimal one-digit encoding.
        if primitive:
            return HexStr("0x1")
        return HexStr("0x0")

    if isinstance(primitive, (bytes, bytearray)):
        return HexStr(encode_hex(primitive))

    if is_string(primitive):
        raise TypeError(
            "Unsupported type: The primitive argument must be one of: bytes,"
            "bytearray, int or bool and not str"
        )

    if is_integer(primitive):
        return HexStr(hex(cast(int, primitive)))

    raise TypeError(
        "Unsupported type: '{0}'. Must be one of: bool, str, bytes, bytearray"
        "or int.".format(repr(type(primitive)))
    )
|
Auto converts any supported value into its hex representation.
Trims leading zeros, as defined in:
https://github.com/ethereum/wiki/wiki/JSON-RPC#hex-value-encoding
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/conversions.py#L11-L42
|
[
"def add_0x_prefix(value: str) -> str:\n if is_0x_prefixed(value):\n return value\n return \"0x\" + value\n",
"def encode_hex(value: AnyStr) -> str:\n if not is_string(value):\n raise TypeError(\"Value must be an instance of str or unicode\")\n binary_hex = codecs.encode(value, \"hex\") # type: ignore\n return add_0x_prefix(binary_hex.decode(\"ascii\"))\n",
"def is_boolean(value: Any) -> bool:\n return isinstance(value, bool)\n"
] |
from typing import Callable, Union, cast
from .decorators import validate_conversion_arguments
from .encoding import big_endian_to_int, int_to_big_endian
from .hexadecimal import add_0x_prefix, decode_hex, encode_hex, is_hex, remove_0x_prefix
from .types import is_boolean, is_integer, is_string
from .typing import HexStr, Primitives, T
@validate_conversion_arguments
@validate_conversion_arguments
def to_int(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> int:
"""
Converts value to its integer representation.
Values are converted this way:
* primitive:
* bytes, bytearrays: big-endian integer
* bool: True => 1, False => 0
* hexstr: interpret hex as integer
* text: interpret as string of digits, like '12' => 12
"""
if hexstr is not None:
return int(hexstr, 16)
elif text is not None:
return int(text)
elif isinstance(primitive, (bytes, bytearray)):
return big_endian_to_int(primitive)
elif isinstance(primitive, str):
raise TypeError("Pass in strings with keyword hexstr or text")
elif isinstance(primitive, (int, bool)):
return int(primitive)
else:
raise TypeError(
"Invalid type. Expected one of int/bool/str/bytes/bytearray. Got "
"{0}".format(type(primitive))
)
@validate_conversion_arguments
def to_bytes(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> bytes:
if is_boolean(primitive):
return b"\x01" if primitive else b"\x00"
elif isinstance(primitive, bytearray):
return bytes(primitive)
elif isinstance(primitive, bytes):
return primitive
elif is_integer(primitive):
return to_bytes(hexstr=to_hex(primitive))
elif hexstr is not None:
if len(hexstr) % 2:
# type check ignored here because casting an Optional arg to str is not possible
hexstr = "0x0" + remove_0x_prefix(hexstr) # type: ignore
return decode_hex(hexstr)
elif text is not None:
return text.encode("utf-8")
raise TypeError(
"expected a bool, int, byte or bytearray in first arg, or keyword of hexstr or text"
)
@validate_conversion_arguments
def to_text(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> str:
if hexstr is not None:
return to_bytes(hexstr=hexstr).decode("utf-8")
elif text is not None:
return text
elif isinstance(primitive, str):
return to_text(hexstr=primitive)
elif isinstance(primitive, (bytes, bytearray)):
return primitive.decode("utf-8")
elif is_integer(primitive):
byte_encoding = int_to_big_endian(primitive) # type: ignore
return to_text(byte_encoding)
raise TypeError("Expected an int, bytes, bytearray or hexstr.")
def text_if_str(
    to_type: Callable[..., T], text_or_primitive: Union[bytes, int, str]
) -> T:
    """
    Convert to a type, assuming that strings can be only unicode text (not a hexstr)

    :param to_type function: takes the arguments (primitive, hexstr=hexstr, text=text),
        eg~ to_bytes, to_text, to_hex, to_int, etc
    :param text_or_primitive bytes, str, int: value to convert
    """
    # Strings are routed through the 'text' keyword; anything else is
    # passed positionally as a primitive.
    if not isinstance(text_or_primitive, str):
        return to_type(text_or_primitive)
    return to_type(text=text_or_primitive)
def hexstr_if_str(
    to_type: Callable[..., T], hexstr_or_primitive: Union[bytes, int, str]
) -> T:
    """
    Convert to a type, assuming that strings can be only hexstr (not unicode text)

    :param to_type function: takes the arguments (primitive, hexstr=hexstr, text=text),
        eg~ to_bytes, to_text, to_hex, to_int, etc
    :param hexstr_or_primitive bytes, str, int: value to convert
    """
    if not isinstance(hexstr_or_primitive, str):
        return to_type(hexstr_or_primitive)

    # A plain string must be valid hex; strings with nothing after the
    # optional '0x' prefix skip the validation.
    if remove_0x_prefix(hexstr_or_primitive) and not is_hex(hexstr_or_primitive):
        raise ValueError(
            "when sending a str, it must be a hex string. Got: {0!r}".format(
                hexstr_or_primitive
            )
        )
    return to_type(hexstr=hexstr_or_primitive)
|
ethereum/eth-utils
|
eth_utils/conversions.py
|
to_int
|
python
|
def to_int(
    primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> int:
    """
    Converts value to its integer representation.

    Values are converted this way:

    * primitive:
      * bytes, bytearrays: big-endian integer
      * bool: True => 1, False => 0
    * hexstr: interpret hex as integer
    * text: interpret as string of digits, like '12' => 12
    """
    if hexstr is not None:
        return int(hexstr, 16)
    if text is not None:
        return int(text)
    if isinstance(primitive, (bytes, bytearray)):
        return big_endian_to_int(primitive)
    if isinstance(primitive, str):
        # Bare strings are ambiguous (hex vs decimal), so reject them.
        raise TypeError("Pass in strings with keyword hexstr or text")
    if isinstance(primitive, (int, bool)):
        return int(primitive)
    raise TypeError(
        "Invalid type. Expected one of int/bool/str/bytes/bytearray. Got "
        "{0}".format(type(primitive))
    )
|
Converts value to its integer representation.
Values are converted this way:
* primitive:
* bytes, bytearrays: big-endian integer
* bool: True => 1, False => 0
* hexstr: interpret hex as integer
* text: interpret as string of digits, like '12' => 12
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/conversions.py#L46-L74
|
[
"def big_endian_to_int(value: bytes) -> int:\n return int.from_bytes(value, \"big\")\n"
] |
from typing import Callable, Union, cast
from .decorators import validate_conversion_arguments
from .encoding import big_endian_to_int, int_to_big_endian
from .hexadecimal import add_0x_prefix, decode_hex, encode_hex, is_hex, remove_0x_prefix
from .types import is_boolean, is_integer, is_string
from .typing import HexStr, Primitives, T
@validate_conversion_arguments
def to_hex(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> HexStr:
"""
Auto converts any supported value into its hex representation.
Trims leading zeros, as defined in:
https://github.com/ethereum/wiki/wiki/JSON-RPC#hex-value-encoding
"""
if hexstr is not None:
return HexStr(add_0x_prefix(hexstr.lower()))
if text is not None:
return HexStr(encode_hex(text.encode("utf-8")))
if is_boolean(primitive):
return HexStr("0x1") if primitive else HexStr("0x0")
if isinstance(primitive, (bytes, bytearray)):
return HexStr(encode_hex(primitive))
elif is_string(primitive):
raise TypeError(
"Unsupported type: The primitive argument must be one of: bytes,"
"bytearray, int or bool and not str"
)
if is_integer(primitive):
return HexStr(hex(cast(int, primitive)))
raise TypeError(
"Unsupported type: '{0}'. Must be one of: bool, str, bytes, bytearray"
"or int.".format(repr(type(primitive)))
)
@validate_conversion_arguments
@validate_conversion_arguments
def to_bytes(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> bytes:
if is_boolean(primitive):
return b"\x01" if primitive else b"\x00"
elif isinstance(primitive, bytearray):
return bytes(primitive)
elif isinstance(primitive, bytes):
return primitive
elif is_integer(primitive):
return to_bytes(hexstr=to_hex(primitive))
elif hexstr is not None:
if len(hexstr) % 2:
# type check ignored here because casting an Optional arg to str is not possible
hexstr = "0x0" + remove_0x_prefix(hexstr) # type: ignore
return decode_hex(hexstr)
elif text is not None:
return text.encode("utf-8")
raise TypeError(
"expected a bool, int, byte or bytearray in first arg, or keyword of hexstr or text"
)
@validate_conversion_arguments
def to_text(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> str:
if hexstr is not None:
return to_bytes(hexstr=hexstr).decode("utf-8")
elif text is not None:
return text
elif isinstance(primitive, str):
return to_text(hexstr=primitive)
elif isinstance(primitive, (bytes, bytearray)):
return primitive.decode("utf-8")
elif is_integer(primitive):
byte_encoding = int_to_big_endian(primitive) # type: ignore
return to_text(byte_encoding)
raise TypeError("Expected an int, bytes, bytearray or hexstr.")
def text_if_str(
to_type: Callable[..., T], text_or_primitive: Union[bytes, int, str]
) -> T:
"""
Convert to a type, assuming that strings can be only unicode text (not a hexstr)
:param to_type function: takes the arguments (primitive, hexstr=hexstr, text=text),
eg~ to_bytes, to_text, to_hex, to_int, etc
:param text_or_primitive bytes, str, int: value to convert
"""
if isinstance(text_or_primitive, str):
return to_type(text=text_or_primitive)
else:
return to_type(text_or_primitive)
def hexstr_if_str(
to_type: Callable[..., T], hexstr_or_primitive: Union[bytes, int, str]
) -> T:
"""
Convert to a type, assuming that strings can be only hexstr (not unicode text)
:param to_type function: takes the arguments (primitive, hexstr=hexstr, text=text),
eg~ to_bytes, to_text, to_hex, to_int, etc
:param hexstr_or_primitive bytes, str, int: value to convert
"""
if isinstance(hexstr_or_primitive, str):
if remove_0x_prefix(hexstr_or_primitive) and not is_hex(hexstr_or_primitive):
raise ValueError(
"when sending a str, it must be a hex string. Got: {0!r}".format(
hexstr_or_primitive
)
)
return to_type(hexstr=hexstr_or_primitive)
else:
return to_type(hexstr_or_primitive)
|
ethereum/eth-utils
|
eth_utils/conversions.py
|
text_if_str
|
python
|
def text_if_str(
to_type: Callable[..., T], text_or_primitive: Union[bytes, int, str]
) -> T:
if isinstance(text_or_primitive, str):
return to_type(text=text_or_primitive)
else:
return to_type(text_or_primitive)
|
Convert to a type, assuming that strings can be only unicode text (not a hexstr)
:param to_type function: takes the arguments (primitive, hexstr=hexstr, text=text),
eg~ to_bytes, to_text, to_hex, to_int, etc
:param text_or_primitive bytes, str, int: value to convert
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/conversions.py#L119-L132
| null |
from typing import Callable, Union, cast
from .decorators import validate_conversion_arguments
from .encoding import big_endian_to_int, int_to_big_endian
from .hexadecimal import add_0x_prefix, decode_hex, encode_hex, is_hex, remove_0x_prefix
from .types import is_boolean, is_integer, is_string
from .typing import HexStr, Primitives, T
@validate_conversion_arguments
def to_hex(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> HexStr:
"""
Auto converts any supported value into its hex representation.
Trims leading zeros, as defined in:
https://github.com/ethereum/wiki/wiki/JSON-RPC#hex-value-encoding
"""
if hexstr is not None:
return HexStr(add_0x_prefix(hexstr.lower()))
if text is not None:
return HexStr(encode_hex(text.encode("utf-8")))
if is_boolean(primitive):
return HexStr("0x1") if primitive else HexStr("0x0")
if isinstance(primitive, (bytes, bytearray)):
return HexStr(encode_hex(primitive))
elif is_string(primitive):
raise TypeError(
"Unsupported type: The primitive argument must be one of: bytes,"
"bytearray, int or bool and not str"
)
if is_integer(primitive):
return HexStr(hex(cast(int, primitive)))
raise TypeError(
"Unsupported type: '{0}'. Must be one of: bool, str, bytes, bytearray"
"or int.".format(repr(type(primitive)))
)
@validate_conversion_arguments
def to_int(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> int:
"""
Converts value to its integer representation.
Values are converted this way:
* primitive:
* bytes, bytearrays: big-endian integer
* bool: True => 1, False => 0
* hexstr: interpret hex as integer
* text: interpret as string of digits, like '12' => 12
"""
if hexstr is not None:
return int(hexstr, 16)
elif text is not None:
return int(text)
elif isinstance(primitive, (bytes, bytearray)):
return big_endian_to_int(primitive)
elif isinstance(primitive, str):
raise TypeError("Pass in strings with keyword hexstr or text")
elif isinstance(primitive, (int, bool)):
return int(primitive)
else:
raise TypeError(
"Invalid type. Expected one of int/bool/str/bytes/bytearray. Got "
"{0}".format(type(primitive))
)
@validate_conversion_arguments
def to_bytes(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> bytes:
if is_boolean(primitive):
return b"\x01" if primitive else b"\x00"
elif isinstance(primitive, bytearray):
return bytes(primitive)
elif isinstance(primitive, bytes):
return primitive
elif is_integer(primitive):
return to_bytes(hexstr=to_hex(primitive))
elif hexstr is not None:
if len(hexstr) % 2:
# type check ignored here because casting an Optional arg to str is not possible
hexstr = "0x0" + remove_0x_prefix(hexstr) # type: ignore
return decode_hex(hexstr)
elif text is not None:
return text.encode("utf-8")
raise TypeError(
"expected a bool, int, byte or bytearray in first arg, or keyword of hexstr or text"
)
@validate_conversion_arguments
def to_text(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> str:
if hexstr is not None:
return to_bytes(hexstr=hexstr).decode("utf-8")
elif text is not None:
return text
elif isinstance(primitive, str):
return to_text(hexstr=primitive)
elif isinstance(primitive, (bytes, bytearray)):
return primitive.decode("utf-8")
elif is_integer(primitive):
byte_encoding = int_to_big_endian(primitive) # type: ignore
return to_text(byte_encoding)
raise TypeError("Expected an int, bytes, bytearray or hexstr.")
def hexstr_if_str(
to_type: Callable[..., T], hexstr_or_primitive: Union[bytes, int, str]
) -> T:
"""
Convert to a type, assuming that strings can be only hexstr (not unicode text)
:param to_type function: takes the arguments (primitive, hexstr=hexstr, text=text),
eg~ to_bytes, to_text, to_hex, to_int, etc
:param hexstr_or_primitive bytes, str, int: value to convert
"""
if isinstance(hexstr_or_primitive, str):
if remove_0x_prefix(hexstr_or_primitive) and not is_hex(hexstr_or_primitive):
raise ValueError(
"when sending a str, it must be a hex string. Got: {0!r}".format(
hexstr_or_primitive
)
)
return to_type(hexstr=hexstr_or_primitive)
else:
return to_type(hexstr_or_primitive)
|
ethereum/eth-utils
|
eth_utils/conversions.py
|
hexstr_if_str
|
python
|
def hexstr_if_str(
to_type: Callable[..., T], hexstr_or_primitive: Union[bytes, int, str]
) -> T:
if isinstance(hexstr_or_primitive, str):
if remove_0x_prefix(hexstr_or_primitive) and not is_hex(hexstr_or_primitive):
raise ValueError(
"when sending a str, it must be a hex string. Got: {0!r}".format(
hexstr_or_primitive
)
)
return to_type(hexstr=hexstr_or_primitive)
else:
return to_type(hexstr_or_primitive)
|
Convert to a type, assuming that strings can be only hexstr (not unicode text)
:param to_type function: takes the arguments (primitive, hexstr=hexstr, text=text),
eg~ to_bytes, to_text, to_hex, to_int, etc
:param hexstr_or_primitive bytes, str, int: value to convert
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/conversions.py#L135-L154
|
[
"def is_hex(value: Any) -> bool:\n if not is_text(value):\n raise TypeError(\n \"is_hex requires text typed arguments. Got: {0}\".format(repr(value))\n )\n elif value.lower() == \"0x\":\n return True\n\n unprefixed_value = remove_0x_prefix(value)\n if len(unprefixed_value) % 2 != 0:\n value_to_decode = \"0\" + unprefixed_value\n else:\n value_to_decode = unprefixed_value\n\n if any(char not in string.hexdigits for char in value_to_decode):\n return False\n\n try:\n value_as_bytes = codecs.decode(value_to_decode, \"hex\") # type: ignore\n except binascii.Error:\n return False\n except TypeError:\n return False\n else:\n return bool(value_as_bytes)\n",
"def remove_0x_prefix(value: str) -> str:\n if is_0x_prefixed(value):\n return value[2:]\n return value\n"
] |
from typing import Callable, Union, cast
from .decorators import validate_conversion_arguments
from .encoding import big_endian_to_int, int_to_big_endian
from .hexadecimal import add_0x_prefix, decode_hex, encode_hex, is_hex, remove_0x_prefix
from .types import is_boolean, is_integer, is_string
from .typing import HexStr, Primitives, T
@validate_conversion_arguments
def to_hex(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> HexStr:
"""
Auto converts any supported value into its hex representation.
Trims leading zeros, as defined in:
https://github.com/ethereum/wiki/wiki/JSON-RPC#hex-value-encoding
"""
if hexstr is not None:
return HexStr(add_0x_prefix(hexstr.lower()))
if text is not None:
return HexStr(encode_hex(text.encode("utf-8")))
if is_boolean(primitive):
return HexStr("0x1") if primitive else HexStr("0x0")
if isinstance(primitive, (bytes, bytearray)):
return HexStr(encode_hex(primitive))
elif is_string(primitive):
raise TypeError(
"Unsupported type: The primitive argument must be one of: bytes,"
"bytearray, int or bool and not str"
)
if is_integer(primitive):
return HexStr(hex(cast(int, primitive)))
raise TypeError(
"Unsupported type: '{0}'. Must be one of: bool, str, bytes, bytearray"
"or int.".format(repr(type(primitive)))
)
@validate_conversion_arguments
def to_int(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> int:
"""
Converts value to its integer representation.
Values are converted this way:
* primitive:
* bytes, bytearrays: big-endian integer
* bool: True => 1, False => 0
* hexstr: interpret hex as integer
* text: interpret as string of digits, like '12' => 12
"""
if hexstr is not None:
return int(hexstr, 16)
elif text is not None:
return int(text)
elif isinstance(primitive, (bytes, bytearray)):
return big_endian_to_int(primitive)
elif isinstance(primitive, str):
raise TypeError("Pass in strings with keyword hexstr or text")
elif isinstance(primitive, (int, bool)):
return int(primitive)
else:
raise TypeError(
"Invalid type. Expected one of int/bool/str/bytes/bytearray. Got "
"{0}".format(type(primitive))
)
@validate_conversion_arguments
def to_bytes(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> bytes:
if is_boolean(primitive):
return b"\x01" if primitive else b"\x00"
elif isinstance(primitive, bytearray):
return bytes(primitive)
elif isinstance(primitive, bytes):
return primitive
elif is_integer(primitive):
return to_bytes(hexstr=to_hex(primitive))
elif hexstr is not None:
if len(hexstr) % 2:
# type check ignored here because casting an Optional arg to str is not possible
hexstr = "0x0" + remove_0x_prefix(hexstr) # type: ignore
return decode_hex(hexstr)
elif text is not None:
return text.encode("utf-8")
raise TypeError(
"expected a bool, int, byte or bytearray in first arg, or keyword of hexstr or text"
)
@validate_conversion_arguments
def to_text(
primitive: Primitives = None, hexstr: HexStr = None, text: str = None
) -> str:
if hexstr is not None:
return to_bytes(hexstr=hexstr).decode("utf-8")
elif text is not None:
return text
elif isinstance(primitive, str):
return to_text(hexstr=primitive)
elif isinstance(primitive, (bytes, bytearray)):
return primitive.decode("utf-8")
elif is_integer(primitive):
byte_encoding = int_to_big_endian(primitive) # type: ignore
return to_text(byte_encoding)
raise TypeError("Expected an int, bytes, bytearray or hexstr.")
def text_if_str(
to_type: Callable[..., T], text_or_primitive: Union[bytes, int, str]
) -> T:
"""
Convert to a type, assuming that strings can be only unicode text (not a hexstr)
:param to_type function: takes the arguments (primitive, hexstr=hexstr, text=text),
eg~ to_bytes, to_text, to_hex, to_int, etc
:param text_or_primitive bytes, str, int: value to convert
"""
if isinstance(text_or_primitive, str):
return to_type(text=text_or_primitive)
else:
return to_type(text_or_primitive)
|
ethereum/eth-utils
|
eth_utils/decorators.py
|
validate_conversion_arguments
|
python
|
def validate_conversion_arguments(to_wrap):
    """
    Validates arguments for conversion functions.

    - Only a single argument is present
    - Kwarg must be 'primitive' 'hexstr' or 'text'
    - If it is 'hexstr' or 'text' that it is a text type
    """
    @functools.wraps(to_wrap)
    def inner(*args, **kwargs):
        _assert_one_val(*args, **kwargs)
        if kwargs:
            _validate_supported_kwarg(kwargs)
        # With no positional value and no 'primitive' kwarg, the single
        # value arrived as hexstr/text, which must be text typed.
        if not args and "primitive" not in kwargs:
            _assert_hexstr_or_text_kwarg_is_text_type(**kwargs)
        return to_wrap(*args, **kwargs)
    return inner
|
Validates arguments for conversion functions.
- Only a single argument is present
- Kwarg must be 'primitive' 'hexstr' or 'text'
- If it is 'hexstr' or 'text' that it is a text type
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/decorators.py#L59-L77
| null |
import functools
import itertools
from typing import Any, Callable, Dict, Iterable, Type
from .types import is_text
class combomethod(object):
def __init__(self, method):
self.method = method
def __get__(self, obj=None, objtype=None):
@functools.wraps(self.method)
def _wrapper(*args, **kwargs):
if obj is not None:
return self.method(obj, *args, **kwargs)
else:
return self.method(objtype, *args, **kwargs)
return _wrapper
def _has_one_val(*args, **kwargs):
vals = itertools.chain(args, kwargs.values())
not_nones = list(filter(lambda val: val is not None, vals))
return len(not_nones) == 1
def _assert_one_val(*args, **kwargs):
if not _has_one_val(*args, **kwargs):
raise TypeError(
"Exactly one of the passed values can be specified. "
"Instead, values were: %r, %r" % (args, kwargs)
)
def _hexstr_or_text_kwarg_is_text_type(**kwargs):
value = kwargs["hexstr"] if "hexstr" in kwargs else kwargs["text"]
return is_text(value)
def _assert_hexstr_or_text_kwarg_is_text_type(**kwargs):
if not _hexstr_or_text_kwarg_is_text_type(**kwargs):
raise TypeError(
"Arguments passed as hexstr or text must be of text type. "
"Instead, value was: %r" % (repr(next(list(kwargs.values()))))
)
def _validate_supported_kwarg(kwargs):
if next(iter(kwargs)) not in ["primitive", "hexstr", "text"]:
raise TypeError(
"Kwarg must be 'primitive', 'hexstr', or 'text'. "
"Instead, kwarg was: %r" % (next(iter(kwargs)))
)
def return_arg_type(at_position):
"""
Wrap the return value with the result of `type(args[at_position])`
"""
def decorator(to_wrap):
@functools.wraps(to_wrap)
def wrapper(*args, **kwargs):
result = to_wrap(*args, **kwargs)
ReturnType = type(args[at_position])
return ReturnType(result)
return wrapper
return decorator
def replace_exceptions(
old_to_new_exceptions: Dict[Type[BaseException], Type[BaseException]]
) -> Callable[..., Any]:
"""
Replaces old exceptions with new exceptions to be raised in their place.
"""
old_exceptions = tuple(old_to_new_exceptions.keys())
def decorator(to_wrap: Callable[..., Any]) -> Callable[..., Any]:
@functools.wraps(to_wrap)
# String type b/c pypy3 throws SegmentationFault with Iterable as arg on nested fn
# Ignore so we don't have to import `Iterable`
def wrapper(
*args: Iterable[Any], **kwargs: Dict[str, Any]
) -> Callable[..., Any]:
try:
return to_wrap(*args, **kwargs)
except old_exceptions as err:
try:
raise old_to_new_exceptions[type(err)] from err
except KeyError:
raise TypeError(
"could not look up new exception to use for %r" % err
) from err
return wrapper
return decorator
|
ethereum/eth-utils
|
eth_utils/decorators.py
|
return_arg_type
|
python
|
def return_arg_type(at_position):
def decorator(to_wrap):
@functools.wraps(to_wrap)
def wrapper(*args, **kwargs):
result = to_wrap(*args, **kwargs)
ReturnType = type(args[at_position])
return ReturnType(result)
return wrapper
return decorator
|
Wrap the return value with the result of `type(args[at_position])`
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/decorators.py#L80-L94
| null |
import functools
import itertools
from typing import Any, Callable, Dict, Iterable, Type
from .types import is_text
class combomethod(object):
def __init__(self, method):
self.method = method
def __get__(self, obj=None, objtype=None):
@functools.wraps(self.method)
def _wrapper(*args, **kwargs):
if obj is not None:
return self.method(obj, *args, **kwargs)
else:
return self.method(objtype, *args, **kwargs)
return _wrapper
def _has_one_val(*args, **kwargs):
vals = itertools.chain(args, kwargs.values())
not_nones = list(filter(lambda val: val is not None, vals))
return len(not_nones) == 1
def _assert_one_val(*args, **kwargs):
if not _has_one_val(*args, **kwargs):
raise TypeError(
"Exactly one of the passed values can be specified. "
"Instead, values were: %r, %r" % (args, kwargs)
)
def _hexstr_or_text_kwarg_is_text_type(**kwargs):
value = kwargs["hexstr"] if "hexstr" in kwargs else kwargs["text"]
return is_text(value)
def _assert_hexstr_or_text_kwarg_is_text_type(**kwargs):
if not _hexstr_or_text_kwarg_is_text_type(**kwargs):
raise TypeError(
"Arguments passed as hexstr or text must be of text type. "
"Instead, value was: %r" % (repr(next(list(kwargs.values()))))
)
def _validate_supported_kwarg(kwargs):
if next(iter(kwargs)) not in ["primitive", "hexstr", "text"]:
raise TypeError(
"Kwarg must be 'primitive', 'hexstr', or 'text'. "
"Instead, kwarg was: %r" % (next(iter(kwargs)))
)
def validate_conversion_arguments(to_wrap):
"""
Validates arguments for conversion functions.
- Only a single argument is present
- Kwarg must be 'primitive' 'hexstr' or 'text'
- If it is 'hexstr' or 'text' that it is a text type
"""
@functools.wraps(to_wrap)
def wrapper(*args, **kwargs):
_assert_one_val(*args, **kwargs)
if kwargs:
_validate_supported_kwarg(kwargs)
if len(args) == 0 and "primitive" not in kwargs:
_assert_hexstr_or_text_kwarg_is_text_type(**kwargs)
return to_wrap(*args, **kwargs)
return wrapper
def replace_exceptions(
old_to_new_exceptions: Dict[Type[BaseException], Type[BaseException]]
) -> Callable[..., Any]:
"""
Replaces old exceptions with new exceptions to be raised in their place.
"""
old_exceptions = tuple(old_to_new_exceptions.keys())
def decorator(to_wrap: Callable[..., Any]) -> Callable[..., Any]:
@functools.wraps(to_wrap)
# String type b/c pypy3 throws SegmentationFault with Iterable as arg on nested fn
# Ignore so we don't have to import `Iterable`
def wrapper(
*args: Iterable[Any], **kwargs: Dict[str, Any]
) -> Callable[..., Any]:
try:
return to_wrap(*args, **kwargs)
except old_exceptions as err:
try:
raise old_to_new_exceptions[type(err)] from err
except KeyError:
raise TypeError(
"could not look up new exception to use for %r" % err
) from err
return wrapper
return decorator
|
ethereum/eth-utils
|
eth_utils/decorators.py
|
replace_exceptions
|
python
|
def replace_exceptions(
old_to_new_exceptions: Dict[Type[BaseException], Type[BaseException]]
) -> Callable[..., Any]:
old_exceptions = tuple(old_to_new_exceptions.keys())
def decorator(to_wrap: Callable[..., Any]) -> Callable[..., Any]:
@functools.wraps(to_wrap)
# String type b/c pypy3 throws SegmentationFault with Iterable as arg on nested fn
# Ignore so we don't have to import `Iterable`
def wrapper(
*args: Iterable[Any], **kwargs: Dict[str, Any]
) -> Callable[..., Any]:
try:
return to_wrap(*args, **kwargs)
except old_exceptions as err:
try:
raise old_to_new_exceptions[type(err)] from err
except KeyError:
raise TypeError(
"could not look up new exception to use for %r" % err
) from err
return wrapper
return decorator
|
Replaces old exceptions with new exceptions to be raised in their place.
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/decorators.py#L97-L124
| null |
import functools
import itertools
from typing import Any, Callable, Dict, Iterable, Type
from .types import is_text
class combomethod(object):
def __init__(self, method):
self.method = method
def __get__(self, obj=None, objtype=None):
@functools.wraps(self.method)
def _wrapper(*args, **kwargs):
if obj is not None:
return self.method(obj, *args, **kwargs)
else:
return self.method(objtype, *args, **kwargs)
return _wrapper
def _has_one_val(*args, **kwargs):
vals = itertools.chain(args, kwargs.values())
not_nones = list(filter(lambda val: val is not None, vals))
return len(not_nones) == 1
def _assert_one_val(*args, **kwargs):
if not _has_one_val(*args, **kwargs):
raise TypeError(
"Exactly one of the passed values can be specified. "
"Instead, values were: %r, %r" % (args, kwargs)
)
def _hexstr_or_text_kwarg_is_text_type(**kwargs):
value = kwargs["hexstr"] if "hexstr" in kwargs else kwargs["text"]
return is_text(value)
def _assert_hexstr_or_text_kwarg_is_text_type(**kwargs):
if not _hexstr_or_text_kwarg_is_text_type(**kwargs):
raise TypeError(
"Arguments passed as hexstr or text must be of text type. "
"Instead, value was: %r" % (repr(next(list(kwargs.values()))))
)
def _validate_supported_kwarg(kwargs):
if next(iter(kwargs)) not in ["primitive", "hexstr", "text"]:
raise TypeError(
"Kwarg must be 'primitive', 'hexstr', or 'text'. "
"Instead, kwarg was: %r" % (next(iter(kwargs)))
)
def validate_conversion_arguments(to_wrap):
"""
Validates arguments for conversion functions.
- Only a single argument is present
- Kwarg must be 'primitive' 'hexstr' or 'text'
- If it is 'hexstr' or 'text' that it is a text type
"""
@functools.wraps(to_wrap)
def wrapper(*args, **kwargs):
_assert_one_val(*args, **kwargs)
if kwargs:
_validate_supported_kwarg(kwargs)
if len(args) == 0 and "primitive" not in kwargs:
_assert_hexstr_or_text_kwarg_is_text_type(**kwargs)
return to_wrap(*args, **kwargs)
return wrapper
def return_arg_type(at_position):
"""
Wrap the return value with the result of `type(args[at_position])`
"""
def decorator(to_wrap):
@functools.wraps(to_wrap)
def wrapper(*args, **kwargs):
result = to_wrap(*args, **kwargs)
ReturnType = type(args[at_position])
return ReturnType(result)
return wrapper
return decorator
|
ethereum/eth-utils
|
eth_utils/abi.py
|
collapse_if_tuple
|
python
|
def collapse_if_tuple(abi):
typ = abi["type"]
if not typ.startswith("tuple"):
return typ
delimited = ",".join(collapse_if_tuple(c) for c in abi["components"])
# Whatever comes after "tuple" is the array dims. The ABI spec states that
# this will have the form "", "[]", or "[k]".
array_dim = typ[5:]
collapsed = "({}){}".format(delimited, array_dim)
return collapsed
|
Converts a tuple from a dict to a parenthesized list of its types.
>>> from eth_utils.abi import collapse_if_tuple
>>> collapse_if_tuple(
... {
... 'components': [
... {'name': 'anAddress', 'type': 'address'},
... {'name': 'anInt', 'type': 'uint256'},
... {'name': 'someBytes', 'type': 'bytes'},
... ],
... 'type': 'tuple',
... }
... )
'(address,uint256,bytes)'
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/abi.py#L6-L32
| null |
from typing import Any, Dict
from .crypto import keccak
def _abi_to_signature(abi: Dict[str, Any]) -> str:
function_signature = "{fn_name}({fn_input_types})".format(
fn_name=abi["name"],
fn_input_types=",".join(
[collapse_if_tuple(abi_input) for abi_input in abi.get("inputs", [])]
),
)
return function_signature
def function_signature_to_4byte_selector(event_signature: str) -> bytes:
return keccak(text=event_signature.replace(" ", ""))[:4]
def function_abi_to_4byte_selector(function_abi: Dict[str, Any]) -> bytes:
function_signature = _abi_to_signature(function_abi)
return function_signature_to_4byte_selector(function_signature)
def event_signature_to_log_topic(event_signature: str) -> bytes:
return keccak(text=event_signature.replace(" ", ""))
def event_abi_to_log_topic(event_abi: Dict[str, Any]) -> bytes:
event_signature = _abi_to_signature(event_abi)
return event_signature_to_log_topic(event_signature)
|
ethereum/eth-utils
|
eth_utils/address.py
|
is_hex_address
|
python
|
def is_hex_address(value: Any) -> bool:
if not is_text(value):
return False
elif not is_hex(value):
return False
else:
unprefixed = remove_0x_prefix(value)
return len(unprefixed) == 40
|
Checks if the given string of text type is an address in hexadecimal encoded form.
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/address.py#L10-L20
|
[
"def is_hex(value: Any) -> bool:\n if not is_text(value):\n raise TypeError(\n \"is_hex requires text typed arguments. Got: {0}\".format(repr(value))\n )\n elif value.lower() == \"0x\":\n return True\n\n unprefixed_value = remove_0x_prefix(value)\n if len(unprefixed_value) % 2 != 0:\n value_to_decode = \"0\" + unprefixed_value\n else:\n value_to_decode = unprefixed_value\n\n if any(char not in string.hexdigits for char in value_to_decode):\n return False\n\n try:\n value_as_bytes = codecs.decode(value_to_decode, \"hex\") # type: ignore\n except binascii.Error:\n return False\n except TypeError:\n return False\n else:\n return bool(value_as_bytes)\n",
"def remove_0x_prefix(value: str) -> str:\n if is_0x_prefixed(value):\n return value[2:]\n return value\n",
"def is_text(value: Any) -> bool:\n return isinstance(value, text_types)\n"
] |
from typing import Any, AnyStr
from .crypto import keccak
from .hexadecimal import add_0x_prefix, decode_hex, encode_hex, is_hex, remove_0x_prefix
from .conversions import hexstr_if_str, to_hex
from .types import is_bytes, is_text
from .typing import Address, AnyAddress, ChecksumAddress, HexAddress
def is_binary_address(value: Any) -> bool:
"""
Checks if the given string is an address in raw bytes form.
"""
if not is_bytes(value):
return False
elif len(value) != 20:
return False
else:
return True
def is_address(value: Any) -> bool:
"""
Checks if the given string in a supported value
is an address in any of the known formats.
"""
if is_checksum_formatted_address(value):
return is_checksum_address(value)
elif is_hex_address(value):
return True
elif is_binary_address(value):
return True
else:
return False
def to_normalized_address(value: AnyStr) -> HexAddress:
"""
Converts an address to its normalized hexadecimal representation.
"""
try:
hex_address = hexstr_if_str(to_hex, value).lower()
except AttributeError:
raise TypeError(
"Value must be any string, instead got type {}".format(type(value))
)
if is_address(hex_address):
return HexAddress(hex_address)
else:
raise ValueError(
"Unknown format {}, attempted to normalize to {}".format(value, hex_address)
)
def is_normalized_address(value: Any) -> bool:
"""
Returns whether the provided value is an address in its normalized form.
"""
if not is_address(value):
return False
else:
return value == to_normalized_address(value)
def to_canonical_address(address: AnyStr) -> Address:
"""
Given any supported representation of an address
returns its canonical form (20 byte long string).
"""
return Address(decode_hex(to_normalized_address(address)))
def is_canonical_address(address: Any) -> bool:
"""
Returns `True` if the `value` is an address in its canonical form.
"""
if not is_bytes(address) or len(address) != 20:
return False
return address == to_canonical_address(address)
def is_same_address(left: AnyAddress, right: AnyAddress) -> bool:
"""
Checks if both addresses are same or not.
"""
if not is_address(left) or not is_address(right):
raise ValueError("Both values must be valid addresses")
else:
return to_normalized_address(left) == to_normalized_address(right)
def to_checksum_address(value: AnyStr) -> ChecksumAddress:
"""
Makes a checksum address given a supported format.
"""
norm_address = to_normalized_address(value)
address_hash = encode_hex(keccak(text=remove_0x_prefix(norm_address)))
checksum_address = add_0x_prefix(
"".join(
(
norm_address[i].upper()
if int(address_hash[i], 16) > 7
else norm_address[i]
)
for i in range(2, 42)
)
)
return ChecksumAddress(HexAddress(checksum_address))
def is_checksum_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
return value == to_checksum_address(value)
def is_checksum_formatted_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).lower():
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).upper():
return False
else:
return True
|
ethereum/eth-utils
|
eth_utils/address.py
|
is_binary_address
|
python
|
def is_binary_address(value: Any) -> bool:
if not is_bytes(value):
return False
elif len(value) != 20:
return False
else:
return True
|
Checks if the given string is an address in raw bytes form.
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/address.py#L23-L32
|
[
"def is_bytes(value: Any) -> bool:\n return isinstance(value, bytes_types)\n"
] |
from typing import Any, AnyStr
from .crypto import keccak
from .hexadecimal import add_0x_prefix, decode_hex, encode_hex, is_hex, remove_0x_prefix
from .conversions import hexstr_if_str, to_hex
from .types import is_bytes, is_text
from .typing import Address, AnyAddress, ChecksumAddress, HexAddress
def is_hex_address(value: Any) -> bool:
"""
Checks if the given string of text type is an address in hexadecimal encoded form.
"""
if not is_text(value):
return False
elif not is_hex(value):
return False
else:
unprefixed = remove_0x_prefix(value)
return len(unprefixed) == 40
def is_address(value: Any) -> bool:
"""
Checks if the given string in a supported value
is an address in any of the known formats.
"""
if is_checksum_formatted_address(value):
return is_checksum_address(value)
elif is_hex_address(value):
return True
elif is_binary_address(value):
return True
else:
return False
def to_normalized_address(value: AnyStr) -> HexAddress:
"""
Converts an address to its normalized hexadecimal representation.
"""
try:
hex_address = hexstr_if_str(to_hex, value).lower()
except AttributeError:
raise TypeError(
"Value must be any string, instead got type {}".format(type(value))
)
if is_address(hex_address):
return HexAddress(hex_address)
else:
raise ValueError(
"Unknown format {}, attempted to normalize to {}".format(value, hex_address)
)
def is_normalized_address(value: Any) -> bool:
"""
Returns whether the provided value is an address in its normalized form.
"""
if not is_address(value):
return False
else:
return value == to_normalized_address(value)
def to_canonical_address(address: AnyStr) -> Address:
"""
Given any supported representation of an address
returns its canonical form (20 byte long string).
"""
return Address(decode_hex(to_normalized_address(address)))
def is_canonical_address(address: Any) -> bool:
"""
Returns `True` if the `value` is an address in its canonical form.
"""
if not is_bytes(address) or len(address) != 20:
return False
return address == to_canonical_address(address)
def is_same_address(left: AnyAddress, right: AnyAddress) -> bool:
"""
Checks if both addresses are same or not.
"""
if not is_address(left) or not is_address(right):
raise ValueError("Both values must be valid addresses")
else:
return to_normalized_address(left) == to_normalized_address(right)
def to_checksum_address(value: AnyStr) -> ChecksumAddress:
"""
Makes a checksum address given a supported format.
"""
norm_address = to_normalized_address(value)
address_hash = encode_hex(keccak(text=remove_0x_prefix(norm_address)))
checksum_address = add_0x_prefix(
"".join(
(
norm_address[i].upper()
if int(address_hash[i], 16) > 7
else norm_address[i]
)
for i in range(2, 42)
)
)
return ChecksumAddress(HexAddress(checksum_address))
def is_checksum_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
return value == to_checksum_address(value)
def is_checksum_formatted_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).lower():
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).upper():
return False
else:
return True
|
ethereum/eth-utils
|
eth_utils/address.py
|
is_address
|
python
|
def is_address(value: Any) -> bool:
if is_checksum_formatted_address(value):
return is_checksum_address(value)
elif is_hex_address(value):
return True
elif is_binary_address(value):
return True
else:
return False
|
Checks if the given string in a supported value
is an address in any of the known formats.
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/address.py#L35-L47
|
[
"def is_binary_address(value: Any) -> bool:\n \"\"\"\n Checks if the given string is an address in raw bytes form.\n \"\"\"\n if not is_bytes(value):\n return False\n elif len(value) != 20:\n return False\n else:\n return True\n",
"def is_checksum_address(value: Any) -> bool:\n if not is_text(value):\n return False\n\n if not is_hex_address(value):\n return False\n return value == to_checksum_address(value)\n",
"def is_checksum_formatted_address(value: Any) -> bool:\n if not is_text(value):\n return False\n\n if not is_hex_address(value):\n return False\n elif remove_0x_prefix(value) == remove_0x_prefix(value).lower():\n return False\n elif remove_0x_prefix(value) == remove_0x_prefix(value).upper():\n return False\n else:\n return True\n",
"def is_hex_address(value: Any) -> bool:\n \"\"\"\n Checks if the given string of text type is an address in hexadecimal encoded form.\n \"\"\"\n if not is_text(value):\n return False\n elif not is_hex(value):\n return False\n else:\n unprefixed = remove_0x_prefix(value)\n return len(unprefixed) == 40\n"
] |
from typing import Any, AnyStr
from .crypto import keccak
from .hexadecimal import add_0x_prefix, decode_hex, encode_hex, is_hex, remove_0x_prefix
from .conversions import hexstr_if_str, to_hex
from .types import is_bytes, is_text
from .typing import Address, AnyAddress, ChecksumAddress, HexAddress
def is_hex_address(value: Any) -> bool:
"""
Checks if the given string of text type is an address in hexadecimal encoded form.
"""
if not is_text(value):
return False
elif not is_hex(value):
return False
else:
unprefixed = remove_0x_prefix(value)
return len(unprefixed) == 40
def is_binary_address(value: Any) -> bool:
"""
Checks if the given string is an address in raw bytes form.
"""
if not is_bytes(value):
return False
elif len(value) != 20:
return False
else:
return True
def to_normalized_address(value: AnyStr) -> HexAddress:
"""
Converts an address to its normalized hexadecimal representation.
"""
try:
hex_address = hexstr_if_str(to_hex, value).lower()
except AttributeError:
raise TypeError(
"Value must be any string, instead got type {}".format(type(value))
)
if is_address(hex_address):
return HexAddress(hex_address)
else:
raise ValueError(
"Unknown format {}, attempted to normalize to {}".format(value, hex_address)
)
def is_normalized_address(value: Any) -> bool:
"""
Returns whether the provided value is an address in its normalized form.
"""
if not is_address(value):
return False
else:
return value == to_normalized_address(value)
def to_canonical_address(address: AnyStr) -> Address:
"""
Given any supported representation of an address
returns its canonical form (20 byte long string).
"""
return Address(decode_hex(to_normalized_address(address)))
def is_canonical_address(address: Any) -> bool:
"""
Returns `True` if the `value` is an address in its canonical form.
"""
if not is_bytes(address) or len(address) != 20:
return False
return address == to_canonical_address(address)
def is_same_address(left: AnyAddress, right: AnyAddress) -> bool:
"""
Checks if both addresses are same or not.
"""
if not is_address(left) or not is_address(right):
raise ValueError("Both values must be valid addresses")
else:
return to_normalized_address(left) == to_normalized_address(right)
def to_checksum_address(value: AnyStr) -> ChecksumAddress:
"""
Makes a checksum address given a supported format.
"""
norm_address = to_normalized_address(value)
address_hash = encode_hex(keccak(text=remove_0x_prefix(norm_address)))
checksum_address = add_0x_prefix(
"".join(
(
norm_address[i].upper()
if int(address_hash[i], 16) > 7
else norm_address[i]
)
for i in range(2, 42)
)
)
return ChecksumAddress(HexAddress(checksum_address))
def is_checksum_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
return value == to_checksum_address(value)
def is_checksum_formatted_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).lower():
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).upper():
return False
else:
return True
|
ethereum/eth-utils
|
eth_utils/address.py
|
to_normalized_address
|
python
|
def to_normalized_address(value: AnyStr) -> HexAddress:
try:
hex_address = hexstr_if_str(to_hex, value).lower()
except AttributeError:
raise TypeError(
"Value must be any string, instead got type {}".format(type(value))
)
if is_address(hex_address):
return HexAddress(hex_address)
else:
raise ValueError(
"Unknown format {}, attempted to normalize to {}".format(value, hex_address)
)
|
Converts an address to its normalized hexadecimal representation.
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/address.py#L50-L65
|
[
"def is_address(value: Any) -> bool:\n \"\"\"\n Checks if the given string in a supported value\n is an address in any of the known formats.\n \"\"\"\n if is_checksum_formatted_address(value):\n return is_checksum_address(value)\n elif is_hex_address(value):\n return True\n elif is_binary_address(value):\n return True\n else:\n return False\n",
"def hexstr_if_str(\n to_type: Callable[..., T], hexstr_or_primitive: Union[bytes, int, str]\n) -> T:\n \"\"\"\n Convert to a type, assuming that strings can be only hexstr (not unicode text)\n\n :param to_type function: takes the arguments (primitive, hexstr=hexstr, text=text),\n eg~ to_bytes, to_text, to_hex, to_int, etc\n :param hexstr_or_primitive bytes, str, int: value to convert\n \"\"\"\n if isinstance(hexstr_or_primitive, str):\n if remove_0x_prefix(hexstr_or_primitive) and not is_hex(hexstr_or_primitive):\n raise ValueError(\n \"when sending a str, it must be a hex string. Got: {0!r}\".format(\n hexstr_or_primitive\n )\n )\n return to_type(hexstr=hexstr_or_primitive)\n else:\n return to_type(hexstr_or_primitive)\n"
] |
from typing import Any, AnyStr
from .crypto import keccak
from .hexadecimal import add_0x_prefix, decode_hex, encode_hex, is_hex, remove_0x_prefix
from .conversions import hexstr_if_str, to_hex
from .types import is_bytes, is_text
from .typing import Address, AnyAddress, ChecksumAddress, HexAddress
def is_hex_address(value: Any) -> bool:
"""
Checks if the given string of text type is an address in hexadecimal encoded form.
"""
if not is_text(value):
return False
elif not is_hex(value):
return False
else:
unprefixed = remove_0x_prefix(value)
return len(unprefixed) == 40
def is_binary_address(value: Any) -> bool:
"""
Checks if the given string is an address in raw bytes form.
"""
if not is_bytes(value):
return False
elif len(value) != 20:
return False
else:
return True
def is_address(value: Any) -> bool:
"""
Checks if the given string in a supported value
is an address in any of the known formats.
"""
if is_checksum_formatted_address(value):
return is_checksum_address(value)
elif is_hex_address(value):
return True
elif is_binary_address(value):
return True
else:
return False
def is_normalized_address(value: Any) -> bool:
"""
Returns whether the provided value is an address in its normalized form.
"""
if not is_address(value):
return False
else:
return value == to_normalized_address(value)
def to_canonical_address(address: AnyStr) -> Address:
"""
Given any supported representation of an address
returns its canonical form (20 byte long string).
"""
return Address(decode_hex(to_normalized_address(address)))
def is_canonical_address(address: Any) -> bool:
"""
Returns `True` if the `value` is an address in its canonical form.
"""
if not is_bytes(address) or len(address) != 20:
return False
return address == to_canonical_address(address)
def is_same_address(left: AnyAddress, right: AnyAddress) -> bool:
"""
Checks if both addresses are same or not.
"""
if not is_address(left) or not is_address(right):
raise ValueError("Both values must be valid addresses")
else:
return to_normalized_address(left) == to_normalized_address(right)
def to_checksum_address(value: AnyStr) -> ChecksumAddress:
"""
Makes a checksum address given a supported format.
"""
norm_address = to_normalized_address(value)
address_hash = encode_hex(keccak(text=remove_0x_prefix(norm_address)))
checksum_address = add_0x_prefix(
"".join(
(
norm_address[i].upper()
if int(address_hash[i], 16) > 7
else norm_address[i]
)
for i in range(2, 42)
)
)
return ChecksumAddress(HexAddress(checksum_address))
def is_checksum_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
return value == to_checksum_address(value)
def is_checksum_formatted_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).lower():
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).upper():
return False
else:
return True
|
ethereum/eth-utils
|
eth_utils/address.py
|
is_normalized_address
|
python
|
def is_normalized_address(value: Any) -> bool:
if not is_address(value):
return False
else:
return value == to_normalized_address(value)
|
Returns whether the provided value is an address in its normalized form.
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/address.py#L68-L75
|
[
"def is_address(value: Any) -> bool:\n \"\"\"\n Checks if the given string in a supported value\n is an address in any of the known formats.\n \"\"\"\n if is_checksum_formatted_address(value):\n return is_checksum_address(value)\n elif is_hex_address(value):\n return True\n elif is_binary_address(value):\n return True\n else:\n return False\n",
"def to_normalized_address(value: AnyStr) -> HexAddress:\n \"\"\"\n Converts an address to its normalized hexadecimal representation.\n \"\"\"\n try:\n hex_address = hexstr_if_str(to_hex, value).lower()\n except AttributeError:\n raise TypeError(\n \"Value must be any string, instead got type {}\".format(type(value))\n )\n if is_address(hex_address):\n return HexAddress(hex_address)\n else:\n raise ValueError(\n \"Unknown format {}, attempted to normalize to {}\".format(value, hex_address)\n )\n"
] |
from typing import Any, AnyStr
from .crypto import keccak
from .hexadecimal import add_0x_prefix, decode_hex, encode_hex, is_hex, remove_0x_prefix
from .conversions import hexstr_if_str, to_hex
from .types import is_bytes, is_text
from .typing import Address, AnyAddress, ChecksumAddress, HexAddress
def is_hex_address(value: Any) -> bool:
"""
Checks if the given string of text type is an address in hexadecimal encoded form.
"""
if not is_text(value):
return False
elif not is_hex(value):
return False
else:
unprefixed = remove_0x_prefix(value)
return len(unprefixed) == 40
def is_binary_address(value: Any) -> bool:
"""
Checks if the given string is an address in raw bytes form.
"""
if not is_bytes(value):
return False
elif len(value) != 20:
return False
else:
return True
def is_address(value: Any) -> bool:
"""
Checks if the given string in a supported value
is an address in any of the known formats.
"""
if is_checksum_formatted_address(value):
return is_checksum_address(value)
elif is_hex_address(value):
return True
elif is_binary_address(value):
return True
else:
return False
def to_normalized_address(value: AnyStr) -> HexAddress:
"""
Converts an address to its normalized hexadecimal representation.
"""
try:
hex_address = hexstr_if_str(to_hex, value).lower()
except AttributeError:
raise TypeError(
"Value must be any string, instead got type {}".format(type(value))
)
if is_address(hex_address):
return HexAddress(hex_address)
else:
raise ValueError(
"Unknown format {}, attempted to normalize to {}".format(value, hex_address)
)
def to_canonical_address(address: AnyStr) -> Address:
"""
Given any supported representation of an address
returns its canonical form (20 byte long string).
"""
return Address(decode_hex(to_normalized_address(address)))
def is_canonical_address(address: Any) -> bool:
"""
Returns `True` if the `value` is an address in its canonical form.
"""
if not is_bytes(address) or len(address) != 20:
return False
return address == to_canonical_address(address)
def is_same_address(left: AnyAddress, right: AnyAddress) -> bool:
"""
Checks if both addresses are same or not.
"""
if not is_address(left) or not is_address(right):
raise ValueError("Both values must be valid addresses")
else:
return to_normalized_address(left) == to_normalized_address(right)
def to_checksum_address(value: AnyStr) -> ChecksumAddress:
"""
Makes a checksum address given a supported format.
"""
norm_address = to_normalized_address(value)
address_hash = encode_hex(keccak(text=remove_0x_prefix(norm_address)))
checksum_address = add_0x_prefix(
"".join(
(
norm_address[i].upper()
if int(address_hash[i], 16) > 7
else norm_address[i]
)
for i in range(2, 42)
)
)
return ChecksumAddress(HexAddress(checksum_address))
def is_checksum_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
return value == to_checksum_address(value)
def is_checksum_formatted_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).lower():
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).upper():
return False
else:
return True
|
ethereum/eth-utils
|
eth_utils/address.py
|
is_canonical_address
|
python
|
def is_canonical_address(address: Any) -> bool:
if not is_bytes(address) or len(address) != 20:
return False
return address == to_canonical_address(address)
|
Returns `True` if the `value` is an address in its canonical form.
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/address.py#L86-L92
|
[
"def to_canonical_address(address: AnyStr) -> Address:\n \"\"\"\n Given any supported representation of an address\n returns its canonical form (20 byte long string).\n \"\"\"\n return Address(decode_hex(to_normalized_address(address)))\n",
"def is_bytes(value: Any) -> bool:\n return isinstance(value, bytes_types)\n"
] |
from typing import Any, AnyStr
from .crypto import keccak
from .hexadecimal import add_0x_prefix, decode_hex, encode_hex, is_hex, remove_0x_prefix
from .conversions import hexstr_if_str, to_hex
from .types import is_bytes, is_text
from .typing import Address, AnyAddress, ChecksumAddress, HexAddress
def is_hex_address(value: Any) -> bool:
"""
Checks if the given string of text type is an address in hexadecimal encoded form.
"""
if not is_text(value):
return False
elif not is_hex(value):
return False
else:
unprefixed = remove_0x_prefix(value)
return len(unprefixed) == 40
def is_binary_address(value: Any) -> bool:
"""
Checks if the given string is an address in raw bytes form.
"""
if not is_bytes(value):
return False
elif len(value) != 20:
return False
else:
return True
def is_address(value: Any) -> bool:
"""
Checks if the given string in a supported value
is an address in any of the known formats.
"""
if is_checksum_formatted_address(value):
return is_checksum_address(value)
elif is_hex_address(value):
return True
elif is_binary_address(value):
return True
else:
return False
def to_normalized_address(value: AnyStr) -> HexAddress:
"""
Converts an address to its normalized hexadecimal representation.
"""
try:
hex_address = hexstr_if_str(to_hex, value).lower()
except AttributeError:
raise TypeError(
"Value must be any string, instead got type {}".format(type(value))
)
if is_address(hex_address):
return HexAddress(hex_address)
else:
raise ValueError(
"Unknown format {}, attempted to normalize to {}".format(value, hex_address)
)
def is_normalized_address(value: Any) -> bool:
"""
Returns whether the provided value is an address in its normalized form.
"""
if not is_address(value):
return False
else:
return value == to_normalized_address(value)
def to_canonical_address(address: AnyStr) -> Address:
"""
Given any supported representation of an address
returns its canonical form (20 byte long string).
"""
return Address(decode_hex(to_normalized_address(address)))
def is_same_address(left: AnyAddress, right: AnyAddress) -> bool:
"""
Checks if both addresses are same or not.
"""
if not is_address(left) or not is_address(right):
raise ValueError("Both values must be valid addresses")
else:
return to_normalized_address(left) == to_normalized_address(right)
def to_checksum_address(value: AnyStr) -> ChecksumAddress:
"""
Makes a checksum address given a supported format.
"""
norm_address = to_normalized_address(value)
address_hash = encode_hex(keccak(text=remove_0x_prefix(norm_address)))
checksum_address = add_0x_prefix(
"".join(
(
norm_address[i].upper()
if int(address_hash[i], 16) > 7
else norm_address[i]
)
for i in range(2, 42)
)
)
return ChecksumAddress(HexAddress(checksum_address))
def is_checksum_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
return value == to_checksum_address(value)
def is_checksum_formatted_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).lower():
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).upper():
return False
else:
return True
|
ethereum/eth-utils
|
eth_utils/address.py
|
is_same_address
|
python
|
def is_same_address(left: AnyAddress, right: AnyAddress) -> bool:
if not is_address(left) or not is_address(right):
raise ValueError("Both values must be valid addresses")
else:
return to_normalized_address(left) == to_normalized_address(right)
|
Checks if both addresses are same or not.
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/address.py#L95-L102
|
[
"def is_address(value: Any) -> bool:\n \"\"\"\n Checks if the given string in a supported value\n is an address in any of the known formats.\n \"\"\"\n if is_checksum_formatted_address(value):\n return is_checksum_address(value)\n elif is_hex_address(value):\n return True\n elif is_binary_address(value):\n return True\n else:\n return False\n"
] |
from typing import Any, AnyStr
from .crypto import keccak
from .hexadecimal import add_0x_prefix, decode_hex, encode_hex, is_hex, remove_0x_prefix
from .conversions import hexstr_if_str, to_hex
from .types import is_bytes, is_text
from .typing import Address, AnyAddress, ChecksumAddress, HexAddress
def is_hex_address(value: Any) -> bool:
"""
Checks if the given string of text type is an address in hexadecimal encoded form.
"""
if not is_text(value):
return False
elif not is_hex(value):
return False
else:
unprefixed = remove_0x_prefix(value)
return len(unprefixed) == 40
def is_binary_address(value: Any) -> bool:
"""
Checks if the given string is an address in raw bytes form.
"""
if not is_bytes(value):
return False
elif len(value) != 20:
return False
else:
return True
def is_address(value: Any) -> bool:
"""
Checks if the given string in a supported value
is an address in any of the known formats.
"""
if is_checksum_formatted_address(value):
return is_checksum_address(value)
elif is_hex_address(value):
return True
elif is_binary_address(value):
return True
else:
return False
def to_normalized_address(value: AnyStr) -> HexAddress:
"""
Converts an address to its normalized hexadecimal representation.
"""
try:
hex_address = hexstr_if_str(to_hex, value).lower()
except AttributeError:
raise TypeError(
"Value must be any string, instead got type {}".format(type(value))
)
if is_address(hex_address):
return HexAddress(hex_address)
else:
raise ValueError(
"Unknown format {}, attempted to normalize to {}".format(value, hex_address)
)
def is_normalized_address(value: Any) -> bool:
"""
Returns whether the provided value is an address in its normalized form.
"""
if not is_address(value):
return False
else:
return value == to_normalized_address(value)
def to_canonical_address(address: AnyStr) -> Address:
"""
Given any supported representation of an address
returns its canonical form (20 byte long string).
"""
return Address(decode_hex(to_normalized_address(address)))
def is_canonical_address(address: Any) -> bool:
"""
Returns `True` if the `value` is an address in its canonical form.
"""
if not is_bytes(address) or len(address) != 20:
return False
return address == to_canonical_address(address)
def to_checksum_address(value: AnyStr) -> ChecksumAddress:
"""
Makes a checksum address given a supported format.
"""
norm_address = to_normalized_address(value)
address_hash = encode_hex(keccak(text=remove_0x_prefix(norm_address)))
checksum_address = add_0x_prefix(
"".join(
(
norm_address[i].upper()
if int(address_hash[i], 16) > 7
else norm_address[i]
)
for i in range(2, 42)
)
)
return ChecksumAddress(HexAddress(checksum_address))
def is_checksum_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
return value == to_checksum_address(value)
def is_checksum_formatted_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).lower():
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).upper():
return False
else:
return True
|
ethereum/eth-utils
|
eth_utils/address.py
|
to_checksum_address
|
python
|
def to_checksum_address(value: AnyStr) -> ChecksumAddress:
norm_address = to_normalized_address(value)
address_hash = encode_hex(keccak(text=remove_0x_prefix(norm_address)))
checksum_address = add_0x_prefix(
"".join(
(
norm_address[i].upper()
if int(address_hash[i], 16) > 7
else norm_address[i]
)
for i in range(2, 42)
)
)
return ChecksumAddress(HexAddress(checksum_address))
|
Makes a checksum address given a supported format.
|
train
|
https://github.com/ethereum/eth-utils/blob/d9889753a8e016d2fcd64ade0e2db3844486551d/eth_utils/address.py#L105-L122
|
[
"def to_normalized_address(value: AnyStr) -> HexAddress:\n \"\"\"\n Converts an address to its normalized hexadecimal representation.\n \"\"\"\n try:\n hex_address = hexstr_if_str(to_hex, value).lower()\n except AttributeError:\n raise TypeError(\n \"Value must be any string, instead got type {}\".format(type(value))\n )\n if is_address(hex_address):\n return HexAddress(hex_address)\n else:\n raise ValueError(\n \"Unknown format {}, attempted to normalize to {}\".format(value, hex_address)\n )\n",
"def keccak(\n primitive: Union[bytes, int, bool] = None, hexstr: str = None, text: str = None\n) -> bytes:\n return keccak_256(to_bytes(primitive, hexstr, text))\n",
"def add_0x_prefix(value: str) -> str:\n if is_0x_prefixed(value):\n return value\n return \"0x\" + value\n",
"def encode_hex(value: AnyStr) -> str:\n if not is_string(value):\n raise TypeError(\"Value must be an instance of str or unicode\")\n binary_hex = codecs.encode(value, \"hex\") # type: ignore\n return add_0x_prefix(binary_hex.decode(\"ascii\"))\n",
"def remove_0x_prefix(value: str) -> str:\n if is_0x_prefixed(value):\n return value[2:]\n return value\n"
] |
from typing import Any, AnyStr
from .crypto import keccak
from .hexadecimal import add_0x_prefix, decode_hex, encode_hex, is_hex, remove_0x_prefix
from .conversions import hexstr_if_str, to_hex
from .types import is_bytes, is_text
from .typing import Address, AnyAddress, ChecksumAddress, HexAddress
def is_hex_address(value: Any) -> bool:
"""
Checks if the given string of text type is an address in hexadecimal encoded form.
"""
if not is_text(value):
return False
elif not is_hex(value):
return False
else:
unprefixed = remove_0x_prefix(value)
return len(unprefixed) == 40
def is_binary_address(value: Any) -> bool:
"""
Checks if the given string is an address in raw bytes form.
"""
if not is_bytes(value):
return False
elif len(value) != 20:
return False
else:
return True
def is_address(value: Any) -> bool:
"""
Checks if the given string in a supported value
is an address in any of the known formats.
"""
if is_checksum_formatted_address(value):
return is_checksum_address(value)
elif is_hex_address(value):
return True
elif is_binary_address(value):
return True
else:
return False
def to_normalized_address(value: AnyStr) -> HexAddress:
"""
Converts an address to its normalized hexadecimal representation.
"""
try:
hex_address = hexstr_if_str(to_hex, value).lower()
except AttributeError:
raise TypeError(
"Value must be any string, instead got type {}".format(type(value))
)
if is_address(hex_address):
return HexAddress(hex_address)
else:
raise ValueError(
"Unknown format {}, attempted to normalize to {}".format(value, hex_address)
)
def is_normalized_address(value: Any) -> bool:
"""
Returns whether the provided value is an address in its normalized form.
"""
if not is_address(value):
return False
else:
return value == to_normalized_address(value)
def to_canonical_address(address: AnyStr) -> Address:
"""
Given any supported representation of an address
returns its canonical form (20 byte long string).
"""
return Address(decode_hex(to_normalized_address(address)))
def is_canonical_address(address: Any) -> bool:
"""
Returns `True` if the `value` is an address in its canonical form.
"""
if not is_bytes(address) or len(address) != 20:
return False
return address == to_canonical_address(address)
def is_same_address(left: AnyAddress, right: AnyAddress) -> bool:
"""
Checks if both addresses are same or not.
"""
if not is_address(left) or not is_address(right):
raise ValueError("Both values must be valid addresses")
else:
return to_normalized_address(left) == to_normalized_address(right)
def is_checksum_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
return value == to_checksum_address(value)
def is_checksum_formatted_address(value: Any) -> bool:
if not is_text(value):
return False
if not is_hex_address(value):
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).lower():
return False
elif remove_0x_prefix(value) == remove_0x_prefix(value).upper():
return False
else:
return True
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/arduino.py
|
ArduinoBoard.open
|
python
|
def open(self):
if not self._is_connected:
print("Connecting to arduino on {}... ".format(self.device),end="")
self.comm = serial.Serial()
self.comm.port = self.device
self.comm.baudrate = self.baud_rate
self.comm.timeout = self.timeout
self.dtr = self.enable_dtr
self.comm.open()
time.sleep(self.settle_time)
self._is_connected = True
print("done.")
|
Open the serial connection.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/arduino.py#L147-L166
| null |
class ArduinoBoard:
"""
Class for connecting to an Arduino board over USB using PyCmdMessenger.
The board holds the serial handle (which, in turn, holds the device name,
baud rate, and timeout) and the board parameters (size of data types in
bytes, etc.). The default parameters are for an ArduinoUno board.
"""
def __init__(self,
device,
baud_rate=9600,
timeout=1.0,
settle_time=2.0,
enable_dtr=False,
int_bytes=2,
long_bytes=4,
float_bytes=4,
double_bytes=4):
"""
Serial connection parameters:
device: serial device (e.g. /dev/ttyACM0)
baud_rate: baud rate set in the compiled sketch
timeout: timeout for serial reading and writing
settle_time: how long to wait before trying to access serial port
enable_dtr: use DTR (set to False to prevent arduino reset on connect)
Board input parameters:
int_bytes: number of bytes to store an integer
long_bytes: number of bytes to store a long
float_bytes: number of bytes to store a float
double_bytes: number of bytes to store a double
These can be looked up here:
https://www.arduino.cc/en/Reference/HomePage (under data types)
The default parameters work for ATMega328p boards.
Note that binary strings are passed as little-endian (which should
work for all arduinos)
"""
self.device = device
self.baud_rate = baud_rate
self.timeout = timeout
self.settle_time = settle_time
self.enable_dtr = enable_dtr
self.int_bytes = int_bytes
self.long_bytes = long_bytes
self.float_bytes = float_bytes
self.double_bytes = double_bytes
self.baud_rate = baud_rate
# Open up the serial port
self._is_connected = False
self.open()
#----------------------------------------------------------------------
# Figure out proper type limits given the board specifications
#----------------------------------------------------------------------
self.int_min = -2**(8*self.int_bytes-1)
self.int_max = 2**(8*self.int_bytes-1) - 1
self.unsigned_int_min = 0
self.unsigned_int_max = 2**(8*self.int_bytes) - 1
self.long_min = -2**(8*self.long_bytes-1)
self.long_max = 2**(8*self.long_bytes-1) - 1
self.unsigned_long_min = 0
self.unsigned_long_max = 2**(8*self.long_bytes)-1
# Set to either IEEE 754 binary32 bit or binary64 bit
if self.float_bytes == 4:
self.float_min = -3.4028235E+38
self.float_max = 3.4028235E+38
elif self.float_bytes == 8:
self.float_min = -1e308
self.float_max = 1e308
else:
err = "float bytes should be 4 (32 bit) or 8 (64 bit)"
raise ValueError(err)
if self.double_bytes == 4:
self.double_min = -3.4028235E+38
self.double_max = 3.4028235E+38
elif self.double_bytes == 8:
self.double_min = -1e308
self.double_max = 1e308
else:
err = "double bytes should be 4 (32 bit) or 8 (64 bit)"
raise ValueError(err)
#----------------------------------------------------------------------
# Create a self.XXX_type for each type based on its byte number. This
# type can then be passed into struct.pack and struct.unpack calls to
# properly format the bytes strings.
#----------------------------------------------------------------------
INTEGER_TYPE = {2:"<h",4:"<i",8:"<l"}
UNSIGNED_INTEGER_TYPE = {2:"<H",4:"<I",8:"<L"}
FLOAT_TYPE = {4:"<f",8:"<d"}
try:
self.int_type = INTEGER_TYPE[self.int_bytes]
self.unsigned_int_type = UNSIGNED_INTEGER_TYPE[self.int_bytes]
except KeyError:
keys = list(INTEGER_TYPE.keys())
keys.sort()
err = "integer bytes must be one of {}".format(keys())
raise ValueError(err)
try:
self.long_type = INTEGER_TYPE[self.long_bytes]
self.unsigned_long_type = UNSIGNED_INTEGER_TYPE[self.long_bytes]
except KeyError:
keys = list(INTEGER_TYPE.keys())
keys.sort()
err = "long bytes must be one of {}".format(keys())
raise ValueError(err)
try:
self.float_type = FLOAT_TYPE[self.float_bytes]
self.double_type = FLOAT_TYPE[self.double_bytes]
except KeyError:
keys = list(self.FLOAT_TYPE.keys())
keys.sort()
err = "float and double bytes must be one of {}".format(keys())
raise ValueError(err)
def open(self):
"""
Open the serial connection.
"""
if not self._is_connected:
print("Connecting to arduino on {}... ".format(self.device),end="")
self.comm = serial.Serial()
self.comm.port = self.device
self.comm.baudrate = self.baud_rate
self.comm.timeout = self.timeout
self.dtr = self.enable_dtr
self.comm.open()
time.sleep(self.settle_time)
self._is_connected = True
print("done.")
def read(self):
"""
Wrap serial read method.
"""
return self.comm.read()
def readline(self):
"""
Wrap serial readline method.
"""
return self.comm.readline()
def write(self,msg):
"""
Wrap serial write method.
"""
self.comm.write(msg)
def close(self):
"""
Close serial connection.
"""
if self._is_connected:
self.comm.close()
self._is_connected = False
@property
def connected(self):
"""
Return connection state. Connected (True), disconnected (False).
"""
return self._is_connected
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/arduino.py
|
ArduinoBoard.close
|
python
|
def close(self):
if self._is_connected:
self.comm.close()
self._is_connected = False
|
Close serial connection.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/arduino.py#L189-L196
| null |
class ArduinoBoard:
"""
Class for connecting to an Arduino board over USB using PyCmdMessenger.
The board holds the serial handle (which, in turn, holds the device name,
baud rate, and timeout) and the board parameters (size of data types in
bytes, etc.). The default parameters are for an ArduinoUno board.
"""
def __init__(self,
device,
baud_rate=9600,
timeout=1.0,
settle_time=2.0,
enable_dtr=False,
int_bytes=2,
long_bytes=4,
float_bytes=4,
double_bytes=4):
"""
Serial connection parameters:
device: serial device (e.g. /dev/ttyACM0)
baud_rate: baud rate set in the compiled sketch
timeout: timeout for serial reading and writing
settle_time: how long to wait before trying to access serial port
enable_dtr: use DTR (set to False to prevent arduino reset on connect)
Board input parameters:
int_bytes: number of bytes to store an integer
long_bytes: number of bytes to store a long
float_bytes: number of bytes to store a float
double_bytes: number of bytes to store a double
These can be looked up here:
https://www.arduino.cc/en/Reference/HomePage (under data types)
The default parameters work for ATMega328p boards.
Note that binary strings are passed as little-endian (which should
work for all arduinos)
"""
self.device = device
self.baud_rate = baud_rate
self.timeout = timeout
self.settle_time = settle_time
self.enable_dtr = enable_dtr
self.int_bytes = int_bytes
self.long_bytes = long_bytes
self.float_bytes = float_bytes
self.double_bytes = double_bytes
self.baud_rate = baud_rate
# Open up the serial port
self._is_connected = False
self.open()
#----------------------------------------------------------------------
# Figure out proper type limits given the board specifications
#----------------------------------------------------------------------
self.int_min = -2**(8*self.int_bytes-1)
self.int_max = 2**(8*self.int_bytes-1) - 1
self.unsigned_int_min = 0
self.unsigned_int_max = 2**(8*self.int_bytes) - 1
self.long_min = -2**(8*self.long_bytes-1)
self.long_max = 2**(8*self.long_bytes-1) - 1
self.unsigned_long_min = 0
self.unsigned_long_max = 2**(8*self.long_bytes)-1
# Set to either IEEE 754 binary32 bit or binary64 bit
if self.float_bytes == 4:
self.float_min = -3.4028235E+38
self.float_max = 3.4028235E+38
elif self.float_bytes == 8:
self.float_min = -1e308
self.float_max = 1e308
else:
err = "float bytes should be 4 (32 bit) or 8 (64 bit)"
raise ValueError(err)
if self.double_bytes == 4:
self.double_min = -3.4028235E+38
self.double_max = 3.4028235E+38
elif self.double_bytes == 8:
self.double_min = -1e308
self.double_max = 1e308
else:
err = "double bytes should be 4 (32 bit) or 8 (64 bit)"
raise ValueError(err)
#----------------------------------------------------------------------
# Create a self.XXX_type for each type based on its byte number. This
# type can then be passed into struct.pack and struct.unpack calls to
# properly format the bytes strings.
#----------------------------------------------------------------------
INTEGER_TYPE = {2:"<h",4:"<i",8:"<l"}
UNSIGNED_INTEGER_TYPE = {2:"<H",4:"<I",8:"<L"}
FLOAT_TYPE = {4:"<f",8:"<d"}
try:
self.int_type = INTEGER_TYPE[self.int_bytes]
self.unsigned_int_type = UNSIGNED_INTEGER_TYPE[self.int_bytes]
except KeyError:
keys = list(INTEGER_TYPE.keys())
keys.sort()
err = "integer bytes must be one of {}".format(keys())
raise ValueError(err)
try:
self.long_type = INTEGER_TYPE[self.long_bytes]
self.unsigned_long_type = UNSIGNED_INTEGER_TYPE[self.long_bytes]
except KeyError:
keys = list(INTEGER_TYPE.keys())
keys.sort()
err = "long bytes must be one of {}".format(keys())
raise ValueError(err)
try:
self.float_type = FLOAT_TYPE[self.float_bytes]
self.double_type = FLOAT_TYPE[self.double_bytes]
except KeyError:
keys = list(self.FLOAT_TYPE.keys())
keys.sort()
err = "float and double bytes must be one of {}".format(keys())
raise ValueError(err)
def open(self):
"""
Open the serial connection.
"""
if not self._is_connected:
print("Connecting to arduino on {}... ".format(self.device),end="")
self.comm = serial.Serial()
self.comm.port = self.device
self.comm.baudrate = self.baud_rate
self.comm.timeout = self.timeout
self.dtr = self.enable_dtr
self.comm.open()
time.sleep(self.settle_time)
self._is_connected = True
print("done.")
def read(self):
"""
Wrap serial read method.
"""
return self.comm.read()
def readline(self):
"""
Wrap serial readline method.
"""
return self.comm.readline()
def write(self,msg):
"""
Wrap serial write method.
"""
self.comm.write(msg)
@property
def connected(self):
"""
Return connection state. Connected (True), disconnected (False).
"""
return self._is_connected
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger.send
|
python
|
def send(self,cmd,*args,arg_formats=None):
# Turn the command into an integer.
try:
command_as_int = self._cmd_name_to_int[cmd]
except KeyError:
err = "Command '{}' not recognized.\n".format(cmd)
raise ValueError(err)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(args))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,args)
if len(args) > 0:
if len(arg_format_list) != len(args):
err = "Number of argument formats must match the number of arguments."
raise ValueError(err)
# Go through each argument and create a bytes representation in the
# proper format to send. Escape appropriate characters.
fields = ["{}".format(command_as_int).encode("ascii")]
for i, a in enumerate(args):
fields.append(self._send_methods[arg_format_list[i]](a))
fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
# Make something that looks like cmd,field1,field2,field3;
compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
# Send the message.
self.board.write(compiled_bytes)
|
Send a command (which may or may not have associated arguments) to an
arduino using the CmdMessage protocol. The command and any parameters
should be passed as direct arguments to send.
arg_formats is an optional string that specifies the formats to use for
each argument when passed to the arduino. If specified here,
arg_formats supercedes formats specified on initialization.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L120-L173
|
[
"def _treat_star_format(self,arg_format_list,args):\n \"\"\"\n Deal with \"*\" format if specified.\n \"\"\"\n\n num_stars = len([a for a in arg_format_list if a == \"*\"])\n if num_stars > 0:\n\n # Make sure the repeated format argument only occurs once, is last,\n # and that there is at least one format in addition to it.\n if num_stars == 1 and arg_format_list[-1] == \"*\" and len(arg_format_list) > 1:\n\n # Trim * from end\n arg_format_list = arg_format_list[:-1]\n\n # If we need extra arguments...\n if len(arg_format_list) < len(args):\n f = arg_format_list[-1]\n len_diff = len(args) - len(arg_format_list)\n tmp = list(arg_format_list)\n tmp.extend([f for i in range(len_diff)])\n arg_format_list = \"\".join(tmp)\n else:\n err = \"'*' format must occur only once, be at end of string, and be preceded by at least one other format.\"\n raise ValueError(err)\n\n return arg_format_list \n",
"def write(self,msg):\n \"\"\"\n Wrap serial write method.\n \"\"\"\n\n self.comm.write(msg)\n"
] |
class CmdMessenger:
"""
Basic interface for interfacing over a serial connection to an arduino
using the CmdMessenger library.
"""
def __init__(self,
board_instance,
commands,
field_separator=",",
command_separator=";",
escape_separator="/",
warnings=True):
"""
Input:
board_instance:
instance of ArduinoBoard initialized with correct serial
connection (points to correct serial with correct baud rate) and
correct board parameters (float bytes, etc.)
commands:
a list or tuple of commands specified in the arduino .ino file
*in the same order* they are listed there. commands should be
a list of lists, where the first element in the list specifies
the command name and the second the formats for the arguments.
(e.g. commands = [["who_are_you",""],["my_name_is","s"]])
field_separator:
character that separates fields within a message
Default: ","
command_separator:
character that separates messages (commands) from each other
Default: ";"
escape_separator:
escape character to allow separators within messages.
Default: "/"
warnings:
warnings for user
Default: True
The separators and escape_separator should match what's
in the arduino code that initializes the CmdMessenger. The default
separator values match the default values as of CmdMessenger 4.0.
"""
self.board = board_instance
if not self.board.connected:
err = "Arduino not connected on {}\n".format(self.board.device)
raise IOError(err)
self.commands = commands[:]
self.field_separator = field_separator
self.command_separator = command_separator
self.escape_separator = escape_separator
self.give_warnings = warnings
self._cmd_name_to_int = {}
self._int_to_cmd_name = {}
self._cmd_name_to_format = {}
for i, c in enumerate(commands):
self._cmd_name_to_int[c[0]] = i
self._int_to_cmd_name[i] = c[0]
self._cmd_name_to_format[c[0]] = c[1]
self._byte_field_sep = self.field_separator.encode("ascii")
self._byte_command_sep = self.command_separator.encode("ascii")
self._byte_escape_sep = self.escape_separator.encode("ascii")
self._escaped_characters = [self._byte_field_sep,
self._byte_command_sep,
self._byte_escape_sep,
b'\0']
self._null_escape_re = re.compile(b'\0')
self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
self.command_separator,
self.escape_separator).encode('ascii'))
self._send_methods = {"c":self._send_char,
"b":self._send_byte,
"i":self._send_int,
"I":self._send_unsigned_int,
"l":self._send_long,
"L":self._send_unsigned_long,
"f":self._send_float,
"d":self._send_double,
"s":self._send_string,
"?":self._send_bool,
"g":self._send_guess}
self._recv_methods = {"c":self._recv_char,
"b":self._recv_byte,
"i":self._recv_int,
"I":self._recv_unsigned_int,
"l":self._recv_long,
"L":self._recv_unsigned_long,
"f":self._recv_float,
"d":self._recv_double,
"s":self._recv_string,
"?":self._recv_bool,
"g":self._recv_guess}
def send(self,cmd,*args,arg_formats=None):
"""
Send a command (which may or may not have associated arguments) to an
arduino using the CmdMessage protocol. The command and any parameters
should be passed as direct arguments to send.
arg_formats is an optional string that specifies the formats to use for
each argument when passed to the arduino. If specified here,
arg_formats supercedes formats specified on initialization.
"""
# Turn the command into an integer.
try:
command_as_int = self._cmd_name_to_int[cmd]
except KeyError:
err = "Command '{}' not recognized.\n".format(cmd)
raise ValueError(err)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(args))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,args)
if len(args) > 0:
if len(arg_format_list) != len(args):
err = "Number of argument formats must match the number of arguments."
raise ValueError(err)
# Go through each argument and create a bytes representation in the
# proper format to send. Escape appropriate characters.
fields = ["{}".format(command_as_int).encode("ascii")]
for i, a in enumerate(args):
fields.append(self._send_methods[arg_format_list[i]](a))
fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
# Make something that looks like cmd,field1,field2,field3;
compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
# Send the message.
self.board.write(compiled_bytes)
def receive(self,arg_formats=None):
"""
Recieve commands coming off the serial port.
arg_formats is an optimal keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supercedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty characater
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
def _treat_star_format(self,arg_format_list,args):
"""
Deal with "*" format if specified.
"""
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value)
def _send_double(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.double_type,value)
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
def _send_guess(self,value):
"""
Send the argument as a string in a way that should (probably, maybe!) be
processed properly by C++ calls like atoi, atof, etc. This method is
NOT RECOMMENDED, particularly for floats, because values are often
mangled silently. Instead, specify a format (e.g. "f") and use the
CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
read the values on the arduino side.
"""
if type(value) != str and type(value) != bytes and self.give_warnings:
w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
if type(value) == float:
return "{:.10e}".format(value).encode("ascii")
elif type(value) == bool:
return "{}".format(int(value)).encode("ascii")
else:
return self._send_string(value)
def _recv_char(self,value):
"""
Recieve a char in binary format, returning as string.
"""
return struct.unpack("c",value)[0].decode("ascii")
def _recv_byte(self,value):
"""
Recieve a byte in binary format, returning as python int.
"""
return struct.unpack("B",value)[0]
def _recv_int(self,value):
"""
Recieve an int in binary format, returning as python int.
"""
return struct.unpack(self.board.int_type,value)[0]
def _recv_unsigned_int(self,value):
"""
Recieve an unsigned int in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_int_type,value)[0]
def _recv_long(self,value):
"""
Recieve a long in binary format, returning as python int.
"""
return struct.unpack(self.board.long_type,value)[0]
def _recv_unsigned_long(self,value):
"""
Recieve an unsigned long in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_long_type,value)[0]
def _recv_float(self,value):
"""
Recieve a float in binary format, returning as python float.
"""
return struct.unpack(self.board.float_type,value)[0]
def _recv_double(self,value):
"""
Recieve a double in binary format, returning as python float.
"""
return struct.unpack(self.board.double_type,value)[0]
def _recv_string(self,value):
"""
Recieve a binary (bytes) string, returning a python string.
"""
s = value.decode('ascii')
# Strip null characters
s = s.strip("\x00")
# Strip other white space
s = s.strip()
return s
def _recv_bool(self,value):
"""
Receive a binary bool, return as python bool.
"""
return struct.unpack("?",value)[0]
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger.receive
|
python
|
def receive(self,arg_formats=None):
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty characater
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
|
Recieve commands coming off the serial port.
arg_formats is an optimal keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supercedes
the formats specified on initialization.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L175-L289
|
[
"def _treat_star_format(self,arg_format_list,args):\n \"\"\"\n Deal with \"*\" format if specified.\n \"\"\"\n\n num_stars = len([a for a in arg_format_list if a == \"*\"])\n if num_stars > 0:\n\n # Make sure the repeated format argument only occurs once, is last,\n # and that there is at least one format in addition to it.\n if num_stars == 1 and arg_format_list[-1] == \"*\" and len(arg_format_list) > 1:\n\n # Trim * from end\n arg_format_list = arg_format_list[:-1]\n\n # If we need extra arguments...\n if len(arg_format_list) < len(args):\n f = arg_format_list[-1]\n len_diff = len(args) - len(arg_format_list)\n tmp = list(arg_format_list)\n tmp.extend([f for i in range(len_diff)])\n arg_format_list = \"\".join(tmp)\n else:\n err = \"'*' format must occur only once, be at end of string, and be preceded by at least one other format.\"\n raise ValueError(err)\n\n return arg_format_list \n",
"def read(self):\n \"\"\"\n Wrap serial read method.\n \"\"\"\n\n return self.comm.read()\n"
] |
class CmdMessenger:
"""
Basic interface for interfacing over a serial connection to an arduino
using the CmdMessenger library.
"""
def __init__(self,
board_instance,
commands,
field_separator=",",
command_separator=";",
escape_separator="/",
warnings=True):
"""
Input:
board_instance:
instance of ArduinoBoard initialized with correct serial
connection (points to correct serial with correct baud rate) and
correct board parameters (float bytes, etc.)
commands:
a list or tuple of commands specified in the arduino .ino file
*in the same order* they are listed there. commands should be
a list of lists, where the first element in the list specifies
the command name and the second the formats for the arguments.
(e.g. commands = [["who_are_you",""],["my_name_is","s"]])
field_separator:
character that separates fields within a message
Default: ","
command_separator:
character that separates messages (commands) from each other
Default: ";"
escape_separator:
escape character to allow separators within messages.
Default: "/"
warnings:
warnings for user
Default: True
The separators and escape_separator should match what's
in the arduino code that initializes the CmdMessenger. The default
separator values match the default values as of CmdMessenger 4.0.
"""
self.board = board_instance
if not self.board.connected:
err = "Arduino not connected on {}\n".format(self.board.device)
raise IOError(err)
self.commands = commands[:]
self.field_separator = field_separator
self.command_separator = command_separator
self.escape_separator = escape_separator
self.give_warnings = warnings
self._cmd_name_to_int = {}
self._int_to_cmd_name = {}
self._cmd_name_to_format = {}
for i, c in enumerate(commands):
self._cmd_name_to_int[c[0]] = i
self._int_to_cmd_name[i] = c[0]
self._cmd_name_to_format[c[0]] = c[1]
self._byte_field_sep = self.field_separator.encode("ascii")
self._byte_command_sep = self.command_separator.encode("ascii")
self._byte_escape_sep = self.escape_separator.encode("ascii")
self._escaped_characters = [self._byte_field_sep,
self._byte_command_sep,
self._byte_escape_sep,
b'\0']
self._null_escape_re = re.compile(b'\0')
self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
self.command_separator,
self.escape_separator).encode('ascii'))
self._send_methods = {"c":self._send_char,
"b":self._send_byte,
"i":self._send_int,
"I":self._send_unsigned_int,
"l":self._send_long,
"L":self._send_unsigned_long,
"f":self._send_float,
"d":self._send_double,
"s":self._send_string,
"?":self._send_bool,
"g":self._send_guess}
self._recv_methods = {"c":self._recv_char,
"b":self._recv_byte,
"i":self._recv_int,
"I":self._recv_unsigned_int,
"l":self._recv_long,
"L":self._recv_unsigned_long,
"f":self._recv_float,
"d":self._recv_double,
"s":self._recv_string,
"?":self._recv_bool,
"g":self._recv_guess}
def send(self,cmd,*args,arg_formats=None):
"""
Send a command (which may or may not have associated arguments) to an
arduino using the CmdMessage protocol. The command and any parameters
should be passed as direct arguments to send.
arg_formats is an optional string that specifies the formats to use for
each argument when passed to the arduino. If specified here,
arg_formats supercedes formats specified on initialization.
"""
# Turn the command into an integer.
try:
command_as_int = self._cmd_name_to_int[cmd]
except KeyError:
err = "Command '{}' not recognized.\n".format(cmd)
raise ValueError(err)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(args))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,args)
if len(args) > 0:
if len(arg_format_list) != len(args):
err = "Number of argument formats must match the number of arguments."
raise ValueError(err)
# Go through each argument and create a bytes representation in the
# proper format to send. Escape appropriate characters.
fields = ["{}".format(command_as_int).encode("ascii")]
for i, a in enumerate(args):
fields.append(self._send_methods[arg_format_list[i]](a))
fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
# Make something that looks like cmd,field1,field2,field3;
compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
# Send the message.
self.board.write(compiled_bytes)
def receive(self,arg_formats=None):
"""
Recieve commands coming off the serial port.
arg_formats is an optimal keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supercedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty characater
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
def _treat_star_format(self,arg_format_list,args):
"""
Deal with "*" format if specified.
"""
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value)
def _send_double(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.double_type,value)
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
def _send_guess(self,value):
"""
Send the argument as a string in a way that should (probably, maybe!) be
processed properly by C++ calls like atoi, atof, etc. This method is
NOT RECOMMENDED, particularly for floats, because values are often
mangled silently. Instead, specify a format (e.g. "f") and use the
CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
read the values on the arduino side.
"""
if type(value) != str and type(value) != bytes and self.give_warnings:
w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
if type(value) == float:
return "{:.10e}".format(value).encode("ascii")
elif type(value) == bool:
return "{}".format(int(value)).encode("ascii")
else:
return self._send_string(value)
def _recv_char(self,value):
"""
Recieve a char in binary format, returning as string.
"""
return struct.unpack("c",value)[0].decode("ascii")
def _recv_byte(self,value):
"""
Recieve a byte in binary format, returning as python int.
"""
return struct.unpack("B",value)[0]
def _recv_int(self,value):
"""
Recieve an int in binary format, returning as python int.
"""
return struct.unpack(self.board.int_type,value)[0]
def _recv_unsigned_int(self,value):
"""
Recieve an unsigned int in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_int_type,value)[0]
def _recv_long(self,value):
"""
Recieve a long in binary format, returning as python int.
"""
return struct.unpack(self.board.long_type,value)[0]
def _recv_unsigned_long(self,value):
"""
Recieve an unsigned long in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_long_type,value)[0]
def _recv_float(self,value):
"""
Recieve a float in binary format, returning as python float.
"""
return struct.unpack(self.board.float_type,value)[0]
def _recv_double(self,value):
"""
Recieve a double in binary format, returning as python float.
"""
return struct.unpack(self.board.double_type,value)[0]
def _recv_string(self,value):
"""
Recieve a binary (bytes) string, returning a python string.
"""
s = value.decode('ascii')
# Strip null characters
s = s.strip("\x00")
# Strip other white space
s = s.strip()
return s
def _recv_bool(self,value):
"""
Receive a binary bool, return as python bool.
"""
return struct.unpack("?",value)[0]
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._treat_star_format
|
python
|
def _treat_star_format(self,arg_format_list,args):
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list
|
Deal with "*" format if specified.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L291-L317
| null |
class CmdMessenger:
"""
Basic interface for interfacing over a serial connection to an arduino
using the CmdMessenger library.
"""
def __init__(self,
board_instance,
commands,
field_separator=",",
command_separator=";",
escape_separator="/",
warnings=True):
"""
Input:
board_instance:
instance of ArduinoBoard initialized with correct serial
connection (points to correct serial with correct baud rate) and
correct board parameters (float bytes, etc.)
commands:
a list or tuple of commands specified in the arduino .ino file
*in the same order* they are listed there. commands should be
a list of lists, where the first element in the list specifies
the command name and the second the formats for the arguments.
(e.g. commands = [["who_are_you",""],["my_name_is","s"]])
field_separator:
character that separates fields within a message
Default: ","
command_separator:
character that separates messages (commands) from each other
Default: ";"
escape_separator:
escape character to allow separators within messages.
Default: "/"
warnings:
warnings for user
Default: True
The separators and escape_separator should match what's
in the arduino code that initializes the CmdMessenger. The default
separator values match the default values as of CmdMessenger 4.0.
"""
self.board = board_instance
if not self.board.connected:
err = "Arduino not connected on {}\n".format(self.board.device)
raise IOError(err)
self.commands = commands[:]
self.field_separator = field_separator
self.command_separator = command_separator
self.escape_separator = escape_separator
self.give_warnings = warnings
self._cmd_name_to_int = {}
self._int_to_cmd_name = {}
self._cmd_name_to_format = {}
for i, c in enumerate(commands):
self._cmd_name_to_int[c[0]] = i
self._int_to_cmd_name[i] = c[0]
self._cmd_name_to_format[c[0]] = c[1]
self._byte_field_sep = self.field_separator.encode("ascii")
self._byte_command_sep = self.command_separator.encode("ascii")
self._byte_escape_sep = self.escape_separator.encode("ascii")
self._escaped_characters = [self._byte_field_sep,
self._byte_command_sep,
self._byte_escape_sep,
b'\0']
self._null_escape_re = re.compile(b'\0')
self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
self.command_separator,
self.escape_separator).encode('ascii'))
self._send_methods = {"c":self._send_char,
"b":self._send_byte,
"i":self._send_int,
"I":self._send_unsigned_int,
"l":self._send_long,
"L":self._send_unsigned_long,
"f":self._send_float,
"d":self._send_double,
"s":self._send_string,
"?":self._send_bool,
"g":self._send_guess}
self._recv_methods = {"c":self._recv_char,
"b":self._recv_byte,
"i":self._recv_int,
"I":self._recv_unsigned_int,
"l":self._recv_long,
"L":self._recv_unsigned_long,
"f":self._recv_float,
"d":self._recv_double,
"s":self._recv_string,
"?":self._recv_bool,
"g":self._recv_guess}
def send(self,cmd,*args,arg_formats=None):
    """
    Send a command (which may or may not have associated arguments) to an
    arduino using the CmdMessage protocol. The command and any parameters
    should be passed as direct arguments to send.

    arg_formats is an optional string that specifies the formats to use for
    each argument when passed to the arduino. If specified here,
    arg_formats supercedes formats specified on initialization.

    Raises ValueError if the command name is unknown or if the number of
    formats does not match the number of arguments.
    """
    # Turn the command into an integer.
    try:
        command_as_int = self._cmd_name_to_int[cmd]
    except KeyError:
        err = "Command '{}' not recognized.\n".format(cmd)
        raise ValueError(err)
    # Figure out what formats to use for each argument.
    arg_format_list = []
    if arg_formats != None:
        # The user specified formats
        arg_format_list = list(arg_formats)
    else:
        try:
            # See if class was initialized with a format for arguments to this
            # command
            arg_format_list = self._cmd_name_to_format[cmd]
        except KeyError:
            # if not, guess for all arguments
            arg_format_list = ["g" for i in range(len(args))]
    # Deal with "*" format (repeat the last format for remaining args)
    arg_format_list = self._treat_star_format(arg_format_list,args)
    if len(args) > 0:
        if len(arg_format_list) != len(args):
            err = "Number of argument formats must match the number of arguments."
            raise ValueError(err)
    # Go through each argument and create a bytes representation in the
    # proper format to send. Escape appropriate characters.
    # The first field is the command id itself, sent as ascii digits.
    fields = ["{}".format(command_as_int).encode("ascii")]
    for i, a in enumerate(args):
        fields.append(self._send_methods[arg_format_list[i]](a))
        # Prefix any separator/escape/null byte in the field just appended
        # with the escape character so the arduino can parse it back.
        # NOTE(review): assumed to sit inside the loop (as in upstream
        # CmdMessenger) so *every* field is escaped, not just the last --
        # confirm against the original file's indentation.
        fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
    # Make something that looks like cmd,field1,field2,field3;
    compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
    # Send the message.
    self.board.write(compiled_bytes)
def receive(self,arg_formats=None):
    """
    Receive commands coming off the serial port.

    arg_formats is an optional keyword that specifies the formats to use to
    parse incoming arguments.  If specified here, arg_formats supercedes
    the formats specified on initialization.

    Returns (cmd_name, received_args, message_time) for a complete message,
    or None if nothing (or only line-ending noise) was waiting.  Raises
    EOFError for a message that never reached a command separator and
    ValueError if the number of formats does not match the received fields.
    """
    # Read serial input until a command separator or empty character is
    # reached
    msg = [[]]
    raw_msg = []
    escaped = False
    command_sep_found = False
    while True:
        tmp = self.board.read()
        raw_msg.append(tmp)
        if escaped:
            # Either drop the escape character or, if this wasn't really
            # an escape, keep previous escape character and new character
            if tmp in self._escaped_characters:
                msg[-1].append(tmp)
                escaped = False
            else:
                msg[-1].append(self._byte_escape_sep)
                msg[-1].append(tmp)
                escaped = False
        else:
            # look for escape character
            if tmp == self._byte_escape_sep:
                escaped = True
            # or field separator
            elif tmp == self._byte_field_sep:
                msg.append([])
            # or command separator
            elif tmp == self._byte_command_sep:
                command_sep_found = True
                break
            # or any empty characater (read timeout)
            elif tmp == b'':
                break
            # okay, must be something
            else:
                msg[-1].append(tmp)
    # No message received given timeouts
    if len(msg) == 1 and len(msg[0]) == 0:
        return None
    # Make sure the message terminated properly
    if not command_sep_found:
        # empty message (likely from line endings being included)
        joined_raw = b''.join(raw_msg)
        if joined_raw.strip() == b'':
            return None
        err = "Incomplete message ({})".format(joined_raw.decode())
        raise EOFError(err)
    # Turn message into fields
    fields = [b''.join(m) for m in msg]
    # Get the command name.
    cmd = fields[0].strip().decode()
    try:
        cmd_name = self._int_to_cmd_name[int(cmd)]
    except (ValueError,KeyError):
        # BUGFIX: an unrecognized integer id raises KeyError (dict lookup),
        # not IndexError, and cmd_name must be assigned even when warnings
        # are suppressed (previously a NameError on the return below).
        cmd_name = "unknown"
        if self.give_warnings:
            w = "Recieved unrecognized command ({}).".format(cmd)
            warnings.warn(w,Warning)
    # Figure out what formats to use for each argument.
    arg_format_list = []
    if arg_formats != None:
        # The user specified formats
        arg_format_list = list(arg_formats)
    else:
        try:
            # See if class was initialized with a format for arguments to this
            # command
            arg_format_list = self._cmd_name_to_format[cmd_name]
        except KeyError:
            # if not, guess for all arguments
            arg_format_list = ["g" for i in range(len(fields[1:]))]
    # Deal with "*" format
    arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
    if len(fields[1:]) > 0:
        if len(arg_format_list) != len(fields[1:]):
            err = "Number of argument formats must match the number of recieved arguments."
            raise ValueError(err)
    received = []
    for i, f in enumerate(fields[1:]):
        received.append(self._recv_methods[arg_format_list[i]](f))
    # Record the time the message arrived
    message_time = time.time()
    return cmd_name, received, message_time
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value)
def _send_double(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.double_type,value)
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
def _send_guess(self,value):
"""
Send the argument as a string in a way that should (probably, maybe!) be
processed properly by C++ calls like atoi, atof, etc. This method is
NOT RECOMMENDED, particularly for floats, because values are often
mangled silently. Instead, specify a format (e.g. "f") and use the
CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
read the values on the arduino side.
"""
if type(value) != str and type(value) != bytes and self.give_warnings:
w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
if type(value) == float:
return "{:.10e}".format(value).encode("ascii")
elif type(value) == bool:
return "{}".format(int(value)).encode("ascii")
else:
return self._send_string(value)
def _recv_char(self,value):
"""
Recieve a char in binary format, returning as string.
"""
return struct.unpack("c",value)[0].decode("ascii")
def _recv_byte(self,value):
"""
Recieve a byte in binary format, returning as python int.
"""
return struct.unpack("B",value)[0]
def _recv_int(self,value):
"""
Recieve an int in binary format, returning as python int.
"""
return struct.unpack(self.board.int_type,value)[0]
def _recv_unsigned_int(self,value):
"""
Recieve an unsigned int in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_int_type,value)[0]
def _recv_long(self,value):
"""
Recieve a long in binary format, returning as python int.
"""
return struct.unpack(self.board.long_type,value)[0]
def _recv_unsigned_long(self,value):
"""
Recieve an unsigned long in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_long_type,value)[0]
def _recv_float(self,value):
"""
Recieve a float in binary format, returning as python float.
"""
return struct.unpack(self.board.float_type,value)[0]
def _recv_double(self,value):
"""
Recieve a double in binary format, returning as python float.
"""
return struct.unpack(self.board.double_type,value)[0]
def _recv_string(self,value):
"""
Recieve a binary (bytes) string, returning a python string.
"""
s = value.decode('ascii')
# Strip null characters
s = s.strip("\x00")
# Strip other white space
s = s.strip()
return s
def _recv_bool(self,value):
"""
Receive a binary bool, return as python bool.
"""
return struct.unpack("?",value)[0]
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_char
|
python
|
def _send_char(self,value):
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
|
Convert a single char to a bytes object.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L319-L339
| null |
class CmdMessenger:
    """
    Basic interface for interfacing over a serial connection to an arduino
    using the CmdMessenger library.
    """

    def __init__(self,
                 board_instance,
                 commands,
                 field_separator=",",
                 command_separator=";",
                 escape_separator="/",
                 warnings=True):
        """
        Input:
            board_instance:
                instance of ArduinoBoard initialized with correct serial
                connection (points to correct serial with correct baud rate) and
                correct board parameters (float bytes, etc.)

            commands:
                a list or tuple of commands specified in the arduino .ino file
                *in the same order* they are listed there.  commands should be
                a list of lists, where the first element in the list specifies
                the command name and the second the formats for the arguments.
                (e.g. commands = [["who_are_you",""],["my_name_is","s"]])

            field_separator:
                character that separates fields within a message
                Default: ","

            command_separator:
                character that separates messages (commands) from each other
                Default: ";"

            escape_separator:
                escape character to allow separators within messages.
                Default: "/"

            warnings:
                warnings for user
                Default: True

        The separators and escape_separator should match what's
        in the arduino code that initializes the CmdMessenger.  The default
        separator values match the default values as of CmdMessenger 4.0.

        Raises IOError if the board is not connected.
        """
        self.board = board_instance
        if not self.board.connected:
            err = "Arduino not connected on {}\n".format(self.board.device)
            raise IOError(err)

        self.commands = commands[:]
        self.field_separator = field_separator
        self.command_separator = command_separator
        self.escape_separator = escape_separator
        self.give_warnings = warnings

        # Forward/reverse maps between command names, their integer ids
        # (index in `commands`), and their argument format strings.
        self._cmd_name_to_int = {}
        self._int_to_cmd_name = {}
        self._cmd_name_to_format = {}
        for i, c in enumerate(commands):
            self._cmd_name_to_int[c[0]] = i
            self._int_to_cmd_name[i] = c[0]
            self._cmd_name_to_format[c[0]] = c[1]

        # Byte versions of the separators used on the wire.
        self._byte_field_sep = self.field_separator.encode("ascii")
        self._byte_command_sep = self.command_separator.encode("ascii")
        self._byte_escape_sep = self.escape_separator.encode("ascii")
        self._escaped_characters = [self._byte_field_sep,
                                    self._byte_command_sep,
                                    self._byte_escape_sep,
                                    b'\0']

        self._null_escape_re = re.compile(b'\0')
        # NOTE(review): separators are interpolated unescaped; fine for the
        # default ",", ";", "/" but regex-special separators would need
        # re.escape -- confirm before using exotic separators.
        self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
                                                           self.command_separator,
                                                           self.escape_separator).encode('ascii'))

        # Dispatch tables mapping a one-letter format code to the method
        # that serializes/deserializes that type.
        self._send_methods = {"c":self._send_char,
                              "b":self._send_byte,
                              "i":self._send_int,
                              "I":self._send_unsigned_int,
                              "l":self._send_long,
                              "L":self._send_unsigned_long,
                              "f":self._send_float,
                              "d":self._send_double,
                              "s":self._send_string,
                              "?":self._send_bool,
                              "g":self._send_guess}

        self._recv_methods = {"c":self._recv_char,
                              "b":self._recv_byte,
                              "i":self._recv_int,
                              "I":self._recv_unsigned_int,
                              "l":self._recv_long,
                              "L":self._recv_unsigned_long,
                              "f":self._recv_float,
                              "d":self._recv_double,
                              "s":self._recv_string,
                              "?":self._recv_bool,
                              "g":self._recv_guess}

    def send(self,cmd,*args,arg_formats=None):
        """
        Send a command (which may or may not have associated arguments) to an
        arduino using the CmdMessage protocol.  The command and any parameters
        should be passed as direct arguments to send.

        arg_formats is an optional string that specifies the formats to use for
        each argument when passed to the arduino.  If specified here,
        arg_formats supercedes formats specified on initialization.
        """
        # Turn the command into an integer.
        try:
            command_as_int = self._cmd_name_to_int[cmd]
        except KeyError:
            err = "Command '{}' not recognized.\n".format(cmd)
            raise ValueError(err)

        # Figure out what formats to use for each argument.
        arg_format_list = []
        if arg_formats != None:
            # The user specified formats
            arg_format_list = list(arg_formats)
        else:
            try:
                # See if class was initialized with a format for arguments to
                # this command
                arg_format_list = self._cmd_name_to_format[cmd]
            except KeyError:
                # if not, guess for all arguments
                arg_format_list = ["g" for i in range(len(args))]

        # Deal with "*" format
        arg_format_list = self._treat_star_format(arg_format_list,args)

        if len(args) > 0:
            if len(arg_format_list) != len(args):
                err = "Number of argument formats must match the number of arguments."
                raise ValueError(err)

        # Go through each argument and create a bytes representation in the
        # proper format to send.  Escape appropriate characters.
        fields = ["{}".format(command_as_int).encode("ascii")]
        for i, a in enumerate(args):
            fields.append(self._send_methods[arg_format_list[i]](a))
            fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])

        # Make something that looks like cmd,field1,field2,field3;
        compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep

        # Send the message.
        self.board.write(compiled_bytes)

    def receive(self,arg_formats=None):
        """
        Receive commands coming off the serial port.

        arg_formats is an optional keyword that specifies the formats to use to
        parse incoming arguments.  If specified here, arg_formats supercedes
        the formats specified on initialization.
        """
        # Read serial input until a command separator or empty character is
        # reached
        msg = [[]]
        raw_msg = []
        escaped = False
        command_sep_found = False
        while True:
            tmp = self.board.read()
            raw_msg.append(tmp)

            if escaped:
                # Either drop the escape character or, if this wasn't really
                # an escape, keep previous escape character and new character
                if tmp in self._escaped_characters:
                    msg[-1].append(tmp)
                    escaped = False
                else:
                    msg[-1].append(self._byte_escape_sep)
                    msg[-1].append(tmp)
                    escaped = False
            else:
                # look for escape character
                if tmp == self._byte_escape_sep:
                    escaped = True
                # or field separator
                elif tmp == self._byte_field_sep:
                    msg.append([])
                # or command separator
                elif tmp == self._byte_command_sep:
                    command_sep_found = True
                    break
                # or any empty characater
                elif tmp == b'':
                    break
                # okay, must be something
                else:
                    msg[-1].append(tmp)

        # No message received given timeouts
        if len(msg) == 1 and len(msg[0]) == 0:
            return None

        # Make sure the message terminated properly
        if not command_sep_found:
            # empty message (likely from line endings being included)
            joined_raw = b''.join(raw_msg)
            if joined_raw.strip() == b'':
                return None

            err = "Incomplete message ({})".format(joined_raw.decode())
            raise EOFError(err)

        # Turn message into fields
        fields = [b''.join(m) for m in msg]

        # Get the command name.
        cmd = fields[0].strip().decode()
        try:
            cmd_name = self._int_to_cmd_name[int(cmd)]
        except (ValueError,KeyError):
            # BUGFIX: an unrecognized integer id raises KeyError (dict
            # lookup), not IndexError, and cmd_name must be assigned even
            # when warnings are suppressed (previously a NameError).
            cmd_name = "unknown"
            if self.give_warnings:
                w = "Recieved unrecognized command ({}).".format(cmd)
                warnings.warn(w,Warning)

        # Figure out what formats to use for each argument.
        arg_format_list = []
        if arg_formats != None:
            # The user specified formats
            arg_format_list = list(arg_formats)
        else:
            try:
                # See if class was initialized with a format for arguments to
                # this command
                arg_format_list = self._cmd_name_to_format[cmd_name]
            except KeyError:
                # if not, guess for all arguments
                arg_format_list = ["g" for i in range(len(fields[1:]))]

        # Deal with "*" format
        arg_format_list = self._treat_star_format(arg_format_list,fields[1:])

        if len(fields[1:]) > 0:
            if len(arg_format_list) != len(fields[1:]):
                err = "Number of argument formats must match the number of recieved arguments."
                raise ValueError(err)

        received = []
        for i, f in enumerate(fields[1:]):
            received.append(self._recv_methods[arg_format_list[i]](f))

        # Record the time the message arrived
        message_time = time.time()

        return cmd_name, received, message_time

    def _treat_star_format(self,arg_format_list,args):
        """
        Deal with "*" format if specified: repeat the preceding format for
        every remaining argument.
        """
        num_stars = len([a for a in arg_format_list if a == "*"])
        if num_stars > 0:

            # Make sure the repeated format argument only occurs once, is last,
            # and that there is at least one format in addition to it.
            if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:

                # Trim * from end
                arg_format_list = arg_format_list[:-1]

                # If we need extra arguments...
                if len(arg_format_list) < len(args):
                    f = arg_format_list[-1]
                    len_diff = len(args) - len(arg_format_list)
                    tmp = list(arg_format_list)
                    tmp.extend([f for i in range(len_diff)])
                    arg_format_list = "".join(tmp)
            else:
                err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
                raise ValueError(err)

        return arg_format_list

    def _send_char(self,value):
        """
        Convert a single char to a bytes object.

        BUGFIX: this method was missing from this copy of the class even
        though __init__'s _send_methods table references self._send_char
        (constructing the class would raise AttributeError); restored from
        the canonical implementation.
        """
        if type(value) != str and type(value) != bytes:
            err = "char requires a string or bytes array of length 1"
            raise ValueError(err)

        if len(value) != 1:
            err = "char must be a single character, not \"{}\"".format(value)
            raise ValueError(err)

        if type(value) != bytes:
            value = value.encode("ascii")

        if value in self._escaped_characters:
            err = "Cannot send a control character as a single char to arduino. Send as string instead."
            raise OverflowError(err)

        return struct.pack('c',value)

    def _send_byte(self,value):
        """
        Convert a numerical value into an integer, then to a byte object. Check
        bounds for byte.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value,new_value)
                warnings.warn(w,Warning)
            value = new_value

        # Range check
        if value > 255 or value < 0:
            err = "Value {} exceeds the size of the board's byte.".format(value)
            raise OverflowError(err)

        return struct.pack("B",value)

    def _send_int(self,value):
        """
        Convert a numerical value into an integer, then to a bytes object. Check
        bounds for signed int.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value,new_value)
                warnings.warn(w,Warning)
            value = new_value

        # Range check
        if value > self.board.int_max or value < self.board.int_min:
            err = "Value {} exceeds the size of the board's int.".format(value)
            raise OverflowError(err)

        return struct.pack(self.board.int_type,value)

    def _send_unsigned_int(self,value):
        """
        Convert a numerical value into an integer, then to a bytes object. Check
        bounds for unsigned int.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value,new_value)
                warnings.warn(w,Warning)
            value = new_value

        # Range check
        if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
            err = "Value {} exceeds the size of the board's unsigned int.".format(value)
            raise OverflowError(err)

        return struct.pack(self.board.unsigned_int_type,value)

    def _send_long(self,value):
        """
        Convert a numerical value into an integer, then to a bytes object. Check
        bounds for signed long.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value,new_value)
                warnings.warn(w,Warning)
            value = new_value

        # Range check
        if value > self.board.long_max or value < self.board.long_min:
            err = "Value {} exceeds the size of the board's long.".format(value)
            raise OverflowError(err)

        return struct.pack(self.board.long_type,value)

    def _send_unsigned_long(self,value):
        """
        Convert a numerical value into an integer, then to a bytes object.
        Check bounds for unsigned long.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value,new_value)
                warnings.warn(w,Warning)
            value = new_value

        # Range check
        if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
            err = "Value {} exceeds the size of the board's unsigned long.".format(value)
            raise OverflowError(err)

        return struct.pack(self.board.unsigned_long_type,value)

    def _send_float(self,value):
        """
        Return a float as a IEEE 754 format bytes object.
        """
        # convert to float. this will throw a ValueError if the type is not
        # readily converted
        if type(value) != float:
            value = float(value)

        # Range check
        if value > self.board.float_max or value < self.board.float_min:
            err = "Value {} exceeds the size of the board's float.".format(value)
            raise OverflowError(err)

        return struct.pack(self.board.float_type,value)

    def _send_double(self,value):
        """
        Return a double as a IEEE 754 format bytes object.

        NOTE(review): the range check uses the board's *float* bounds (on
        AVR boards double == float); confirm for 8-byte-double boards.
        """
        # convert to float. this will throw a ValueError if the type is not
        # readily converted
        if type(value) != float:
            value = float(value)

        # Range check
        if value > self.board.float_max or value < self.board.float_min:
            err = "Value {} exceeds the size of the board's float.".format(value)
            raise OverflowError(err)

        return struct.pack(self.board.double_type,value)

    def _send_string(self,value):
        """
        Convert a string to a bytes object.  If value is not a string, it is
        converted to one with a standard string.format call.
        """
        if type(value) != bytes:
            value = "{}".format(value).encode("ascii")

        return value

    def _send_bool(self,value):
        """
        Convert a boolean value into a bytes object.  Uses 0 and 1 as output.
        """
        # Sanity check.
        if type(value) != bool and value not in [0,1]:
            err = "{} is not boolean.".format(value)
            raise ValueError(err)

        return struct.pack("?",value)

    def _send_guess(self,value):
        """
        Send the argument as a string in a way that should (probably, maybe!) be
        processed properly by C++ calls like atoi, atof, etc.  This method is
        NOT RECOMMENDED, particularly for floats, because values are often
        mangled silently.  Instead, specify a format (e.g. "f") and use the
        CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
        read the values on the arduino side.
        """
        if type(value) != str and type(value) != bytes and self.give_warnings:
            w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
            warnings.warn(w,Warning)

        if type(value) == float:
            return "{:.10e}".format(value).encode("ascii")
        elif type(value) == bool:
            return "{}".format(int(value)).encode("ascii")
        else:
            return self._send_string(value)

    def _recv_char(self,value):
        """
        Receive a char in binary format, returning as string.
        """
        return struct.unpack("c",value)[0].decode("ascii")

    def _recv_byte(self,value):
        """
        Receive a byte in binary format, returning as python int.
        """
        return struct.unpack("B",value)[0]

    def _recv_int(self,value):
        """
        Receive an int in binary format, returning as python int.
        """
        return struct.unpack(self.board.int_type,value)[0]

    def _recv_unsigned_int(self,value):
        """
        Receive an unsigned int in binary format, returning as python int.
        """
        return struct.unpack(self.board.unsigned_int_type,value)[0]

    def _recv_long(self,value):
        """
        Receive a long in binary format, returning as python int.
        """
        return struct.unpack(self.board.long_type,value)[0]

    def _recv_unsigned_long(self,value):
        """
        Receive an unsigned long in binary format, returning as python int.
        """
        return struct.unpack(self.board.unsigned_long_type,value)[0]

    def _recv_float(self,value):
        """
        Receive a float in binary format, returning as python float.
        """
        return struct.unpack(self.board.float_type,value)[0]

    def _recv_double(self,value):
        """
        Receive a double in binary format, returning as python float.
        """
        return struct.unpack(self.board.double_type,value)[0]

    def _recv_string(self,value):
        """
        Receive a binary (bytes) string, returning a python string.
        """
        s = value.decode('ascii')

        # Strip null characters
        s = s.strip("\x00")

        # Strip other white space
        s = s.strip()

        return s

    def _recv_bool(self,value):
        """
        Receive a binary bool, return as python bool.
        """
        return struct.unpack("?",value)[0]

    def _recv_guess(self,value):
        """
        Take the binary spew and try to make it into a float or integer.  If
        that can't be done, return a string.

        Note: this is generally a bad idea, as values can be seriously mangled
        by going from float -> string -> float.  You'll generally be better off
        using a format specifier and binary argument passing.
        """
        if self.give_warnings:
            w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
            warnings.warn(w,Warning)

        tmp_value = value.decode()

        # BUGFIX: try int first, then float.  The previous logic (float()
        # followed by counting ".") crashed int() on exponent notation such
        # as "1e3" and silently returned the raw string.
        try:
            return int(tmp_value)
        except ValueError:
            pass
        try:
            return float(tmp_value)
        except ValueError:
            pass

        # Return as string
        return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_byte
|
python
|
def _send_byte(self,value):
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
|
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L341-L362
| null |
class CmdMessenger:
"""
Basic interface for interfacing over a serial connection to an arduino
using the CmdMessenger library.
"""
def __init__(self,
             board_instance,
             commands,
             field_separator=",",
             command_separator=";",
             escape_separator="/",
             warnings=True):
    """
    Input:
        board_instance:
            instance of ArduinoBoard initialized with correct serial
            connection (points to correct serial with correct baud rate) and
            correct board parameters (float bytes, etc.)

        commands:
            a list or tuple of commands specified in the arduino .ino file
            *in the same order* they are listed there.  commands should be
            a list of lists, where the first element in the list specifies
            the command name and the second the formats for the arguments.
            (e.g. commands = [["who_are_you",""],["my_name_is","s"]])

        field_separator:
            character that separates fields within a message
            Default: ","

        command_separator:
            character that separates messages (commands) from each other
            Default: ";"

        escape_separator:
            escape character to allow separators within messages.
            Default: "/"

        warnings:
            warnings for user
            Default: True

    The separators and escape_separator should match what's
    in the arduino code that initializes the CmdMessenger.  The default
    separator values match the default values as of CmdMessenger 4.0.

    Raises IOError if the board reports it is not connected.
    """
    self.board = board_instance
    if not self.board.connected:
        err = "Arduino not connected on {}\n".format(self.board.device)
        raise IOError(err)
    # Copy so later mutation of the caller's list cannot change the maps.
    self.commands = commands[:]
    self.field_separator = field_separator
    self.command_separator = command_separator
    self.escape_separator = escape_separator
    self.give_warnings = warnings
    # Forward/reverse maps between command names, their integer ids
    # (index in `commands`), and their argument format strings.
    self._cmd_name_to_int = {}
    self._int_to_cmd_name = {}
    self._cmd_name_to_format = {}
    for i, c in enumerate(commands):
        self._cmd_name_to_int[c[0]] = i
        self._int_to_cmd_name[i] = c[0]
        self._cmd_name_to_format[c[0]] = c[1]
    # Byte versions of the separators used when assembling/parsing the
    # raw serial stream.
    self._byte_field_sep = self.field_separator.encode("ascii")
    self._byte_command_sep = self.command_separator.encode("ascii")
    self._byte_escape_sep = self.escape_separator.encode("ascii")
    self._escaped_characters = [self._byte_field_sep,
                                self._byte_command_sep,
                                self._byte_escape_sep,
                                b'\0']
    self._null_escape_re = re.compile(b'\0')
    # NOTE(review): separators are interpolated into this pattern
    # unescaped -- fine for the default ",", ";", "/" but regex-special
    # separators would need re.escape; confirm before changing them.
    self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
                                                      self.command_separator,
                                                      self.escape_separator).encode('ascii'))
    # Dispatch tables mapping a one-letter format code to the method that
    # serializes/deserializes that type.  Assumes the matching _send_*/
    # _recv_* sibling methods exist on this class.
    self._send_methods = {"c":self._send_char,
                          "b":self._send_byte,
                          "i":self._send_int,
                          "I":self._send_unsigned_int,
                          "l":self._send_long,
                          "L":self._send_unsigned_long,
                          "f":self._send_float,
                          "d":self._send_double,
                          "s":self._send_string,
                          "?":self._send_bool,
                          "g":self._send_guess}
    self._recv_methods = {"c":self._recv_char,
                          "b":self._recv_byte,
                          "i":self._recv_int,
                          "I":self._recv_unsigned_int,
                          "l":self._recv_long,
                          "L":self._recv_unsigned_long,
                          "f":self._recv_float,
                          "d":self._recv_double,
                          "s":self._recv_string,
                          "?":self._recv_bool,
                          "g":self._recv_guess}
def send(self,cmd,*args,arg_formats=None):
"""
Send a command (which may or may not have associated arguments) to an
arduino using the CmdMessage protocol. The command and any parameters
should be passed as direct arguments to send.
arg_formats is an optional string that specifies the formats to use for
each argument when passed to the arduino. If specified here,
arg_formats supercedes formats specified on initialization.
"""
# Turn the command into an integer.
try:
command_as_int = self._cmd_name_to_int[cmd]
except KeyError:
err = "Command '{}' not recognized.\n".format(cmd)
raise ValueError(err)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(args))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,args)
if len(args) > 0:
if len(arg_format_list) != len(args):
err = "Number of argument formats must match the number of arguments."
raise ValueError(err)
# Go through each argument and create a bytes representation in the
# proper format to send. Escape appropriate characters.
fields = ["{}".format(command_as_int).encode("ascii")]
for i, a in enumerate(args):
fields.append(self._send_methods[arg_format_list[i]](a))
fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
# Make something that looks like cmd,field1,field2,field3;
compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
# Send the message.
self.board.write(compiled_bytes)
def receive(self,arg_formats=None):
"""
Recieve commands coming off the serial port.
arg_formats is an optimal keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supercedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty characater
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
def _treat_star_format(self,arg_format_list,args):
"""
Deal with "*" format if specified.
"""
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value)
def _send_double(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.double_type,value)
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
def _send_guess(self,value):
"""
Send the argument as a string in a way that should (probably, maybe!) be
processed properly by C++ calls like atoi, atof, etc. This method is
NOT RECOMMENDED, particularly for floats, because values are often
mangled silently. Instead, specify a format (e.g. "f") and use the
CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
read the values on the arduino side.
"""
if type(value) != str and type(value) != bytes and self.give_warnings:
w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
if type(value) == float:
return "{:.10e}".format(value).encode("ascii")
elif type(value) == bool:
return "{}".format(int(value)).encode("ascii")
else:
return self._send_string(value)
def _recv_char(self,value):
"""
Recieve a char in binary format, returning as string.
"""
return struct.unpack("c",value)[0].decode("ascii")
def _recv_byte(self,value):
"""
Recieve a byte in binary format, returning as python int.
"""
return struct.unpack("B",value)[0]
def _recv_int(self,value):
"""
Recieve an int in binary format, returning as python int.
"""
return struct.unpack(self.board.int_type,value)[0]
def _recv_unsigned_int(self,value):
"""
Recieve an unsigned int in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_int_type,value)[0]
def _recv_long(self,value):
"""
Recieve a long in binary format, returning as python int.
"""
return struct.unpack(self.board.long_type,value)[0]
def _recv_unsigned_long(self,value):
"""
Recieve an unsigned long in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_long_type,value)[0]
def _recv_float(self,value):
"""
Recieve a float in binary format, returning as python float.
"""
return struct.unpack(self.board.float_type,value)[0]
def _recv_double(self,value):
"""
Recieve a double in binary format, returning as python float.
"""
return struct.unpack(self.board.double_type,value)[0]
def _recv_string(self,value):
"""
Recieve a binary (bytes) string, returning a python string.
"""
s = value.decode('ascii')
# Strip null characters
s = s.strip("\x00")
# Strip other white space
s = s.strip()
return s
def _recv_bool(self,value):
"""
Receive a binary bool, return as python bool.
"""
return struct.unpack("?",value)[0]
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_int
|
python
|
def _send_int(self,value):
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
|
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L364-L385
| null |
class CmdMessenger:
"""
Basic interface for interfacing over a serial connection to an arduino
using the CmdMessenger library.
"""
def __init__(self,
board_instance,
commands,
field_separator=",",
command_separator=";",
escape_separator="/",
warnings=True):
"""
Input:
board_instance:
instance of ArduinoBoard initialized with correct serial
connection (points to correct serial with correct baud rate) and
correct board parameters (float bytes, etc.)
commands:
a list or tuple of commands specified in the arduino .ino file
*in the same order* they are listed there. commands should be
a list of lists, where the first element in the list specifies
the command name and the second the formats for the arguments.
(e.g. commands = [["who_are_you",""],["my_name_is","s"]])
field_separator:
character that separates fields within a message
Default: ","
command_separator:
character that separates messages (commands) from each other
Default: ";"
escape_separator:
escape character to allow separators within messages.
Default: "/"
warnings:
warnings for user
Default: True
The separators and escape_separator should match what's
in the arduino code that initializes the CmdMessenger. The default
separator values match the default values as of CmdMessenger 4.0.
"""
self.board = board_instance
if not self.board.connected:
err = "Arduino not connected on {}\n".format(self.board.device)
raise IOError(err)
self.commands = commands[:]
self.field_separator = field_separator
self.command_separator = command_separator
self.escape_separator = escape_separator
self.give_warnings = warnings
self._cmd_name_to_int = {}
self._int_to_cmd_name = {}
self._cmd_name_to_format = {}
for i, c in enumerate(commands):
self._cmd_name_to_int[c[0]] = i
self._int_to_cmd_name[i] = c[0]
self._cmd_name_to_format[c[0]] = c[1]
self._byte_field_sep = self.field_separator.encode("ascii")
self._byte_command_sep = self.command_separator.encode("ascii")
self._byte_escape_sep = self.escape_separator.encode("ascii")
self._escaped_characters = [self._byte_field_sep,
self._byte_command_sep,
self._byte_escape_sep,
b'\0']
self._null_escape_re = re.compile(b'\0')
self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
self.command_separator,
self.escape_separator).encode('ascii'))
self._send_methods = {"c":self._send_char,
"b":self._send_byte,
"i":self._send_int,
"I":self._send_unsigned_int,
"l":self._send_long,
"L":self._send_unsigned_long,
"f":self._send_float,
"d":self._send_double,
"s":self._send_string,
"?":self._send_bool,
"g":self._send_guess}
self._recv_methods = {"c":self._recv_char,
"b":self._recv_byte,
"i":self._recv_int,
"I":self._recv_unsigned_int,
"l":self._recv_long,
"L":self._recv_unsigned_long,
"f":self._recv_float,
"d":self._recv_double,
"s":self._recv_string,
"?":self._recv_bool,
"g":self._recv_guess}
def send(self,cmd,*args,arg_formats=None):
"""
Send a command (which may or may not have associated arguments) to an
arduino using the CmdMessage protocol. The command and any parameters
should be passed as direct arguments to send.
arg_formats is an optional string that specifies the formats to use for
each argument when passed to the arduino. If specified here,
arg_formats supercedes formats specified on initialization.
"""
# Turn the command into an integer.
try:
command_as_int = self._cmd_name_to_int[cmd]
except KeyError:
err = "Command '{}' not recognized.\n".format(cmd)
raise ValueError(err)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(args))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,args)
if len(args) > 0:
if len(arg_format_list) != len(args):
err = "Number of argument formats must match the number of arguments."
raise ValueError(err)
# Go through each argument and create a bytes representation in the
# proper format to send. Escape appropriate characters.
fields = ["{}".format(command_as_int).encode("ascii")]
for i, a in enumerate(args):
fields.append(self._send_methods[arg_format_list[i]](a))
fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
# Make something that looks like cmd,field1,field2,field3;
compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
# Send the message.
self.board.write(compiled_bytes)
def receive(self,arg_formats=None):
"""
Recieve commands coming off the serial port.
arg_formats is an optimal keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supercedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty characater
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
def _treat_star_format(self,arg_format_list,args):
"""
Deal with "*" format if specified.
"""
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value)
def _send_double(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.double_type,value)
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
def _send_guess(self,value):
"""
Send the argument as a string in a way that should (probably, maybe!) be
processed properly by C++ calls like atoi, atof, etc. This method is
NOT RECOMMENDED, particularly for floats, because values are often
mangled silently. Instead, specify a format (e.g. "f") and use the
CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
read the values on the arduino side.
"""
if type(value) != str and type(value) != bytes and self.give_warnings:
w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
if type(value) == float:
return "{:.10e}".format(value).encode("ascii")
elif type(value) == bool:
return "{}".format(int(value)).encode("ascii")
else:
return self._send_string(value)
def _recv_char(self,value):
"""
Recieve a char in binary format, returning as string.
"""
return struct.unpack("c",value)[0].decode("ascii")
def _recv_byte(self,value):
"""
Recieve a byte in binary format, returning as python int.
"""
return struct.unpack("B",value)[0]
def _recv_int(self,value):
"""
Recieve an int in binary format, returning as python int.
"""
return struct.unpack(self.board.int_type,value)[0]
def _recv_unsigned_int(self,value):
"""
Recieve an unsigned int in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_int_type,value)[0]
def _recv_long(self,value):
"""
Recieve a long in binary format, returning as python int.
"""
return struct.unpack(self.board.long_type,value)[0]
def _recv_unsigned_long(self,value):
"""
Recieve an unsigned long in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_long_type,value)[0]
def _recv_float(self,value):
"""
Recieve a float in binary format, returning as python float.
"""
return struct.unpack(self.board.float_type,value)[0]
def _recv_double(self,value):
"""
Recieve a double in binary format, returning as python float.
"""
return struct.unpack(self.board.double_type,value)[0]
def _recv_string(self,value):
"""
Recieve a binary (bytes) string, returning a python string.
"""
s = value.decode('ascii')
# Strip null characters
s = s.strip("\x00")
# Strip other white space
s = s.strip()
return s
def _recv_bool(self,value):
"""
Receive a binary bool, return as python bool.
"""
return struct.unpack("?",value)[0]
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_unsigned_int
|
python
|
def _send_unsigned_int(self,value):
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
|
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L387-L407
| null |
class CmdMessenger:
"""
Basic interface for interfacing over a serial connection to an arduino
using the CmdMessenger library.
"""
def __init__(self,
board_instance,
commands,
field_separator=",",
command_separator=";",
escape_separator="/",
warnings=True):
"""
Input:
board_instance:
instance of ArduinoBoard initialized with correct serial
connection (points to correct serial with correct baud rate) and
correct board parameters (float bytes, etc.)
commands:
a list or tuple of commands specified in the arduino .ino file
*in the same order* they are listed there. commands should be
a list of lists, where the first element in the list specifies
the command name and the second the formats for the arguments.
(e.g. commands = [["who_are_you",""],["my_name_is","s"]])
field_separator:
character that separates fields within a message
Default: ","
command_separator:
character that separates messages (commands) from each other
Default: ";"
escape_separator:
escape character to allow separators within messages.
Default: "/"
warnings:
warnings for user
Default: True
The separators and escape_separator should match what's
in the arduino code that initializes the CmdMessenger. The default
separator values match the default values as of CmdMessenger 4.0.
"""
self.board = board_instance
if not self.board.connected:
err = "Arduino not connected on {}\n".format(self.board.device)
raise IOError(err)
self.commands = commands[:]
self.field_separator = field_separator
self.command_separator = command_separator
self.escape_separator = escape_separator
self.give_warnings = warnings
self._cmd_name_to_int = {}
self._int_to_cmd_name = {}
self._cmd_name_to_format = {}
for i, c in enumerate(commands):
self._cmd_name_to_int[c[0]] = i
self._int_to_cmd_name[i] = c[0]
self._cmd_name_to_format[c[0]] = c[1]
self._byte_field_sep = self.field_separator.encode("ascii")
self._byte_command_sep = self.command_separator.encode("ascii")
self._byte_escape_sep = self.escape_separator.encode("ascii")
self._escaped_characters = [self._byte_field_sep,
self._byte_command_sep,
self._byte_escape_sep,
b'\0']
self._null_escape_re = re.compile(b'\0')
self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
self.command_separator,
self.escape_separator).encode('ascii'))
self._send_methods = {"c":self._send_char,
"b":self._send_byte,
"i":self._send_int,
"I":self._send_unsigned_int,
"l":self._send_long,
"L":self._send_unsigned_long,
"f":self._send_float,
"d":self._send_double,
"s":self._send_string,
"?":self._send_bool,
"g":self._send_guess}
self._recv_methods = {"c":self._recv_char,
"b":self._recv_byte,
"i":self._recv_int,
"I":self._recv_unsigned_int,
"l":self._recv_long,
"L":self._recv_unsigned_long,
"f":self._recv_float,
"d":self._recv_double,
"s":self._recv_string,
"?":self._recv_bool,
"g":self._recv_guess}
def send(self,cmd,*args,arg_formats=None):
"""
Send a command (which may or may not have associated arguments) to an
arduino using the CmdMessage protocol. The command and any parameters
should be passed as direct arguments to send.
arg_formats is an optional string that specifies the formats to use for
each argument when passed to the arduino. If specified here,
arg_formats supercedes formats specified on initialization.
"""
# Turn the command into an integer.
try:
command_as_int = self._cmd_name_to_int[cmd]
except KeyError:
err = "Command '{}' not recognized.\n".format(cmd)
raise ValueError(err)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(args))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,args)
if len(args) > 0:
if len(arg_format_list) != len(args):
err = "Number of argument formats must match the number of arguments."
raise ValueError(err)
# Go through each argument and create a bytes representation in the
# proper format to send. Escape appropriate characters.
fields = ["{}".format(command_as_int).encode("ascii")]
for i, a in enumerate(args):
fields.append(self._send_methods[arg_format_list[i]](a))
fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
# Make something that looks like cmd,field1,field2,field3;
compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
# Send the message.
self.board.write(compiled_bytes)
def receive(self,arg_formats=None):
"""
Recieve commands coming off the serial port.
arg_formats is an optimal keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supercedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty characater
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
def _treat_star_format(self,arg_format_list,args):
"""
Deal with "*" format if specified.
"""
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value)
def _send_double(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.double_type,value)
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
def _send_guess(self,value):
"""
Send the argument as a string in a way that should (probably, maybe!) be
processed properly by C++ calls like atoi, atof, etc. This method is
NOT RECOMMENDED, particularly for floats, because values are often
mangled silently. Instead, specify a format (e.g. "f") and use the
CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
read the values on the arduino side.
"""
if type(value) != str and type(value) != bytes and self.give_warnings:
w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
if type(value) == float:
return "{:.10e}".format(value).encode("ascii")
elif type(value) == bool:
return "{}".format(int(value)).encode("ascii")
else:
return self._send_string(value)
def _recv_char(self,value):
"""
Recieve a char in binary format, returning as string.
"""
return struct.unpack("c",value)[0].decode("ascii")
def _recv_byte(self,value):
"""
Recieve a byte in binary format, returning as python int.
"""
return struct.unpack("B",value)[0]
def _recv_int(self,value):
"""
Recieve an int in binary format, returning as python int.
"""
return struct.unpack(self.board.int_type,value)[0]
def _recv_unsigned_int(self,value):
"""
Recieve an unsigned int in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_int_type,value)[0]
def _recv_long(self,value):
"""
Recieve a long in binary format, returning as python int.
"""
return struct.unpack(self.board.long_type,value)[0]
def _recv_unsigned_long(self,value):
"""
Recieve an unsigned long in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_long_type,value)[0]
def _recv_float(self,value):
"""
Recieve a float in binary format, returning as python float.
"""
return struct.unpack(self.board.float_type,value)[0]
def _recv_double(self,value):
"""
Recieve a double in binary format, returning as python float.
"""
return struct.unpack(self.board.double_type,value)[0]
def _recv_string(self,value):
"""
Recieve a binary (bytes) string, returning a python string.
"""
s = value.decode('ascii')
# Strip null characters
s = s.strip("\x00")
# Strip other white space
s = s.strip()
return s
def _recv_bool(self,value):
"""
Receive a binary bool, return as python bool.
"""
return struct.unpack("?",value)[0]
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_long
|
python
|
def _send_long(self,value):
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
|
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L409-L430
| null |
class CmdMessenger:
"""
Basic interface for interfacing over a serial connection to an arduino
using the CmdMessenger library.
"""
def __init__(self,
board_instance,
commands,
field_separator=",",
command_separator=";",
escape_separator="/",
warnings=True):
"""
Input:
board_instance:
instance of ArduinoBoard initialized with correct serial
connection (points to correct serial with correct baud rate) and
correct board parameters (float bytes, etc.)
commands:
a list or tuple of commands specified in the arduino .ino file
*in the same order* they are listed there. commands should be
a list of lists, where the first element in the list specifies
the command name and the second the formats for the arguments.
(e.g. commands = [["who_are_you",""],["my_name_is","s"]])
field_separator:
character that separates fields within a message
Default: ","
command_separator:
character that separates messages (commands) from each other
Default: ";"
escape_separator:
escape character to allow separators within messages.
Default: "/"
warnings:
warnings for user
Default: True
The separators and escape_separator should match what's
in the arduino code that initializes the CmdMessenger. The default
separator values match the default values as of CmdMessenger 4.0.
"""
self.board = board_instance
if not self.board.connected:
err = "Arduino not connected on {}\n".format(self.board.device)
raise IOError(err)
self.commands = commands[:]
self.field_separator = field_separator
self.command_separator = command_separator
self.escape_separator = escape_separator
self.give_warnings = warnings
self._cmd_name_to_int = {}
self._int_to_cmd_name = {}
self._cmd_name_to_format = {}
for i, c in enumerate(commands):
self._cmd_name_to_int[c[0]] = i
self._int_to_cmd_name[i] = c[0]
self._cmd_name_to_format[c[0]] = c[1]
self._byte_field_sep = self.field_separator.encode("ascii")
self._byte_command_sep = self.command_separator.encode("ascii")
self._byte_escape_sep = self.escape_separator.encode("ascii")
self._escaped_characters = [self._byte_field_sep,
self._byte_command_sep,
self._byte_escape_sep,
b'\0']
self._null_escape_re = re.compile(b'\0')
self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
self.command_separator,
self.escape_separator).encode('ascii'))
self._send_methods = {"c":self._send_char,
"b":self._send_byte,
"i":self._send_int,
"I":self._send_unsigned_int,
"l":self._send_long,
"L":self._send_unsigned_long,
"f":self._send_float,
"d":self._send_double,
"s":self._send_string,
"?":self._send_bool,
"g":self._send_guess}
self._recv_methods = {"c":self._recv_char,
"b":self._recv_byte,
"i":self._recv_int,
"I":self._recv_unsigned_int,
"l":self._recv_long,
"L":self._recv_unsigned_long,
"f":self._recv_float,
"d":self._recv_double,
"s":self._recv_string,
"?":self._recv_bool,
"g":self._recv_guess}
def send(self,cmd,*args,arg_formats=None):
"""
Send a command (which may or may not have associated arguments) to an
arduino using the CmdMessage protocol. The command and any parameters
should be passed as direct arguments to send.
arg_formats is an optional string that specifies the formats to use for
each argument when passed to the arduino. If specified here,
arg_formats supercedes formats specified on initialization.
"""
# Turn the command into an integer.
try:
command_as_int = self._cmd_name_to_int[cmd]
except KeyError:
err = "Command '{}' not recognized.\n".format(cmd)
raise ValueError(err)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(args))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,args)
if len(args) > 0:
if len(arg_format_list) != len(args):
err = "Number of argument formats must match the number of arguments."
raise ValueError(err)
# Go through each argument and create a bytes representation in the
# proper format to send. Escape appropriate characters.
fields = ["{}".format(command_as_int).encode("ascii")]
for i, a in enumerate(args):
fields.append(self._send_methods[arg_format_list[i]](a))
fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
# Make something that looks like cmd,field1,field2,field3;
compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
# Send the message.
self.board.write(compiled_bytes)
def receive(self,arg_formats=None):
"""
Recieve commands coming off the serial port.
arg_formats is an optimal keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supercedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty characater
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
def _treat_star_format(self,arg_format_list,args):
"""
Deal with "*" format if specified.
"""
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value)
def _send_double(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.double_type,value)
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
def _send_guess(self,value):
"""
Send the argument as a string in a way that should (probably, maybe!) be
processed properly by C++ calls like atoi, atof, etc. This method is
NOT RECOMMENDED, particularly for floats, because values are often
mangled silently. Instead, specify a format (e.g. "f") and use the
CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
read the values on the arduino side.
"""
if type(value) != str and type(value) != bytes and self.give_warnings:
w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
if type(value) == float:
return "{:.10e}".format(value).encode("ascii")
elif type(value) == bool:
return "{}".format(int(value)).encode("ascii")
else:
return self._send_string(value)
def _recv_char(self,value):
"""
Recieve a char in binary format, returning as string.
"""
return struct.unpack("c",value)[0].decode("ascii")
def _recv_byte(self,value):
"""
Recieve a byte in binary format, returning as python int.
"""
return struct.unpack("B",value)[0]
def _recv_int(self,value):
"""
Recieve an int in binary format, returning as python int.
"""
return struct.unpack(self.board.int_type,value)[0]
def _recv_unsigned_int(self,value):
"""
Recieve an unsigned int in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_int_type,value)[0]
def _recv_long(self,value):
"""
Recieve a long in binary format, returning as python int.
"""
return struct.unpack(self.board.long_type,value)[0]
def _recv_unsigned_long(self,value):
"""
Recieve an unsigned long in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_long_type,value)[0]
def _recv_float(self,value):
"""
Recieve a float in binary format, returning as python float.
"""
return struct.unpack(self.board.float_type,value)[0]
def _recv_double(self,value):
"""
Recieve a double in binary format, returning as python float.
"""
return struct.unpack(self.board.double_type,value)[0]
def _recv_string(self,value):
"""
Recieve a binary (bytes) string, returning a python string.
"""
s = value.decode('ascii')
# Strip null characters
s = s.strip("\x00")
# Strip other white space
s = s.strip()
return s
def _recv_bool(self,value):
"""
Receive a binary bool, return as python bool.
"""
return struct.unpack("?",value)[0]
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_unsigned_long
|
python
|
def _send_unsigned_long(self,value):
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
|
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L432-L453
| null |
class CmdMessenger:
    """
    Basic interface for interfacing over a serial connection to an arduino
    using the CmdMessenger library.

    Messages have the wire form ``cmd,field1,field2,...;`` where the
    separators are configurable and may appear inside fields when escaped.
    """

    def __init__(self,
                 board_instance,
                 commands,
                 field_separator=",",
                 command_separator=";",
                 escape_separator="/",
                 warnings=True):
        """
        Input:
            board_instance:
                instance of ArduinoBoard initialized with correct serial
                connection (points to correct serial with correct baud rate) and
                correct board parameters (float bytes, etc.)
            commands:
                a list or tuple of commands specified in the arduino .ino file
                *in the same order* they are listed there.  commands should be
                a list of lists, where the first element in the list specifies
                the command name and the second the formats for the arguments.
                (e.g. commands = [["who_are_you",""],["my_name_is","s"]])
            field_separator:
                character that separates fields within a message
                Default: ","
            command_separator:
                character that separates messages (commands) from each other
                Default: ";"
            escape_separator:
                escape character to allow separators within messages.
                Default: "/"
            warnings:
                whether to emit warnings for user (note: this parameter
                shadows the ``warnings`` module inside this method only)
                Default: True

        Raises IOError if the board is not connected.

        The separators and escape_separator should match what's
        in the arduino code that initializes the CmdMessenger. The default
        separator values match the default values as of CmdMessenger 4.0.
        """
        self.board = board_instance
        if not self.board.connected:
            err = "Arduino not connected on {}\n".format(self.board.device)
            raise IOError(err)

        # Defensive copy so later mutation of the caller's list has no effect.
        self.commands = commands[:]
        self.field_separator = field_separator
        self.command_separator = command_separator
        self.escape_separator = escape_separator
        self.give_warnings = warnings

        # Bidirectional command-name <-> integer-id maps, plus the per-command
        # argument format strings.  Ids are the positions in `commands`.
        self._cmd_name_to_int = {}
        self._int_to_cmd_name = {}
        self._cmd_name_to_format = {}
        for i, c in enumerate(commands):
            self._cmd_name_to_int[c[0]] = i
            self._int_to_cmd_name[i] = c[0]
            self._cmd_name_to_format[c[0]] = c[1]

        # Byte versions of the separators, used when assembling/parsing the
        # raw serial stream.
        self._byte_field_sep = self.field_separator.encode("ascii")
        self._byte_command_sep = self.command_separator.encode("ascii")
        self._byte_escape_sep = self.escape_separator.encode("ascii")

        # Characters that must be escaped when they occur inside a field.
        self._escaped_characters = [self._byte_field_sep,
                                    self._byte_command_sep,
                                    self._byte_escape_sep,
                                    b'\0']

        # NOTE(review): _null_escape_re is never used elsewhere in this
        # class; kept for backward compatibility.
        self._null_escape_re = re.compile(b'\0')
        self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
                                                           self.command_separator,
                                                           self.escape_separator).encode('ascii'))

        # Dispatch tables mapping a single-character format code to the
        # matching binary encoder/decoder.
        self._send_methods = {"c": self._send_char,
                              "b": self._send_byte,
                              "i": self._send_int,
                              "I": self._send_unsigned_int,
                              "l": self._send_long,
                              "L": self._send_unsigned_long,
                              "f": self._send_float,
                              "d": self._send_double,
                              "s": self._send_string,
                              "?": self._send_bool,
                              "g": self._send_guess}

        self._recv_methods = {"c": self._recv_char,
                              "b": self._recv_byte,
                              "i": self._recv_int,
                              "I": self._recv_unsigned_int,
                              "l": self._recv_long,
                              "L": self._recv_unsigned_long,
                              "f": self._recv_float,
                              "d": self._recv_double,
                              "s": self._recv_string,
                              "?": self._recv_bool,
                              "g": self._recv_guess}

    def send(self, cmd, *args, arg_formats=None):
        """
        Send a command (which may or may not have associated arguments) to an
        arduino using the CmdMessage protocol.  The command and any parameters
        should be passed as direct arguments to send.

        arg_formats is an optional string that specifies the formats to use for
        each argument when passed to the arduino. If specified here,
        arg_formats supercedes formats specified on initialization.

        Raises ValueError for an unrecognized command or a format/argument
        count mismatch.
        """
        # Turn the command into an integer.
        try:
            command_as_int = self._cmd_name_to_int[cmd]
        except KeyError:
            err = "Command '{}' not recognized.\n".format(cmd)
            raise ValueError(err)

        # Figure out what formats to use for each argument.
        arg_format_list = []
        if arg_formats is not None:
            # The user specified formats
            arg_format_list = list(arg_formats)
        else:
            try:
                # See if class was initialized with a format for arguments to this
                # command
                arg_format_list = self._cmd_name_to_format[cmd]
            except KeyError:
                # if not, guess for all arguments
                arg_format_list = ["g" for i in range(len(args))]

        # Deal with "*" format
        arg_format_list = self._treat_star_format(arg_format_list, args)

        if len(args) > 0:
            if len(arg_format_list) != len(args):
                err = "Number of argument formats must match the number of arguments."
                raise ValueError(err)

        # Go through each argument and create a bytes representation in the
        # proper format to send.  Escape appropriate characters.
        fields = ["{}".format(command_as_int).encode("ascii")]
        for i, a in enumerate(args):
            fields.append(self._send_methods[arg_format_list[i]](a))
            # Escape any separator bytes inside the just-encoded field.
            fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"), fields[-1])

        # Make something that looks like cmd,field1,field2,field3;
        compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep

        # Send the message.
        self.board.write(compiled_bytes)

    def receive(self, arg_formats=None):
        """
        Recieve commands coming off the serial port.

        arg_formats is an optional keyword that specifies the formats to use to
        parse incoming arguments.  If specified here, arg_formats supercedes
        the formats specified on initialization.

        Returns None if nothing was read, otherwise a tuple of
        (command_name, [parsed_arguments], receipt_time).  Raises EOFError on
        an incomplete message and ValueError on a format count mismatch.
        """
        # Read serial input until a command separator or empty character is
        # reached
        msg = [[]]
        raw_msg = []
        escaped = False
        command_sep_found = False
        while True:
            tmp = self.board.read()
            raw_msg.append(tmp)

            if escaped:
                # Either drop the escape character or, if this wasn't really
                # an escape, keep previous escape character and new character
                if tmp in self._escaped_characters:
                    msg[-1].append(tmp)
                    escaped = False
                else:
                    msg[-1].append(self._byte_escape_sep)
                    msg[-1].append(tmp)
                    escaped = False
            else:
                # look for escape character
                if tmp == self._byte_escape_sep:
                    escaped = True
                # or field separator
                elif tmp == self._byte_field_sep:
                    msg.append([])
                # or command separator
                elif tmp == self._byte_command_sep:
                    command_sep_found = True
                    break
                # or an empty character
                elif tmp == b'':
                    break
                # okay, must be something
                else:
                    msg[-1].append(tmp)

        # No message received given timeouts
        if len(msg) == 1 and len(msg[0]) == 0:
            return None

        # Make sure the message terminated properly
        if not command_sep_found:
            # empty message (likely from line endings being included)
            joined_raw = b''.join(raw_msg)
            if joined_raw.strip() == b'':
                return None

            err = "Incomplete message ({})".format(joined_raw.decode())
            raise EOFError(err)

        # Turn message into fields
        fields = [b''.join(m) for m in msg]

        # Get the command name.
        cmd = fields[0].strip().decode()
        try:
            cmd_name = self._int_to_cmd_name[int(cmd)]
        except (ValueError, KeyError):
            # ValueError: command field was not an integer.
            # KeyError: integer id is not in the registered command map.
            # BUGFIX: the original caught (ValueError, IndexError), so an
            # unregistered id escaped as an uncaught KeyError, and cmd_name
            # was only bound when give_warnings was True (NameError below
            # otherwise).  Always fall back to "unknown" here.
            cmd_name = "unknown"
            if self.give_warnings:
                w = "Recieved unrecognized command ({}).".format(cmd)
                warnings.warn(w, Warning)

        # Figure out what formats to use for each argument.
        arg_format_list = []
        if arg_formats is not None:
            # The user specified formats
            arg_format_list = list(arg_formats)
        else:
            try:
                # See if class was initialized with a format for arguments to this
                # command
                arg_format_list = self._cmd_name_to_format[cmd_name]
            except KeyError:
                # if not, guess for all arguments
                arg_format_list = ["g" for i in range(len(fields[1:]))]

        # Deal with "*" format
        arg_format_list = self._treat_star_format(arg_format_list, fields[1:])

        if len(fields[1:]) > 0:
            if len(arg_format_list) != len(fields[1:]):
                err = "Number of argument formats must match the number of recieved arguments."
                raise ValueError(err)

        received = []
        for i, f in enumerate(fields[1:]):
            received.append(self._recv_methods[arg_format_list[i]](f))

        # Record the time the message arrived
        message_time = time.time()

        return cmd_name, received, message_time

    def _treat_star_format(self, arg_format_list, args):
        """
        Deal with "*" format if specified: a trailing "*" repeats the format
        that precedes it to cover all remaining arguments.  Raises ValueError
        if "*" is misplaced or duplicated.
        """
        num_stars = len([a for a in arg_format_list if a == "*"])
        if num_stars > 0:

            # Make sure the repeated format argument only occurs once, is last,
            # and that there is at least one format in addition to it.
            if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:

                # Trim * from end
                arg_format_list = arg_format_list[:-1]

                # If we need extra arguments...
                if len(arg_format_list) < len(args):
                    f = arg_format_list[-1]
                    len_diff = len(args) - len(arg_format_list)
                    tmp = list(arg_format_list)
                    tmp.extend([f for i in range(len_diff)])
                    arg_format_list = "".join(tmp)
            else:
                err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
                raise ValueError(err)

        return arg_format_list

    def _send_char(self, value):
        """
        Convert a single char to a bytes object.  Raises ValueError for
        non-char input and OverflowError for control characters.
        """
        if type(value) != str and type(value) != bytes:
            err = "char requires a string or bytes array of length 1"
            raise ValueError(err)

        if len(value) != 1:
            err = "char must be a single character, not \"{}\"".format(value)
            raise ValueError(err)

        if type(value) != bytes:
            value = value.encode("ascii")

        # A raw separator byte would corrupt the message framing.
        if value in self._escaped_characters:
            err = "Cannot send a control character as a single char to arduino.  Send as string instead."
            raise OverflowError(err)

        return struct.pack('c', value)

    def _send_byte(self, value):
        """
        Convert a numerical value into an integer, then to a byte object. Check
        bounds for byte.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value, new_value)
                warnings.warn(w, Warning)
            value = new_value

        # Range check
        if value > 255 or value < 0:
            err = "Value {} exceeds the size of the board's byte.".format(value)
            raise OverflowError(err)

        return struct.pack("B", value)

    def _send_int(self, value):
        """
        Convert a numerical value into an integer, then to a bytes object. Check
        bounds for signed int.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value, new_value)
                warnings.warn(w, Warning)
            value = new_value

        # Range check
        if value > self.board.int_max or value < self.board.int_min:
            err = "Value {} exceeds the size of the board's int.".format(value)
            raise OverflowError(err)

        return struct.pack(self.board.int_type, value)

    def _send_unsigned_int(self, value):
        """
        Convert a numerical value into an integer, then to a bytes object. Check
        bounds for unsigned int.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value, new_value)
                warnings.warn(w, Warning)
            value = new_value

        # Range check
        if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
            err = "Value {} exceeds the size of the board's unsigned int.".format(value)
            raise OverflowError(err)

        return struct.pack(self.board.unsigned_int_type, value)

    def _send_long(self, value):
        """
        Convert a numerical value into an integer, then to a bytes object. Check
        bounds for signed long.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value, new_value)
                warnings.warn(w, Warning)
            value = new_value

        # Range check
        if value > self.board.long_max or value < self.board.long_min:
            err = "Value {} exceeds the size of the board's long.".format(value)
            raise OverflowError(err)

        return struct.pack(self.board.long_type, value)

    def _send_unsigned_long(self, value):
        """
        Convert a numerical value into an integer, then to a bytes object.
        Check bounds for unsigned long.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value, new_value)
                warnings.warn(w, Warning)
            value = new_value

        # Range check
        if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
            err = "Value {} exceeds the size of the board's unsigned long.".format(value)
            raise OverflowError(err)

        return struct.pack(self.board.unsigned_long_type, value)

    def _send_float(self, value):
        """
        Return a float as an IEEE 754 format bytes object.
        """
        # convert to float. this will throw a ValueError if the type is not
        # readily converted
        if type(value) != float:
            value = float(value)

        # Range check
        if value > self.board.float_max or value < self.board.float_min:
            err = "Value {} exceeds the size of the board's float.".format(value)
            raise OverflowError(err)

        return struct.pack(self.board.float_type, value)

    def _send_double(self, value):
        """
        Return a float as an IEEE 754 format bytes object (board double width).
        """
        # convert to float. this will throw a ValueError if the type is not
        # readily converted
        if type(value) != float:
            value = float(value)

        # Range check.  NOTE(review): intentionally checks against the
        # board's *float* limits; on AVR boards double is the same width as
        # float.
        if value > self.board.float_max or value < self.board.float_min:
            err = "Value {} exceeds the size of the board's float.".format(value)
            raise OverflowError(err)

        return struct.pack(self.board.double_type, value)

    def _send_string(self, value):
        """
        Convert a string to a bytes object.  If value is not a string, it is
        be converted to one with a standard string.format call.
        """
        if type(value) != bytes:
            value = "{}".format(value).encode("ascii")

        return value

    def _send_bool(self, value):
        """
        Convert a boolean value into a bytes object.  Uses 0 and 1 as output.
        """
        # Sanity check.
        if type(value) != bool and value not in [0, 1]:
            err = "{} is not boolean.".format(value)
            raise ValueError(err)

        return struct.pack("?", value)

    def _send_guess(self, value):
        """
        Send the argument as a string in a way that should (probably, maybe!) be
        processed properly by C++ calls like atoi, atof, etc.  This method is
        NOT RECOMMENDED, particularly for floats, because values are often
        mangled silently.  Instead, specify a format (e.g. "f") and use the
        CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
        read the values on the arduino side.
        """
        if type(value) != str and type(value) != bytes and self.give_warnings:
            w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
            warnings.warn(w, Warning)

        if type(value) == float:
            return "{:.10e}".format(value).encode("ascii")
        elif type(value) == bool:
            return "{}".format(int(value)).encode("ascii")
        else:
            return self._send_string(value)

    def _recv_char(self, value):
        """
        Receive a char in binary format, returning as string.
        """
        return struct.unpack("c", value)[0].decode("ascii")

    def _recv_byte(self, value):
        """
        Receive a byte in binary format, returning as python int.
        """
        return struct.unpack("B", value)[0]

    def _recv_int(self, value):
        """
        Receive an int in binary format, returning as python int.
        """
        return struct.unpack(self.board.int_type, value)[0]

    def _recv_unsigned_int(self, value):
        """
        Receive an unsigned int in binary format, returning as python int.
        """
        return struct.unpack(self.board.unsigned_int_type, value)[0]

    def _recv_long(self, value):
        """
        Receive a long in binary format, returning as python int.
        """
        return struct.unpack(self.board.long_type, value)[0]

    def _recv_unsigned_long(self, value):
        """
        Receive an unsigned long in binary format, returning as python int.
        """
        return struct.unpack(self.board.unsigned_long_type, value)[0]

    def _recv_float(self, value):
        """
        Receive a float in binary format, returning as python float.
        """
        return struct.unpack(self.board.float_type, value)[0]

    def _recv_double(self, value):
        """
        Receive a double in binary format, returning as python float.
        """
        return struct.unpack(self.board.double_type, value)[0]

    def _recv_string(self, value):
        """
        Receive a binary (bytes) string, returning a python string.
        """
        s = value.decode('ascii')

        # Strip null characters
        s = s.strip("\x00")

        # Strip other white space
        s = s.strip()

        return s

    def _recv_bool(self, value):
        """
        Receive a binary bool, return as python bool.
        """
        return struct.unpack("?", value)[0]

    def _recv_guess(self, value):
        """
        Take the binary spew and try to make it into a float or integer.  If
        that can't be done, return a string.

        Note: this is generally a bad idea, as values can be seriously mangled
        by going from float -> string -> float.  You'll generally be better off
        using a format specifier and binary argument passing.
        """
        if self.give_warnings:
            w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
            warnings.warn(w, Warning)

        tmp_value = value.decode()

        try:
            float(tmp_value)

            if len(tmp_value.split(".")) == 1:
                # integer
                return int(tmp_value)
            else:
                # float
                return float(tmp_value)

        except ValueError:
            pass

        # Return as string
        return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_float
|
python
|
def _send_float(self,value):
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value)
|
Return a float as an IEEE 754 format bytes object.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L455-L470
| null |
class CmdMessenger:
"""
Basic interface for interfacing over a serial connection to an arduino
using the CmdMessenger library.
"""
def __init__(self,
board_instance,
commands,
field_separator=",",
command_separator=";",
escape_separator="/",
warnings=True):
"""
Input:
board_instance:
instance of ArduinoBoard initialized with correct serial
connection (points to correct serial with correct baud rate) and
correct board parameters (float bytes, etc.)
commands:
a list or tuple of commands specified in the arduino .ino file
*in the same order* they are listed there. commands should be
a list of lists, where the first element in the list specifies
the command name and the second the formats for the arguments.
(e.g. commands = [["who_are_you",""],["my_name_is","s"]])
field_separator:
character that separates fields within a message
Default: ","
command_separator:
character that separates messages (commands) from each other
Default: ";"
escape_separator:
escape character to allow separators within messages.
Default: "/"
warnings:
warnings for user
Default: True
The separators and escape_separator should match what's
in the arduino code that initializes the CmdMessenger. The default
separator values match the default values as of CmdMessenger 4.0.
"""
self.board = board_instance
if not self.board.connected:
err = "Arduino not connected on {}\n".format(self.board.device)
raise IOError(err)
self.commands = commands[:]
self.field_separator = field_separator
self.command_separator = command_separator
self.escape_separator = escape_separator
self.give_warnings = warnings
self._cmd_name_to_int = {}
self._int_to_cmd_name = {}
self._cmd_name_to_format = {}
for i, c in enumerate(commands):
self._cmd_name_to_int[c[0]] = i
self._int_to_cmd_name[i] = c[0]
self._cmd_name_to_format[c[0]] = c[1]
self._byte_field_sep = self.field_separator.encode("ascii")
self._byte_command_sep = self.command_separator.encode("ascii")
self._byte_escape_sep = self.escape_separator.encode("ascii")
self._escaped_characters = [self._byte_field_sep,
self._byte_command_sep,
self._byte_escape_sep,
b'\0']
self._null_escape_re = re.compile(b'\0')
self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
self.command_separator,
self.escape_separator).encode('ascii'))
self._send_methods = {"c":self._send_char,
"b":self._send_byte,
"i":self._send_int,
"I":self._send_unsigned_int,
"l":self._send_long,
"L":self._send_unsigned_long,
"f":self._send_float,
"d":self._send_double,
"s":self._send_string,
"?":self._send_bool,
"g":self._send_guess}
self._recv_methods = {"c":self._recv_char,
"b":self._recv_byte,
"i":self._recv_int,
"I":self._recv_unsigned_int,
"l":self._recv_long,
"L":self._recv_unsigned_long,
"f":self._recv_float,
"d":self._recv_double,
"s":self._recv_string,
"?":self._recv_bool,
"g":self._recv_guess}
def send(self,cmd,*args,arg_formats=None):
"""
Send a command (which may or may not have associated arguments) to an
arduino using the CmdMessage protocol. The command and any parameters
should be passed as direct arguments to send.
arg_formats is an optional string that specifies the formats to use for
each argument when passed to the arduino. If specified here,
arg_formats supercedes formats specified on initialization.
"""
# Turn the command into an integer.
try:
command_as_int = self._cmd_name_to_int[cmd]
except KeyError:
err = "Command '{}' not recognized.\n".format(cmd)
raise ValueError(err)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(args))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,args)
if len(args) > 0:
if len(arg_format_list) != len(args):
err = "Number of argument formats must match the number of arguments."
raise ValueError(err)
# Go through each argument and create a bytes representation in the
# proper format to send. Escape appropriate characters.
fields = ["{}".format(command_as_int).encode("ascii")]
for i, a in enumerate(args):
fields.append(self._send_methods[arg_format_list[i]](a))
fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
# Make something that looks like cmd,field1,field2,field3;
compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
# Send the message.
self.board.write(compiled_bytes)
def receive(self,arg_formats=None):
"""
Recieve commands coming off the serial port.
arg_formats is an optimal keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supercedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty characater
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
def _treat_star_format(self,arg_format_list,args):
"""
Deal with "*" format if specified.
"""
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
def _send_double(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.double_type,value)
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
def _send_guess(self,value):
"""
Send the argument as a string in a way that should (probably, maybe!) be
processed properly by C++ calls like atoi, atof, etc. This method is
NOT RECOMMENDED, particularly for floats, because values are often
mangled silently. Instead, specify a format (e.g. "f") and use the
CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
read the values on the arduino side.
"""
if type(value) != str and type(value) != bytes and self.give_warnings:
w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
if type(value) == float:
return "{:.10e}".format(value).encode("ascii")
elif type(value) == bool:
return "{}".format(int(value)).encode("ascii")
else:
return self._send_string(value)
def _recv_char(self,value):
"""
Recieve a char in binary format, returning as string.
"""
return struct.unpack("c",value)[0].decode("ascii")
def _recv_byte(self,value):
"""
Recieve a byte in binary format, returning as python int.
"""
return struct.unpack("B",value)[0]
def _recv_int(self,value):
"""
Recieve an int in binary format, returning as python int.
"""
return struct.unpack(self.board.int_type,value)[0]
def _recv_unsigned_int(self,value):
"""
Recieve an unsigned int in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_int_type,value)[0]
def _recv_long(self,value):
"""
Recieve a long in binary format, returning as python int.
"""
return struct.unpack(self.board.long_type,value)[0]
def _recv_unsigned_long(self,value):
"""
Recieve an unsigned long in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_long_type,value)[0]
def _recv_float(self,value):
"""
Recieve a float in binary format, returning as python float.
"""
return struct.unpack(self.board.float_type,value)[0]
def _recv_double(self,value):
"""
Recieve a double in binary format, returning as python float.
"""
return struct.unpack(self.board.double_type,value)[0]
def _recv_string(self,value):
"""
Recieve a binary (bytes) string, returning a python string.
"""
s = value.decode('ascii')
# Strip null characters
s = s.strip("\x00")
# Strip other white space
s = s.strip()
return s
def _recv_bool(self,value):
"""
Receive a binary bool, return as python bool.
"""
return struct.unpack("?",value)[0]
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_double
|
python
|
def _send_double(self,value):
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.double_type,value)
|
Return a float as a IEEE 754 format bytes object.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L472-L487
| null |
class CmdMessenger:
"""
Basic interface for interfacing over a serial connection to an arduino
using the CmdMessenger library.
"""
def __init__(self,
board_instance,
commands,
field_separator=",",
command_separator=";",
escape_separator="/",
warnings=True):
"""
Input:
board_instance:
instance of ArduinoBoard initialized with correct serial
connection (points to correct serial with correct baud rate) and
correct board parameters (float bytes, etc.)
commands:
a list or tuple of commands specified in the arduino .ino file
*in the same order* they are listed there. commands should be
a list of lists, where the first element in the list specifies
the command name and the second the formats for the arguments.
(e.g. commands = [["who_are_you",""],["my_name_is","s"]])
field_separator:
character that separates fields within a message
Default: ","
command_separator:
character that separates messages (commands) from each other
Default: ";"
escape_separator:
escape character to allow separators within messages.
Default: "/"
warnings:
warnings for user
Default: True
The separators and escape_separator should match what's
in the arduino code that initializes the CmdMessenger. The default
separator values match the default values as of CmdMessenger 4.0.
"""
self.board = board_instance
if not self.board.connected:
err = "Arduino not connected on {}\n".format(self.board.device)
raise IOError(err)
self.commands = commands[:]
self.field_separator = field_separator
self.command_separator = command_separator
self.escape_separator = escape_separator
self.give_warnings = warnings
self._cmd_name_to_int = {}
self._int_to_cmd_name = {}
self._cmd_name_to_format = {}
for i, c in enumerate(commands):
self._cmd_name_to_int[c[0]] = i
self._int_to_cmd_name[i] = c[0]
self._cmd_name_to_format[c[0]] = c[1]
self._byte_field_sep = self.field_separator.encode("ascii")
self._byte_command_sep = self.command_separator.encode("ascii")
self._byte_escape_sep = self.escape_separator.encode("ascii")
self._escaped_characters = [self._byte_field_sep,
self._byte_command_sep,
self._byte_escape_sep,
b'\0']
self._null_escape_re = re.compile(b'\0')
self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
self.command_separator,
self.escape_separator).encode('ascii'))
self._send_methods = {"c":self._send_char,
"b":self._send_byte,
"i":self._send_int,
"I":self._send_unsigned_int,
"l":self._send_long,
"L":self._send_unsigned_long,
"f":self._send_float,
"d":self._send_double,
"s":self._send_string,
"?":self._send_bool,
"g":self._send_guess}
self._recv_methods = {"c":self._recv_char,
"b":self._recv_byte,
"i":self._recv_int,
"I":self._recv_unsigned_int,
"l":self._recv_long,
"L":self._recv_unsigned_long,
"f":self._recv_float,
"d":self._recv_double,
"s":self._recv_string,
"?":self._recv_bool,
"g":self._recv_guess}
def send(self,cmd,*args,arg_formats=None):
"""
Send a command (which may or may not have associated arguments) to an
arduino using the CmdMessage protocol. The command and any parameters
should be passed as direct arguments to send.
arg_formats is an optional string that specifies the formats to use for
each argument when passed to the arduino. If specified here,
arg_formats supercedes formats specified on initialization.
"""
# Turn the command into an integer.
try:
command_as_int = self._cmd_name_to_int[cmd]
except KeyError:
err = "Command '{}' not recognized.\n".format(cmd)
raise ValueError(err)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(args))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,args)
if len(args) > 0:
if len(arg_format_list) != len(args):
err = "Number of argument formats must match the number of arguments."
raise ValueError(err)
# Go through each argument and create a bytes representation in the
# proper format to send. Escape appropriate characters.
fields = ["{}".format(command_as_int).encode("ascii")]
for i, a in enumerate(args):
fields.append(self._send_methods[arg_format_list[i]](a))
fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
# Make something that looks like cmd,field1,field2,field3;
compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
# Send the message.
self.board.write(compiled_bytes)
def receive(self,arg_formats=None):
"""
Recieve commands coming off the serial port.
arg_formats is an optimal keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supercedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty characater
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
def _treat_star_format(self,arg_format_list,args):
"""
Deal with "*" format if specified.
"""
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value)
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
def _send_guess(self,value):
"""
Send the argument as a string in a way that should (probably, maybe!) be
processed properly by C++ calls like atoi, atof, etc. This method is
NOT RECOMMENDED, particularly for floats, because values are often
mangled silently. Instead, specify a format (e.g. "f") and use the
CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
read the values on the arduino side.
"""
if type(value) != str and type(value) != bytes and self.give_warnings:
w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
if type(value) == float:
return "{:.10e}".format(value).encode("ascii")
elif type(value) == bool:
return "{}".format(int(value)).encode("ascii")
else:
return self._send_string(value)
def _recv_char(self,value):
"""
Recieve a char in binary format, returning as string.
"""
return struct.unpack("c",value)[0].decode("ascii")
def _recv_byte(self,value):
"""
Recieve a byte in binary format, returning as python int.
"""
return struct.unpack("B",value)[0]
def _recv_int(self,value):
"""
Recieve an int in binary format, returning as python int.
"""
return struct.unpack(self.board.int_type,value)[0]
def _recv_unsigned_int(self,value):
"""
Recieve an unsigned int in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_int_type,value)[0]
def _recv_long(self,value):
"""
Recieve a long in binary format, returning as python int.
"""
return struct.unpack(self.board.long_type,value)[0]
def _recv_unsigned_long(self,value):
"""
Recieve an unsigned long in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_long_type,value)[0]
def _recv_float(self,value):
"""
Recieve a float in binary format, returning as python float.
"""
return struct.unpack(self.board.float_type,value)[0]
def _recv_double(self,value):
"""
Recieve a double in binary format, returning as python float.
"""
return struct.unpack(self.board.double_type,value)[0]
def _recv_string(self,value):
"""
Recieve a binary (bytes) string, returning a python string.
"""
s = value.decode('ascii')
# Strip null characters
s = s.strip("\x00")
# Strip other white space
s = s.strip()
return s
def _recv_bool(self,value):
"""
Receive a binary bool, return as python bool.
"""
return struct.unpack("?",value)[0]
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_string
|
python
|
def _send_string(self,value):
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
|
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L489-L498
| null |
class CmdMessenger:
"""
Basic interface for interfacing over a serial connection to an arduino
using the CmdMessenger library.
"""
def __init__(self,
board_instance,
commands,
field_separator=",",
command_separator=";",
escape_separator="/",
warnings=True):
"""
Input:
board_instance:
instance of ArduinoBoard initialized with correct serial
connection (points to correct serial with correct baud rate) and
correct board parameters (float bytes, etc.)
commands:
a list or tuple of commands specified in the arduino .ino file
*in the same order* they are listed there. commands should be
a list of lists, where the first element in the list specifies
the command name and the second the formats for the arguments.
(e.g. commands = [["who_are_you",""],["my_name_is","s"]])
field_separator:
character that separates fields within a message
Default: ","
command_separator:
character that separates messages (commands) from each other
Default: ";"
escape_separator:
escape character to allow separators within messages.
Default: "/"
warnings:
warnings for user
Default: True
The separators and escape_separator should match what's
in the arduino code that initializes the CmdMessenger. The default
separator values match the default values as of CmdMessenger 4.0.
"""
self.board = board_instance
if not self.board.connected:
err = "Arduino not connected on {}\n".format(self.board.device)
raise IOError(err)
self.commands = commands[:]
self.field_separator = field_separator
self.command_separator = command_separator
self.escape_separator = escape_separator
self.give_warnings = warnings
self._cmd_name_to_int = {}
self._int_to_cmd_name = {}
self._cmd_name_to_format = {}
for i, c in enumerate(commands):
self._cmd_name_to_int[c[0]] = i
self._int_to_cmd_name[i] = c[0]
self._cmd_name_to_format[c[0]] = c[1]
self._byte_field_sep = self.field_separator.encode("ascii")
self._byte_command_sep = self.command_separator.encode("ascii")
self._byte_escape_sep = self.escape_separator.encode("ascii")
self._escaped_characters = [self._byte_field_sep,
self._byte_command_sep,
self._byte_escape_sep,
b'\0']
self._null_escape_re = re.compile(b'\0')
self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
self.command_separator,
self.escape_separator).encode('ascii'))
self._send_methods = {"c":self._send_char,
"b":self._send_byte,
"i":self._send_int,
"I":self._send_unsigned_int,
"l":self._send_long,
"L":self._send_unsigned_long,
"f":self._send_float,
"d":self._send_double,
"s":self._send_string,
"?":self._send_bool,
"g":self._send_guess}
self._recv_methods = {"c":self._recv_char,
"b":self._recv_byte,
"i":self._recv_int,
"I":self._recv_unsigned_int,
"l":self._recv_long,
"L":self._recv_unsigned_long,
"f":self._recv_float,
"d":self._recv_double,
"s":self._recv_string,
"?":self._recv_bool,
"g":self._recv_guess}
def send(self,cmd,*args,arg_formats=None):
"""
Send a command (which may or may not have associated arguments) to an
arduino using the CmdMessage protocol. The command and any parameters
should be passed as direct arguments to send.
arg_formats is an optional string that specifies the formats to use for
each argument when passed to the arduino. If specified here,
arg_formats supercedes formats specified on initialization.
"""
# Turn the command into an integer.
try:
command_as_int = self._cmd_name_to_int[cmd]
except KeyError:
err = "Command '{}' not recognized.\n".format(cmd)
raise ValueError(err)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(args))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,args)
if len(args) > 0:
if len(arg_format_list) != len(args):
err = "Number of argument formats must match the number of arguments."
raise ValueError(err)
# Go through each argument and create a bytes representation in the
# proper format to send. Escape appropriate characters.
fields = ["{}".format(command_as_int).encode("ascii")]
for i, a in enumerate(args):
fields.append(self._send_methods[arg_format_list[i]](a))
fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
# Make something that looks like cmd,field1,field2,field3;
compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
# Send the message.
self.board.write(compiled_bytes)
def receive(self,arg_formats=None):
"""
Recieve commands coming off the serial port.
arg_formats is an optimal keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supercedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty characater
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
def _treat_star_format(self,arg_format_list,args):
"""
Deal with "*" format if specified.
"""
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value)
def _send_double(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.double_type,value)
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
def _send_guess(self,value):
"""
Send the argument as a string in a way that should (probably, maybe!) be
processed properly by C++ calls like atoi, atof, etc. This method is
NOT RECOMMENDED, particularly for floats, because values are often
mangled silently. Instead, specify a format (e.g. "f") and use the
CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
read the values on the arduino side.
"""
if type(value) != str and type(value) != bytes and self.give_warnings:
w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
if type(value) == float:
return "{:.10e}".format(value).encode("ascii")
elif type(value) == bool:
return "{}".format(int(value)).encode("ascii")
else:
return self._send_string(value)
def _recv_char(self,value):
"""
Recieve a char in binary format, returning as string.
"""
return struct.unpack("c",value)[0].decode("ascii")
def _recv_byte(self,value):
"""
Recieve a byte in binary format, returning as python int.
"""
return struct.unpack("B",value)[0]
def _recv_int(self,value):
"""
Recieve an int in binary format, returning as python int.
"""
return struct.unpack(self.board.int_type,value)[0]
def _recv_unsigned_int(self,value):
"""
Recieve an unsigned int in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_int_type,value)[0]
def _recv_long(self,value):
"""
Recieve a long in binary format, returning as python int.
"""
return struct.unpack(self.board.long_type,value)[0]
def _recv_unsigned_long(self,value):
"""
Recieve an unsigned long in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_long_type,value)[0]
def _recv_float(self,value):
"""
Recieve a float in binary format, returning as python float.
"""
return struct.unpack(self.board.float_type,value)[0]
def _recv_double(self,value):
"""
Recieve a double in binary format, returning as python float.
"""
return struct.unpack(self.board.double_type,value)[0]
def _recv_string(self,value):
"""
Recieve a binary (bytes) string, returning a python string.
"""
s = value.decode('ascii')
# Strip null characters
s = s.strip("\x00")
# Strip other white space
s = s.strip()
return s
def _recv_bool(self,value):
"""
Receive a binary bool, return as python bool.
"""
return struct.unpack("?",value)[0]
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_bool
|
python
|
def _send_bool(self,value):
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
|
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L500-L510
| null |
class CmdMessenger:
    """
    Basic interface for interfacing over a serial connection to an arduino
    using the CmdMessenger library.
    """
    def __init__(self,
                 board_instance,
                 commands,
                 field_separator=",",
                 command_separator=";",
                 escape_separator="/",
                 warnings=True):
        """
        Input:
            board_instance:
                instance of ArduinoBoard initialized with correct serial
                connection (points to correct serial with correct baud rate) and
                correct board parameters (float bytes, etc.)
            commands:
                a list or tuple of commands specified in the arduino .ino file
                *in the same order* they are listed there.  commands should be
                a list of lists, where the first element in the list specifies
                the command name and the second the formats for the arguments.
                (e.g. commands = [["who_are_you",""],["my_name_is","s"]])
            field_separator:
                character that separates fields within a message
                Default: ","
            command_separator:
                character that separates messages (commands) from each other
                Default: ";"
            escape_separator:
                escape character to allow separators within messages.
                Default: "/"
            warnings:
                warnings for user
                Default: True
        The separators and escape_separator should match what's
        in the arduino code that initializes the CmdMessenger.  The default
        separator values match the default values as of CmdMessenger 4.0.
        """
        self.board = board_instance
        if not self.board.connected:
            err = "Arduino not connected on {}\n".format(self.board.device)
            raise IOError(err)
        self.commands = commands[:]
        self.field_separator = field_separator
        self.command_separator = command_separator
        self.escape_separator = escape_separator
        self.give_warnings = warnings
        # Commands are transmitted as their integer index; build both
        # directions of the name<->index mapping plus the format lookup.
        self._cmd_name_to_int = {}
        self._int_to_cmd_name = {}
        self._cmd_name_to_format = {}
        for i, c in enumerate(commands):
            self._cmd_name_to_int[c[0]] = i
            self._int_to_cmd_name[i] = c[0]
            self._cmd_name_to_format[c[0]] = c[1]
        self._byte_field_sep = self.field_separator.encode("ascii")
        self._byte_command_sep = self.command_separator.encode("ascii")
        self._byte_escape_sep = self.escape_separator.encode("ascii")
        # Any of these bytes appearing inside a field payload must be escaped.
        self._escaped_characters = [self._byte_field_sep,
                                    self._byte_command_sep,
                                    self._byte_escape_sep,
                                    b'\0']
        self._null_escape_re = re.compile(b'\0')
        # Matches any separator or NUL so outgoing fields can be escaped.
        self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
                                                           self.command_separator,
                                                           self.escape_separator).encode('ascii'))
        # Map single-character format codes to packer/unpacker methods.
        self._send_methods = {"c":self._send_char,
                              "b":self._send_byte,
                              "i":self._send_int,
                              "I":self._send_unsigned_int,
                              "l":self._send_long,
                              "L":self._send_unsigned_long,
                              "f":self._send_float,
                              "d":self._send_double,
                              "s":self._send_string,
                              "?":self._send_bool,
                              "g":self._send_guess}
        self._recv_methods = {"c":self._recv_char,
                              "b":self._recv_byte,
                              "i":self._recv_int,
                              "I":self._recv_unsigned_int,
                              "l":self._recv_long,
                              "L":self._recv_unsigned_long,
                              "f":self._recv_float,
                              "d":self._recv_double,
                              "s":self._recv_string,
                              "?":self._recv_bool,
                              "g":self._recv_guess}
    def send(self,cmd,*args,arg_formats=None):
        """
        Send a command (which may or may not have associated arguments) to an
        arduino using the CmdMessage protocol.  The command and any parameters
        should be passed as direct arguments to send.
        arg_formats is an optional string that specifies the formats to use for
        each argument when passed to the arduino. If specified here,
        arg_formats supercedes formats specified on initialization.
        """
        # Turn the command into an integer.
        try:
            command_as_int = self._cmd_name_to_int[cmd]
        except KeyError:
            err = "Command '{}' not recognized.\n".format(cmd)
            raise ValueError(err)
        # Figure out what formats to use for each argument.
        arg_format_list = []
        if arg_formats != None:
            # The user specified formats
            arg_format_list = list(arg_formats)
        else:
            try:
                # See if class was initialized with a format for arguments to this
                # command
                arg_format_list = self._cmd_name_to_format[cmd]
            except KeyError:
                # if not, guess for all arguments
                arg_format_list = ["g" for i in range(len(args))]
        # Deal with "*" format
        arg_format_list = self._treat_star_format(arg_format_list,args)
        if len(args) > 0:
            if len(arg_format_list) != len(args):
                err = "Number of argument formats must match the number of arguments."
                raise ValueError(err)
        # Go through each argument and create a bytes representation in the
        # proper format to send.  Escape appropriate characters.
        fields = ["{}".format(command_as_int).encode("ascii")]
        for i, a in enumerate(args):
            fields.append(self._send_methods[arg_format_list[i]](a))
            fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
        # Make something that looks like cmd,field1,field2,field3;
        compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
        # Send the message.
        self.board.write(compiled_bytes)
    def receive(self,arg_formats=None):
        """
        Recieve commands coming off the serial port.
        arg_formats is an optimal keyword that specifies the formats to use to
        parse incoming arguments.  If specified here, arg_formats supercedes
        the formats specified on initialization.
        """
        # Read serial input until a command separator or empty character is
        # reached
        msg = [[]]
        raw_msg = []
        escaped = False
        command_sep_found = False
        while True:
            tmp = self.board.read()
            raw_msg.append(tmp)
            if escaped:
                # Either drop the escape character or, if this wasn't really
                # an escape, keep previous escape character and new character
                if tmp in self._escaped_characters:
                    msg[-1].append(tmp)
                    escaped = False
                else:
                    msg[-1].append(self._byte_escape_sep)
                    msg[-1].append(tmp)
                    escaped = False
            else:
                # look for escape character
                if tmp == self._byte_escape_sep:
                    escaped = True
                # or field separator
                elif tmp == self._byte_field_sep:
                    msg.append([])
                # or command separator
                elif tmp == self._byte_command_sep:
                    command_sep_found = True
                    break
                # or any empty characater
                elif tmp == b'':
                    break
                # okay, must be something
                else:
                    msg[-1].append(tmp)
        # No message received given timeouts
        if len(msg) == 1 and len(msg[0]) == 0:
            return None
        # Make sure the message terminated properly
        if not command_sep_found:
            # empty message (likely from line endings being included)
            joined_raw = b''.join(raw_msg)
            if joined_raw.strip() == b'':
                return None
            err = "Incomplete message ({})".format(joined_raw.decode())
            raise EOFError(err)
        # Turn message into fields
        fields = [b''.join(m) for m in msg]
        # Get the command name.
        cmd = fields[0].strip().decode()
        try:
            cmd_name = self._int_to_cmd_name[int(cmd)]
        except (ValueError,IndexError):
            if self.give_warnings:
                cmd_name = "unknown"
                w = "Recieved unrecognized command ({}).".format(cmd)
                warnings.warn(w,Warning)
        # Figure out what formats to use for each argument.
        arg_format_list = []
        if arg_formats != None:
            # The user specified formats
            arg_format_list = list(arg_formats)
        else:
            try:
                # See if class was initialized with a format for arguments to this
                # command
                arg_format_list = self._cmd_name_to_format[cmd_name]
            except KeyError:
                # if not, guess for all arguments
                arg_format_list = ["g" for i in range(len(fields[1:]))]
        # Deal with "*" format
        arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
        if len(fields[1:]) > 0:
            if len(arg_format_list) != len(fields[1:]):
                err = "Number of argument formats must match the number of recieved arguments."
                raise ValueError(err)
        received = []
        for i, f in enumerate(fields[1:]):
            received.append(self._recv_methods[arg_format_list[i]](f))
        # Record the time the message arrived
        message_time = time.time()
        return cmd_name, received, message_time
    def _treat_star_format(self,arg_format_list,args):
        """
        Deal with "*" format if specified.
        """
        num_stars = len([a for a in arg_format_list if a == "*"])
        if num_stars > 0:
            # Make sure the repeated format argument only occurs once, is last,
            # and that there is at least one format in addition to it.
            if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
                # Trim * from end
                arg_format_list = arg_format_list[:-1]
                # If we need extra arguments...
                if len(arg_format_list) < len(args):
                    f = arg_format_list[-1]
                    len_diff = len(args) - len(arg_format_list)
                    tmp = list(arg_format_list)
                    tmp.extend([f for i in range(len_diff)])
                    arg_format_list = "".join(tmp)
            else:
                err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
                raise ValueError(err)
        return arg_format_list
    def _send_char(self,value):
        """
        Convert a single char to a bytes object.
        """
        if type(value) != str and type(value) != bytes:
            err = "char requires a string or bytes array of length 1"
            raise ValueError(err)
        if len(value) != 1:
            err = "char must be a single character, not \"{}\"".format(value)
            raise ValueError(err)
        if type(value) != bytes:
            value = value.encode("ascii")
        # Separator/escape bytes cannot travel as a raw char payload.
        if value in self._escaped_characters:
            err = "Cannot send a control character as a single char to arduino.  Send as string instead."
            raise OverflowError(err)
        return struct.pack('c',value)
    def _send_byte(self,value):
        """
        Convert a numerical value into an integer, then to a byte object. Check
        bounds for byte.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value,new_value)
                warnings.warn(w,Warning)
            value = new_value
        # Range check
        if value > 255 or value < 0:
            err = "Value {} exceeds the size of the board's byte.".format(value)
            raise OverflowError(err)
        return struct.pack("B",value)
    def _send_int(self,value):
        """
        Convert a numerical value into an integer, then to a bytes object Check
        bounds for signed int.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value,new_value)
                warnings.warn(w,Warning)
            value = new_value
        # Range check
        if value > self.board.int_max or value < self.board.int_min:
            err = "Value {} exceeds the size of the board's int.".format(value)
            raise OverflowError(err)
        return struct.pack(self.board.int_type,value)
    def _send_unsigned_int(self,value):
        """
        Convert a numerical value into an integer, then to a bytes object. Check
        bounds for unsigned int.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value,new_value)
                warnings.warn(w,Warning)
            value = new_value
        # Range check
        if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
            err = "Value {} exceeds the size of the board's unsigned int.".format(value)
            raise OverflowError(err)
        return struct.pack(self.board.unsigned_int_type,value)
    def _send_long(self,value):
        """
        Convert a numerical value into an integer, then to a bytes object. Check
        bounds for signed long.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value,new_value)
                warnings.warn(w,Warning)
            value = new_value
        # Range check
        if value > self.board.long_max or value < self.board.long_min:
            err = "Value {} exceeds the size of the board's long.".format(value)
            raise OverflowError(err)
        return struct.pack(self.board.long_type,value)
    def _send_unsigned_long(self,value):
        """
        Convert a numerical value into an integer, then to a bytes object.
        Check bounds for unsigned long.
        """
        # Coerce to int. This will throw a ValueError if the value can't
        # actually be converted.
        if type(value) != int:
            new_value = int(value)
            if self.give_warnings:
                w = "Coercing {} into int ({})".format(value,new_value)
                warnings.warn(w,Warning)
            value = new_value
        # Range check
        if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
            err = "Value {} exceeds the size of the board's unsigned long.".format(value)
            raise OverflowError(err)
        return struct.pack(self.board.unsigned_long_type,value)
    def _send_float(self,value):
        """
        Return a float as a IEEE 754 format bytes object.
        """
        # convert to float. this will throw a ValueError if the type is not
        # readily converted
        if type(value) != float:
            value = float(value)
        # Range check
        if value > self.board.float_max or value < self.board.float_min:
            err = "Value {} exceeds the size of the board's float.".format(value)
            raise OverflowError(err)
        return struct.pack(self.board.float_type,value)
    def _send_double(self,value):
        """
        Return a float as a IEEE 754 format bytes object.
        """
        # convert to float. this will throw a ValueError if the type is not
        # readily converted
        if type(value) != float:
            value = float(value)
        # Range check
        # NOTE(review): uses the board's float limits for a double payload —
        # presumably because boards only declare float bounds; confirm.
        if value > self.board.float_max or value < self.board.float_min:
            err = "Value {} exceeds the size of the board's float.".format(value)
            raise OverflowError(err)
        return struct.pack(self.board.double_type,value)
    def _send_string(self,value):
        """
        Convert a string to a bytes object.  If value is not a string, it is
        be converted to one with a standard string.format call.
        """
        if type(value) != bytes:
            value = "{}".format(value).encode("ascii")
        return value
    def _send_guess(self,value):
        """
        Send the argument as a string in a way that should (probably, maybe!) be
        processed properly by C++ calls like atoi, atof, etc.  This method is
        NOT RECOMMENDED, particularly for floats, because values are often
        mangled silently.  Instead, specify a format (e.g. "f") and use the
        CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
        read the values on the arduino side.
        """
        if type(value) != str and type(value) != bytes and self.give_warnings:
            w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
            warnings.warn(w,Warning)
        if type(value) == float:
            return "{:.10e}".format(value).encode("ascii")
        elif type(value) == bool:
            return "{}".format(int(value)).encode("ascii")
        else:
            return self._send_string(value)
    def _recv_char(self,value):
        """
        Recieve a char in binary format, returning as string.
        """
        return struct.unpack("c",value)[0].decode("ascii")
    def _recv_byte(self,value):
        """
        Recieve a byte in binary format, returning as python int.
        """
        return struct.unpack("B",value)[0]
    def _recv_int(self,value):
        """
        Recieve an int in binary format, returning as python int.
        """
        return struct.unpack(self.board.int_type,value)[0]
    def _recv_unsigned_int(self,value):
        """
        Recieve an unsigned int in binary format, returning as python int.
        """
        return struct.unpack(self.board.unsigned_int_type,value)[0]
    def _recv_long(self,value):
        """
        Recieve a long in binary format, returning as python int.
        """
        return struct.unpack(self.board.long_type,value)[0]
    def _recv_unsigned_long(self,value):
        """
        Recieve an unsigned long in binary format, returning as python int.
        """
        return struct.unpack(self.board.unsigned_long_type,value)[0]
    def _recv_float(self,value):
        """
        Recieve a float in binary format, returning as python float.
        """
        return struct.unpack(self.board.float_type,value)[0]
    def _recv_double(self,value):
        """
        Recieve a double in binary format, returning as python float.
        """
        return struct.unpack(self.board.double_type,value)[0]
    def _recv_string(self,value):
        """
        Recieve a binary (bytes) string, returning a python string.
        """
        s = value.decode('ascii')
        # Strip null characters
        s = s.strip("\x00")
        # Strip other white space
        s = s.strip()
        return s
    def _recv_bool(self,value):
        """
        Receive a binary bool, return as python bool.
        """
        return struct.unpack("?",value)[0]
    def _recv_guess(self,value):
        """
        Take the binary spew and try to make it into a float or integer.  If
        that can't be done, return a string.
        Note: this is generally a bad idea, as values can be seriously mangled
        by going from float -> string -> float.  You'll generally be better off
        using a format specifier and binary argument passing.
        """
        if self.give_warnings:
            w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
            warnings.warn(w,Warning)
        tmp_value = value.decode()
        try:
            float(tmp_value)
            if len(tmp_value.split(".")) == 1:
                # integer
                return int(tmp_value)
            else:
                # float
                return float(tmp_value)
        except ValueError:
            pass
        # Return as string
        return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_guess
|
python
|
def _send_guess(self,value):
if type(value) != str and type(value) != bytes and self.give_warnings:
w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
if type(value) == float:
return "{:.10e}".format(value).encode("ascii")
elif type(value) == bool:
return "{}".format(int(value)).encode("ascii")
else:
return self._send_string(value)
|
Send the argument as a string in a way that should (probably, maybe!) be
processed properly by C++ calls like atoi, atof, etc. This method is
NOT RECOMMENDED, particularly for floats, because values are often
mangled silently. Instead, specify a format (e.g. "f") and use the
CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
read the values on the arduino side.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L512-L531
| null |
class CmdMessenger:
"""
Basic interface for interfacing over a serial connection to an arduino
using the CmdMessenger library.
"""
def __init__(self,
board_instance,
commands,
field_separator=",",
command_separator=";",
escape_separator="/",
warnings=True):
"""
Input:
board_instance:
instance of ArduinoBoard initialized with correct serial
connection (points to correct serial with correct baud rate) and
correct board parameters (float bytes, etc.)
commands:
a list or tuple of commands specified in the arduino .ino file
*in the same order* they are listed there. commands should be
a list of lists, where the first element in the list specifies
the command name and the second the formats for the arguments.
(e.g. commands = [["who_are_you",""],["my_name_is","s"]])
field_separator:
character that separates fields within a message
Default: ","
command_separator:
character that separates messages (commands) from each other
Default: ";"
escape_separator:
escape character to allow separators within messages.
Default: "/"
warnings:
warnings for user
Default: True
The separators and escape_separator should match what's
in the arduino code that initializes the CmdMessenger. The default
separator values match the default values as of CmdMessenger 4.0.
"""
self.board = board_instance
if not self.board.connected:
err = "Arduino not connected on {}\n".format(self.board.device)
raise IOError(err)
self.commands = commands[:]
self.field_separator = field_separator
self.command_separator = command_separator
self.escape_separator = escape_separator
self.give_warnings = warnings
self._cmd_name_to_int = {}
self._int_to_cmd_name = {}
self._cmd_name_to_format = {}
for i, c in enumerate(commands):
self._cmd_name_to_int[c[0]] = i
self._int_to_cmd_name[i] = c[0]
self._cmd_name_to_format[c[0]] = c[1]
self._byte_field_sep = self.field_separator.encode("ascii")
self._byte_command_sep = self.command_separator.encode("ascii")
self._byte_escape_sep = self.escape_separator.encode("ascii")
self._escaped_characters = [self._byte_field_sep,
self._byte_command_sep,
self._byte_escape_sep,
b'\0']
self._null_escape_re = re.compile(b'\0')
self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
self.command_separator,
self.escape_separator).encode('ascii'))
self._send_methods = {"c":self._send_char,
"b":self._send_byte,
"i":self._send_int,
"I":self._send_unsigned_int,
"l":self._send_long,
"L":self._send_unsigned_long,
"f":self._send_float,
"d":self._send_double,
"s":self._send_string,
"?":self._send_bool,
"g":self._send_guess}
self._recv_methods = {"c":self._recv_char,
"b":self._recv_byte,
"i":self._recv_int,
"I":self._recv_unsigned_int,
"l":self._recv_long,
"L":self._recv_unsigned_long,
"f":self._recv_float,
"d":self._recv_double,
"s":self._recv_string,
"?":self._recv_bool,
"g":self._recv_guess}
def send(self,cmd,*args,arg_formats=None):
"""
Send a command (which may or may not have associated arguments) to an
arduino using the CmdMessage protocol. The command and any parameters
should be passed as direct arguments to send.
arg_formats is an optional string that specifies the formats to use for
each argument when passed to the arduino. If specified here,
arg_formats supercedes formats specified on initialization.
"""
# Turn the command into an integer.
try:
command_as_int = self._cmd_name_to_int[cmd]
except KeyError:
err = "Command '{}' not recognized.\n".format(cmd)
raise ValueError(err)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(args))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,args)
if len(args) > 0:
if len(arg_format_list) != len(args):
err = "Number of argument formats must match the number of arguments."
raise ValueError(err)
# Go through each argument and create a bytes representation in the
# proper format to send. Escape appropriate characters.
fields = ["{}".format(command_as_int).encode("ascii")]
for i, a in enumerate(args):
fields.append(self._send_methods[arg_format_list[i]](a))
fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
# Make something that looks like cmd,field1,field2,field3;
compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
# Send the message.
self.board.write(compiled_bytes)
def receive(self,arg_formats=None):
"""
Recieve commands coming off the serial port.
arg_formats is an optimal keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supercedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty characater
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
def _treat_star_format(self,arg_format_list,args):
"""
Deal with "*" format if specified.
"""
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value)
def _send_double(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.double_type,value)
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
def _recv_char(self,value):
"""
Recieve a char in binary format, returning as string.
"""
return struct.unpack("c",value)[0].decode("ascii")
def _recv_byte(self,value):
"""
Recieve a byte in binary format, returning as python int.
"""
return struct.unpack("B",value)[0]
def _recv_int(self,value):
"""
Recieve an int in binary format, returning as python int.
"""
return struct.unpack(self.board.int_type,value)[0]
def _recv_unsigned_int(self,value):
"""
Recieve an unsigned int in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_int_type,value)[0]
def _recv_long(self,value):
"""
Recieve a long in binary format, returning as python int.
"""
return struct.unpack(self.board.long_type,value)[0]
def _recv_unsigned_long(self,value):
"""
Recieve an unsigned long in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_long_type,value)[0]
def _recv_float(self,value):
"""
Recieve a float in binary format, returning as python float.
"""
return struct.unpack(self.board.float_type,value)[0]
def _recv_double(self,value):
"""
Recieve a double in binary format, returning as python float.
"""
return struct.unpack(self.board.double_type,value)[0]
def _recv_string(self,value):
"""
Recieve a binary (bytes) string, returning a python string.
"""
s = value.decode('ascii')
# Strip null characters
s = s.strip("\x00")
# Strip other white space
s = s.strip()
return s
def _recv_bool(self,value):
"""
Receive a binary bool, return as python bool.
"""
return struct.unpack("?",value)[0]
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._recv_string
|
python
|
def _recv_string(self,value):
s = value.decode('ascii')
# Strip null characters
s = s.strip("\x00")
# Strip other white space
s = s.strip()
return s
|
Recieve a binary (bytes) string, returning a python string.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L588-L601
| null |
class CmdMessenger:
"""
Basic interface for interfacing over a serial connection to an arduino
using the CmdMessenger library.
"""
def __init__(self,
board_instance,
commands,
field_separator=",",
command_separator=";",
escape_separator="/",
warnings=True):
"""
Input:
board_instance:
instance of ArduinoBoard initialized with correct serial
connection (points to correct serial with correct baud rate) and
correct board parameters (float bytes, etc.)
commands:
a list or tuple of commands specified in the arduino .ino file
*in the same order* they are listed there. commands should be
a list of lists, where the first element in the list specifies
the command name and the second the formats for the arguments.
(e.g. commands = [["who_are_you",""],["my_name_is","s"]])
field_separator:
character that separates fields within a message
Default: ","
command_separator:
character that separates messages (commands) from each other
Default: ";"
escape_separator:
escape character to allow separators within messages.
Default: "/"
warnings:
warnings for user
Default: True
The separators and escape_separator should match what's
in the arduino code that initializes the CmdMessenger. The default
separator values match the default values as of CmdMessenger 4.0.
"""
self.board = board_instance
if not self.board.connected:
err = "Arduino not connected on {}\n".format(self.board.device)
raise IOError(err)
self.commands = commands[:]
self.field_separator = field_separator
self.command_separator = command_separator
self.escape_separator = escape_separator
self.give_warnings = warnings
self._cmd_name_to_int = {}
self._int_to_cmd_name = {}
self._cmd_name_to_format = {}
for i, c in enumerate(commands):
self._cmd_name_to_int[c[0]] = i
self._int_to_cmd_name[i] = c[0]
self._cmd_name_to_format[c[0]] = c[1]
self._byte_field_sep = self.field_separator.encode("ascii")
self._byte_command_sep = self.command_separator.encode("ascii")
self._byte_escape_sep = self.escape_separator.encode("ascii")
self._escaped_characters = [self._byte_field_sep,
self._byte_command_sep,
self._byte_escape_sep,
b'\0']
self._null_escape_re = re.compile(b'\0')
self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
self.command_separator,
self.escape_separator).encode('ascii'))
self._send_methods = {"c":self._send_char,
"b":self._send_byte,
"i":self._send_int,
"I":self._send_unsigned_int,
"l":self._send_long,
"L":self._send_unsigned_long,
"f":self._send_float,
"d":self._send_double,
"s":self._send_string,
"?":self._send_bool,
"g":self._send_guess}
self._recv_methods = {"c":self._recv_char,
"b":self._recv_byte,
"i":self._recv_int,
"I":self._recv_unsigned_int,
"l":self._recv_long,
"L":self._recv_unsigned_long,
"f":self._recv_float,
"d":self._recv_double,
"s":self._recv_string,
"?":self._recv_bool,
"g":self._recv_guess}
def send(self,cmd,*args,arg_formats=None):
"""
Send a command (which may or may not have associated arguments) to an
arduino using the CmdMessage protocol. The command and any parameters
should be passed as direct arguments to send.
arg_formats is an optional string that specifies the formats to use for
each argument when passed to the arduino. If specified here,
arg_formats supercedes formats specified on initialization.
"""
# Turn the command into an integer.
try:
command_as_int = self._cmd_name_to_int[cmd]
except KeyError:
err = "Command '{}' not recognized.\n".format(cmd)
raise ValueError(err)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(args))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,args)
if len(args) > 0:
if len(arg_format_list) != len(args):
err = "Number of argument formats must match the number of arguments."
raise ValueError(err)
# Go through each argument and create a bytes representation in the
# proper format to send. Escape appropriate characters.
fields = ["{}".format(command_as_int).encode("ascii")]
for i, a in enumerate(args):
fields.append(self._send_methods[arg_format_list[i]](a))
fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
# Make something that looks like cmd,field1,field2,field3;
compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
# Send the message.
self.board.write(compiled_bytes)
def receive(self,arg_formats=None):
"""
Recieve commands coming off the serial port.
arg_formats is an optimal keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supercedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty characater
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
def _treat_star_format(self,arg_format_list,args):
"""
Deal with "*" format if specified.
"""
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value)
def _send_double(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.double_type,value)
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
def _send_guess(self,value):
"""
Send the argument as a string in a way that should (probably, maybe!) be
processed properly by C++ calls like atoi, atof, etc. This method is
NOT RECOMMENDED, particularly for floats, because values are often
mangled silently. Instead, specify a format (e.g. "f") and use the
CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
read the values on the arduino side.
"""
if type(value) != str and type(value) != bytes and self.give_warnings:
w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
if type(value) == float:
return "{:.10e}".format(value).encode("ascii")
elif type(value) == bool:
return "{}".format(int(value)).encode("ascii")
else:
return self._send_string(value)
def _recv_char(self,value):
"""
Recieve a char in binary format, returning as string.
"""
return struct.unpack("c",value)[0].decode("ascii")
def _recv_byte(self,value):
"""
Recieve a byte in binary format, returning as python int.
"""
return struct.unpack("B",value)[0]
def _recv_int(self,value):
"""
Recieve an int in binary format, returning as python int.
"""
return struct.unpack(self.board.int_type,value)[0]
def _recv_unsigned_int(self,value):
"""
Recieve an unsigned int in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_int_type,value)[0]
def _recv_long(self,value):
"""
Recieve a long in binary format, returning as python int.
"""
return struct.unpack(self.board.long_type,value)[0]
def _recv_unsigned_long(self,value):
"""
Recieve an unsigned long in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_long_type,value)[0]
def _recv_float(self,value):
"""
Recieve a float in binary format, returning as python float.
"""
return struct.unpack(self.board.float_type,value)[0]
def _recv_double(self,value):
"""
Recieve a double in binary format, returning as python float.
"""
return struct.unpack(self.board.double_type,value)[0]
def _recv_bool(self,value):
"""
Receive a binary bool, return as python bool.
"""
return struct.unpack("?",value)[0]
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._recv_guess
|
python
|
def _recv_guess(self,value):
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
|
train
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L610-L640
| null |
class CmdMessenger:
"""
Basic interface for interfacing over a serial connection to an arduino
using the CmdMessenger library.
"""
def __init__(self,
board_instance,
commands,
field_separator=",",
command_separator=";",
escape_separator="/",
warnings=True):
"""
Input:
board_instance:
instance of ArduinoBoard initialized with correct serial
connection (points to correct serial with correct baud rate) and
correct board parameters (float bytes, etc.)
commands:
a list or tuple of commands specified in the arduino .ino file
*in the same order* they are listed there. commands should be
a list of lists, where the first element in the list specifies
the command name and the second the formats for the arguments.
(e.g. commands = [["who_are_you",""],["my_name_is","s"]])
field_separator:
character that separates fields within a message
Default: ","
command_separator:
character that separates messages (commands) from each other
Default: ";"
escape_separator:
escape character to allow separators within messages.
Default: "/"
warnings:
warnings for user
Default: True
The separators and escape_separator should match what's
in the arduino code that initializes the CmdMessenger. The default
separator values match the default values as of CmdMessenger 4.0.
"""
self.board = board_instance
if not self.board.connected:
err = "Arduino not connected on {}\n".format(self.board.device)
raise IOError(err)
self.commands = commands[:]
self.field_separator = field_separator
self.command_separator = command_separator
self.escape_separator = escape_separator
self.give_warnings = warnings
self._cmd_name_to_int = {}
self._int_to_cmd_name = {}
self._cmd_name_to_format = {}
for i, c in enumerate(commands):
self._cmd_name_to_int[c[0]] = i
self._int_to_cmd_name[i] = c[0]
self._cmd_name_to_format[c[0]] = c[1]
self._byte_field_sep = self.field_separator.encode("ascii")
self._byte_command_sep = self.command_separator.encode("ascii")
self._byte_escape_sep = self.escape_separator.encode("ascii")
self._escaped_characters = [self._byte_field_sep,
self._byte_command_sep,
self._byte_escape_sep,
b'\0']
self._null_escape_re = re.compile(b'\0')
self._escape_re = re.compile("([{}{}{}\0])".format(self.field_separator,
self.command_separator,
self.escape_separator).encode('ascii'))
self._send_methods = {"c":self._send_char,
"b":self._send_byte,
"i":self._send_int,
"I":self._send_unsigned_int,
"l":self._send_long,
"L":self._send_unsigned_long,
"f":self._send_float,
"d":self._send_double,
"s":self._send_string,
"?":self._send_bool,
"g":self._send_guess}
self._recv_methods = {"c":self._recv_char,
"b":self._recv_byte,
"i":self._recv_int,
"I":self._recv_unsigned_int,
"l":self._recv_long,
"L":self._recv_unsigned_long,
"f":self._recv_float,
"d":self._recv_double,
"s":self._recv_string,
"?":self._recv_bool,
"g":self._recv_guess}
def send(self,cmd,*args,arg_formats=None):
"""
Send a command (which may or may not have associated arguments) to an
arduino using the CmdMessage protocol. The command and any parameters
should be passed as direct arguments to send.
arg_formats is an optional string that specifies the formats to use for
each argument when passed to the arduino. If specified here,
arg_formats supercedes formats specified on initialization.
"""
# Turn the command into an integer.
try:
command_as_int = self._cmd_name_to_int[cmd]
except KeyError:
err = "Command '{}' not recognized.\n".format(cmd)
raise ValueError(err)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(args))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,args)
if len(args) > 0:
if len(arg_format_list) != len(args):
err = "Number of argument formats must match the number of arguments."
raise ValueError(err)
# Go through each argument and create a bytes representation in the
# proper format to send. Escape appropriate characters.
fields = ["{}".format(command_as_int).encode("ascii")]
for i, a in enumerate(args):
fields.append(self._send_methods[arg_format_list[i]](a))
fields[-1] = self._escape_re.sub(self._byte_escape_sep + r"\1".encode("ascii"),fields[-1])
# Make something that looks like cmd,field1,field2,field3;
compiled_bytes = self._byte_field_sep.join(fields) + self._byte_command_sep
# Send the message.
self.board.write(compiled_bytes)
def receive(self,arg_formats=None):
"""
Recieve commands coming off the serial port.
arg_formats is an optimal keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supercedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty characater
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
def _treat_star_format(self,arg_format_list,args):
"""
Deal with "*" format if specified.
"""
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value)
def _send_double(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.double_type,value)
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
def _send_guess(self,value):
"""
Send the argument as a string in a way that should (probably, maybe!) be
processed properly by C++ calls like atoi, atof, etc. This method is
NOT RECOMMENDED, particularly for floats, because values are often
mangled silently. Instead, specify a format (e.g. "f") and use the
CmdMessenger::readBinArg<CAST> method (e.g. c.readBinArg<float>();) to
read the values on the arduino side.
"""
if type(value) != str and type(value) != bytes and self.give_warnings:
w = "Warning: Sending {} as a string. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
if type(value) == float:
return "{:.10e}".format(value).encode("ascii")
elif type(value) == bool:
return "{}".format(int(value)).encode("ascii")
else:
return self._send_string(value)
def _recv_char(self,value):
"""
Recieve a char in binary format, returning as string.
"""
return struct.unpack("c",value)[0].decode("ascii")
def _recv_byte(self,value):
"""
Recieve a byte in binary format, returning as python int.
"""
return struct.unpack("B",value)[0]
def _recv_int(self,value):
"""
Recieve an int in binary format, returning as python int.
"""
return struct.unpack(self.board.int_type,value)[0]
def _recv_unsigned_int(self,value):
"""
Recieve an unsigned int in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_int_type,value)[0]
def _recv_long(self,value):
"""
Recieve a long in binary format, returning as python int.
"""
return struct.unpack(self.board.long_type,value)[0]
def _recv_unsigned_long(self,value):
"""
Recieve an unsigned long in binary format, returning as python int.
"""
return struct.unpack(self.board.unsigned_long_type,value)[0]
def _recv_float(self,value):
"""
Recieve a float in binary format, returning as python float.
"""
return struct.unpack(self.board.float_type,value)[0]
def _recv_double(self,value):
"""
Recieve a double in binary format, returning as python float.
"""
return struct.unpack(self.board.double_type,value)[0]
def _recv_string(self,value):
"""
Recieve a binary (bytes) string, returning a python string.
"""
s = value.decode('ascii')
# Strip null characters
s = s.strip("\x00")
# Strip other white space
s = s.strip()
return s
def _recv_bool(self,value):
"""
Receive a binary bool, return as python bool.
"""
return struct.unpack("?",value)[0]
|
benmoran56/esper
|
esper.py
|
World.clear_database
|
python
|
def clear_database(self) -> None:
self._next_entity_id = 0
self._dead_entities.clear()
self._components.clear()
self._entities.clear()
self.clear_cache()
|
Remove all Entities and Components from the World.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L46-L52
|
[
"def clear_cache(self) -> None:\n self.get_component.cache_clear()\n self.get_components.cache_clear()\n"
] |
class World:
    """Database of Entities, Components, and Processors (ECS core)."""

    def __init__(self, timed=False):
        """A World object keeps track of all Entities, Components, and Processors.
        A World contains a database of all Entity/Component assignments. It also
        handles calling the process method on any Processors assigned to it.
        """
        self._processors = []  # type: List[Processor]
        self._next_entity_id = 0
        # component type -> set of entity ids that currently have one
        self._components = {}
        # entity id -> {component type: component instance}
        self._entities = {}  # type: Dict[int, Any]
        self._dead_entities = set()
        if timed:
            # Benchmarking mode: process() records per-Processor runtimes (ms).
            self.process_times = {}
            self._process = self._timed_process

    def clear_cache(self) -> None:
        """Invalidate the lru_cache memoization on get_component/get_components."""
        self.get_component.cache_clear()
        self.get_components.cache_clear()

    def add_processor(self, processor_instance: Processor, priority=0) -> None:
        """Add a Processor instance to the World.
        :param processor_instance: An instance of a Processor,
                                   subclassed from the Processor class
        :param priority: A higher number is processed first.
        """
        assert issubclass(processor_instance.__class__, Processor)
        processor_instance.priority = priority
        processor_instance.world = self
        self._processors.append(processor_instance)
        # Keep the list ordered so process() runs highest priority first.
        self._processors.sort(key=lambda proc: proc.priority, reverse=True)

    def remove_processor(self, processor_type: Processor) -> None:
        """Remove a Processor from the World, by type.
        :param processor_type: The class type of the Processor to remove.
        """
        for processor in self._processors:
            if type(processor) == processor_type:
                processor.world = None
                self._processors.remove(processor)

    def get_processor(self, processor_type: Type[P]) -> P:
        """Get a Processor instance, by type.
        This method returns a Processor instance by type. This could be
        useful in certain situations, such as wanting to call a method on a
        Processor, from within another Processor.
        :param processor_type: The type of the Processor you wish to retrieve.
        :return: A Processor instance that has previously been added to the World,
                 or None (implicitly) if none of that type was added.
        """
        for processor in self._processors:
            if type(processor) == processor_type:
                return processor

    def create_entity(self, *components) -> int:
        """Create a new Entity.
        This method returns an Entity ID, which is just a plain integer.
        You can optionally pass one or more Component instances to be
        assigned to the Entity.
        :param components: Optional components to be assigned to the
                           entity on creation.
        :return: The next Entity ID in sequence.
        """
        self._next_entity_id += 1
        # TODO: duplicate add_component code here for performance
        for component in components:
            self.add_component(self._next_entity_id, component)
        # self.clear_cache()
        return self._next_entity_id

    def delete_entity(self, entity: int, immediate=False) -> None:
        """Delete an Entity from the World.
        Delete an Entity and all of its assigned Component instances from
        the world. By default, Entity deletion is delayed until the next call
        to *World.process*. You can request immediate deletion, however, by
        passing the "immediate=True" parameter. This should generally not be
        done during Entity iteration (calls to World.get_component/s).
        Raises a KeyError if the given entity does not exist in the database.
        :param entity: The Entity ID you wish to delete.
        :param immediate: If True, delete the Entity immediately.
        """
        if immediate:
            for component_type in self._entities[entity]:
                self._components[component_type].discard(entity)
                # Drop empty component sets so lookups stay clean.
                if not self._components[component_type]:
                    del self._components[component_type]
            del self._entities[entity]
            self.clear_cache()
        else:
            # Deferred: removed for real by _clear_dead_entities() in process().
            self._dead_entities.add(entity)

    def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
        """Retrieve a Component instance for a specific Entity.
        Retrieve a Component instance for a specific Entity. In some cases,
        it may be necessary to access a specific Component instance.
        For example: directly modifying a Component to handle user input.
        Raises a KeyError if the given Entity and Component do not exist.
        :param entity: The Entity ID to retrieve the Component for.
        :param component_type: The Component instance you wish to retrieve.
        :return: The Component instance requested for the given Entity ID.
        """
        return self._entities[entity][component_type]

    def components_for_entity(self, entity: int) -> Tuple[C, ...]:
        """Retrieve all Components for a specific Entity, as a Tuple.
        Retrieve all Components for a specific Entity. The method is probably
        not appropriate to use in your Processors, but might be useful for
        saving state, or passing specific Components between World instances.
        Unlike most other methods, this returns all of the Components as a
        Tuple in one batch, instead of returning a Generator for iteration.
        Raises a KeyError if the given entity does not exist in the database.
        :param entity: The Entity ID to retrieve the Components for.
        :return: A tuple of all Component instances that have been
                 assigned to the passed Entity ID.
        """
        return tuple(self._entities[entity].values())

    def has_component(self, entity: int, component_type: Any) -> bool:
        """Check if a specific Entity has a Component of a certain type.
        :param entity: The Entity you are querying.
        :param component_type: The type of Component to check for.
        :return: True if the Entity has a Component of this type,
                 otherwise False
        """
        return component_type in self._entities[entity]

    def add_component(self, entity: int, component_instance: Any) -> None:
        """Add a new Component instance to an Entity.
        Add a Component instance to an Entity. If a Component of the same type
        is already assigned to the Entity, it will be replaced.
        :param entity: The Entity to associate the Component with.
        :param component_instance: A Component instance.
        """
        component_type = type(component_instance)
        if component_type not in self._components:
            self._components[component_type] = set()
        self._components[component_type].add(entity)
        if entity not in self._entities:
            self._entities[entity] = {}
        self._entities[entity][component_type] = component_instance
        self.clear_cache()

    def remove_component(self, entity: int, component_type: Any) -> int:
        """Remove a Component instance from an Entity, by type.
        A Component instance can be removed by providing its type.
        For example: world.delete_component(enemy_a, Velocity) will remove
        the Velocity instance from the Entity enemy_a.
        Raises a KeyError if either the given entity or Component type does
        not exist in the database.
        :param entity: The Entity to remove the Component from.
        :param component_type: The type of the Component to remove.
        """
        self._components[component_type].discard(entity)
        if not self._components[component_type]:
            del self._components[component_type]
        del self._entities[entity][component_type]
        # An Entity left with no Components is removed from the database.
        if not self._entities[entity]:
            del self._entities[entity]
        self.clear_cache()
        return entity

    def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
        """Get an iterator for Entity, Component pairs.
        :param component_type: The Component type to retrieve.
        :return: An iterator for (Entity, Component) tuples.
        """
        entity_db = self._entities
        for entity in self._components.get(component_type, []):
            yield entity, entity_db[entity][component_type]

    def _get_components(self, *component_types: Type) -> Iterable[Tuple[int, ...]]:
        """Get an iterator for Entity and multiple Component sets.
        :param component_types: Two or more Component types.
        :return: An iterator for Entity, (Component1, Component2, etc)
                 tuples.
        """
        entity_db = self._entities
        comp_db = self._components
        try:
            for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
                yield entity, [entity_db[entity][ct] for ct in component_types]
        except KeyError:
            # One of the requested types has no instances: yield nothing.
            pass

    @_lru_cache()
    def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
        """Memoized list of (Entity, Component) pairs for one Component type."""
        return [query for query in self._get_component(component_type)]

    @_lru_cache()
    def get_components(self, *component_types: Type):
        """Memoized list of (Entity, [Components]) rows for a type combination."""
        return [query for query in self._get_components(*component_types)]

    def try_component(self, entity: int, component_type: Type):
        """Try to get a single component type for an Entity.
        This method will return the requested Component if it exists, but
        will pass silently if it does not. This allows a way to access optional
        Components that may or may not exist.
        :param entity: The Entity ID to retrieve the Component for.
        :param component_type: The Component instance you wish to retrieve.
        :return: An iterator containing the single Component instance requested,
                 which is empty if the component doesn't exist.
        """
        if component_type in self._entities[entity]:
            yield self._entities[entity][component_type]
        else:
            return None

    def _clear_dead_entities(self):
        """Finalize deletion of any Entities that are marked dead.
        In the interest of performance, this method duplicates code from the
        `delete_entity` method. If that method is changed, those changes should
        be duplicated here as well.
        """
        for entity in self._dead_entities:
            for component_type in self._entities[entity]:
                self._components[component_type].discard(entity)
                if not self._components[component_type]:
                    del self._components[component_type]
            del self._entities[entity]
        self._dead_entities.clear()
        self.clear_cache()

    def _process(self, *args, **kwargs):
        # Default dispatcher: run every Processor in priority order.
        for processor in self._processors:
            processor.process(*args, **kwargs)

    def _timed_process(self, *args, **kwargs):
        """Track Processor execution time for benchmarking."""
        for processor in self._processors:
            start_time = _time.process_time()
            processor.process(*args, **kwargs)
            # NOTE(review): round(..., 2) before int() is redundant here.
            process_time = int(round((_time.process_time() - start_time) * 1000, 2))
            self.process_times[processor.__class__.__name__] = process_time

    def process(self, *args, **kwargs):
        """Call the process method on all Processors, in order of their priority.
        Call the *process* method on all assigned Processors, respecting their
        optional priority setting. In addition, any Entities that were marked
        for deletion since the last call to *World.process*, will be deleted
        at the start of this method call.
        :param args: Optional arguments that will be passed through to the
                     *process* method of all Processors.
        """
        self._clear_dead_entities()
        self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World.add_processor
|
python
|
def add_processor(self, processor_instance: Processor, priority=0) -> None:
assert issubclass(processor_instance.__class__, Processor)
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda proc: proc.priority, reverse=True)
|
Add a Processor instance to the World.
:param processor_instance: An instance of a Processor,
subclassed from the Processor class
:param priority: A higher number is processed first.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L54-L65
| null |
class World:
    """Database of Entities, Components, and Processors (ECS core)."""

    def __init__(self, timed=False):
        """A World object keeps track of all Entities, Components, and Processors.
        A World contains a database of all Entity/Component assignments. It also
        handles calling the process method on any Processors assigned to it.
        """
        self._processors = []  # type: List[Processor]
        self._next_entity_id = 0
        # component type -> set of entity ids that currently have one
        self._components = {}
        # entity id -> {component type: component instance}
        self._entities = {}  # type: Dict[int, Any]
        self._dead_entities = set()
        if timed:
            # Benchmarking mode: process() records per-Processor runtimes (ms).
            self.process_times = {}
            self._process = self._timed_process

    def clear_cache(self) -> None:
        """Invalidate the lru_cache memoization on get_component/get_components."""
        self.get_component.cache_clear()
        self.get_components.cache_clear()

    def clear_database(self) -> None:
        """Remove all Entities and Components from the World."""
        self._next_entity_id = 0
        self._dead_entities.clear()
        self._components.clear()
        self._entities.clear()
        self.clear_cache()

    def remove_processor(self, processor_type: Processor) -> None:
        """Remove a Processor from the World, by type.
        :param processor_type: The class type of the Processor to remove.
        """
        for processor in self._processors:
            if type(processor) == processor_type:
                processor.world = None
                self._processors.remove(processor)

    def get_processor(self, processor_type: Type[P]) -> P:
        """Get a Processor instance, by type.
        This method returns a Processor instance by type. This could be
        useful in certain situations, such as wanting to call a method on a
        Processor, from within another Processor.
        :param processor_type: The type of the Processor you wish to retrieve.
        :return: A Processor instance that has previously been added to the World,
                 or None (implicitly) if none of that type was added.
        """
        for processor in self._processors:
            if type(processor) == processor_type:
                return processor

    def create_entity(self, *components) -> int:
        """Create a new Entity.
        This method returns an Entity ID, which is just a plain integer.
        You can optionally pass one or more Component instances to be
        assigned to the Entity.
        :param components: Optional components to be assigned to the
                           entity on creation.
        :return: The next Entity ID in sequence.
        """
        self._next_entity_id += 1
        # TODO: duplicate add_component code here for performance
        for component in components:
            self.add_component(self._next_entity_id, component)
        # self.clear_cache()
        return self._next_entity_id

    def delete_entity(self, entity: int, immediate=False) -> None:
        """Delete an Entity from the World.
        Delete an Entity and all of its assigned Component instances from
        the world. By default, Entity deletion is delayed until the next call
        to *World.process*. You can request immediate deletion, however, by
        passing the "immediate=True" parameter. This should generally not be
        done during Entity iteration (calls to World.get_component/s).
        Raises a KeyError if the given entity does not exist in the database.
        :param entity: The Entity ID you wish to delete.
        :param immediate: If True, delete the Entity immediately.
        """
        if immediate:
            for component_type in self._entities[entity]:
                self._components[component_type].discard(entity)
                # Drop empty component sets so lookups stay clean.
                if not self._components[component_type]:
                    del self._components[component_type]
            del self._entities[entity]
            self.clear_cache()
        else:
            # Deferred: removed for real by _clear_dead_entities() in process().
            self._dead_entities.add(entity)

    def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
        """Retrieve a Component instance for a specific Entity.
        Retrieve a Component instance for a specific Entity. In some cases,
        it may be necessary to access a specific Component instance.
        For example: directly modifying a Component to handle user input.
        Raises a KeyError if the given Entity and Component do not exist.
        :param entity: The Entity ID to retrieve the Component for.
        :param component_type: The Component instance you wish to retrieve.
        :return: The Component instance requested for the given Entity ID.
        """
        return self._entities[entity][component_type]

    def components_for_entity(self, entity: int) -> Tuple[C, ...]:
        """Retrieve all Components for a specific Entity, as a Tuple.
        Retrieve all Components for a specific Entity. The method is probably
        not appropriate to use in your Processors, but might be useful for
        saving state, or passing specific Components between World instances.
        Unlike most other methods, this returns all of the Components as a
        Tuple in one batch, instead of returning a Generator for iteration.
        Raises a KeyError if the given entity does not exist in the database.
        :param entity: The Entity ID to retrieve the Components for.
        :return: A tuple of all Component instances that have been
                 assigned to the passed Entity ID.
        """
        return tuple(self._entities[entity].values())

    def has_component(self, entity: int, component_type: Any) -> bool:
        """Check if a specific Entity has a Component of a certain type.
        :param entity: The Entity you are querying.
        :param component_type: The type of Component to check for.
        :return: True if the Entity has a Component of this type,
                 otherwise False
        """
        return component_type in self._entities[entity]

    def add_component(self, entity: int, component_instance: Any) -> None:
        """Add a new Component instance to an Entity.
        Add a Component instance to an Entity. If a Component of the same type
        is already assigned to the Entity, it will be replaced.
        :param entity: The Entity to associate the Component with.
        :param component_instance: A Component instance.
        """
        component_type = type(component_instance)
        if component_type not in self._components:
            self._components[component_type] = set()
        self._components[component_type].add(entity)
        if entity not in self._entities:
            self._entities[entity] = {}
        self._entities[entity][component_type] = component_instance
        self.clear_cache()

    def remove_component(self, entity: int, component_type: Any) -> int:
        """Remove a Component instance from an Entity, by type.
        A Component instance can be removed by providing its type.
        For example: world.delete_component(enemy_a, Velocity) will remove
        the Velocity instance from the Entity enemy_a.
        Raises a KeyError if either the given entity or Component type does
        not exist in the database.
        :param entity: The Entity to remove the Component from.
        :param component_type: The type of the Component to remove.
        """
        self._components[component_type].discard(entity)
        if not self._components[component_type]:
            del self._components[component_type]
        del self._entities[entity][component_type]
        # An Entity left with no Components is removed from the database.
        if not self._entities[entity]:
            del self._entities[entity]
        self.clear_cache()
        return entity

    def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
        """Get an iterator for Entity, Component pairs.
        :param component_type: The Component type to retrieve.
        :return: An iterator for (Entity, Component) tuples.
        """
        entity_db = self._entities
        for entity in self._components.get(component_type, []):
            yield entity, entity_db[entity][component_type]

    def _get_components(self, *component_types: Type) -> Iterable[Tuple[int, ...]]:
        """Get an iterator for Entity and multiple Component sets.
        :param component_types: Two or more Component types.
        :return: An iterator for Entity, (Component1, Component2, etc)
                 tuples.
        """
        entity_db = self._entities
        comp_db = self._components
        try:
            for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
                yield entity, [entity_db[entity][ct] for ct in component_types]
        except KeyError:
            # One of the requested types has no instances: yield nothing.
            pass

    @_lru_cache()
    def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
        """Memoized list of (Entity, Component) pairs for one Component type."""
        return [query for query in self._get_component(component_type)]

    @_lru_cache()
    def get_components(self, *component_types: Type):
        """Memoized list of (Entity, [Components]) rows for a type combination."""
        return [query for query in self._get_components(*component_types)]

    def try_component(self, entity: int, component_type: Type):
        """Try to get a single component type for an Entity.
        This method will return the requested Component if it exists, but
        will pass silently if it does not. This allows a way to access optional
        Components that may or may not exist.
        :param entity: The Entity ID to retrieve the Component for.
        :param component_type: The Component instance you wish to retrieve.
        :return: An iterator containing the single Component instance requested,
                 which is empty if the component doesn't exist.
        """
        if component_type in self._entities[entity]:
            yield self._entities[entity][component_type]
        else:
            return None

    def _clear_dead_entities(self):
        """Finalize deletion of any Entities that are marked dead.
        In the interest of performance, this method duplicates code from the
        `delete_entity` method. If that method is changed, those changes should
        be duplicated here as well.
        """
        for entity in self._dead_entities:
            for component_type in self._entities[entity]:
                self._components[component_type].discard(entity)
                if not self._components[component_type]:
                    del self._components[component_type]
            del self._entities[entity]
        self._dead_entities.clear()
        self.clear_cache()

    def _process(self, *args, **kwargs):
        # Default dispatcher: run every Processor in priority order.
        for processor in self._processors:
            processor.process(*args, **kwargs)

    def _timed_process(self, *args, **kwargs):
        """Track Processor execution time for benchmarking."""
        for processor in self._processors:
            start_time = _time.process_time()
            processor.process(*args, **kwargs)
            # NOTE(review): round(..., 2) before int() is redundant here.
            process_time = int(round((_time.process_time() - start_time) * 1000, 2))
            self.process_times[processor.__class__.__name__] = process_time

    def process(self, *args, **kwargs):
        """Call the process method on all Processors, in order of their priority.
        Call the *process* method on all assigned Processors, respecting their
        optional priority setting. In addition, any Entities that were marked
        for deletion since the last call to *World.process*, will be deleted
        at the start of this method call.
        :param args: Optional arguments that will be passed through to the
                     *process* method of all Processors.
        """
        self._clear_dead_entities()
        self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World.remove_processor
|
python
|
def remove_processor(self, processor_type: Processor) -> None:
for processor in self._processors:
if type(processor) == processor_type:
processor.world = None
self._processors.remove(processor)
|
Remove a Processor from the World, by type.
:param processor_type: The class type of the Processor to remove.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L67-L75
| null |
class World:
def __init__(self, timed=False):
"""A World object keeps track of all Entities, Components, and Processors.
A World contains a database of all Entity/Component assignments. It also
handles calling the process method on any Processors assigned to it.
"""
self._processors = [] # type: List[Processor]
self._next_entity_id = 0
self._components = {}
self._entities = {} # type: Dict[int, Any]
self._dead_entities = set()
if timed:
self.process_times = {}
self._process = self._timed_process
def clear_cache(self) -> None:
self.get_component.cache_clear()
self.get_components.cache_clear()
def clear_database(self) -> None:
"""Remove all Entities and Components from the World."""
self._next_entity_id = 0
self._dead_entities.clear()
self._components.clear()
self._entities.clear()
self.clear_cache()
def add_processor(self, processor_instance: Processor, priority=0) -> None:
"""Add a Processor instance to the World.
:param processor_instance: An instance of a Processor,
subclassed from the Processor class
:param priority: A higher number is processed first.
"""
assert issubclass(processor_instance.__class__, Processor)
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda proc: proc.priority, reverse=True)
def get_processor(self, processor_type: Type[P]) -> P:
"""Get a Processor instance, by type.
This method returns a Processor instance by type. This could be
useful in certain situations, such as wanting to call a method on a
Processor, from within another Processor.
:param processor_type: The type of the Processor you wish to retrieve.
:return: A Processor instance that has previously been added to the World.
"""
for processor in self._processors:
if type(processor) == processor_type:
return processor
def create_entity(self, *components) -> int:
"""Create a new Entity.
This method returns an Entity ID, which is just a plain integer.
You can optionally pass one or more Component instances to be
assigned to the Entity.
:param components: Optional components to be assigned to the
entity on creation.
:return: The next Entity ID in sequence.
"""
self._next_entity_id += 1
# TODO: duplicate add_component code here for performance
for component in components:
self.add_component(self._next_entity_id, component)
# self.clear_cache()
return self._next_entity_id
def delete_entity(self, entity: int, immediate=False) -> None:
"""Delete an Entity from the World.
Delete an Entity and all of it's assigned Component instances from
the world. By default, Entity deletion is delayed until the next call
to *World.process*. You can request immediate deletion, however, by
passing the "immediate=True" parameter. This should generally not be
done during Entity iteration (calls to World.get_component/s).
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID you wish to delete.
:param immediate: If True, delete the Entity immediately.
"""
if immediate:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self.clear_cache()
else:
self._dead_entities.add(entity)
def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
"""Retrieve a Component instance for a specific Entity.
Retrieve a Component instance for a specific Entity. In some cases,
it may be necessary to access a specific Component instance.
For example: directly modifying a Component to handle user input.
Raises a KeyError if the given Entity and Component do not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: The Component instance requested for the given Entity ID.
"""
return self._entities[entity][component_type]
def components_for_entity(self, entity: int) -> Tuple[C, ...]:
    """Return every Component assigned to *entity*, batched in a tuple.

    Unlike most query methods this is not a generator; it is mainly useful
    for saving state or moving Components between World instances.

    Raises a KeyError if the given entity does not exist in the database.

    :param entity: The Entity ID to retrieve the Components for.
    :return: A tuple of all Component instances assigned to the Entity.
    """
    owned = self._entities[entity]
    return tuple(owned.values())
def has_component(self, entity: int, component_type: Any) -> bool:
    """Check whether *entity* currently has a Component of *component_type*.

    :param entity: The Entity you are querying.
    :param component_type: The type of Component to check for.
    :return: True if the Entity has a Component of this type, else False.
    """
    entity_components = self._entities[entity]
    return component_type in entity_components
def add_component(self, entity: int, component_instance: Any) -> None:
    """Assign a Component instance to an Entity.

    If a Component of the same type is already assigned to the Entity,
    it is replaced.

    :param entity: The Entity to associate the Component with.
    :param component_instance: A Component instance.
    """
    component_type = type(component_instance)
    # setdefault creates the owner-set / entity-dict on first use.
    self._components.setdefault(component_type, set()).add(entity)
    self._entities.setdefault(entity, {})[component_type] = component_instance
    self.clear_cache()
def remove_component(self, entity: int, component_type: Any) -> int:
    """Remove a Component from an Entity, by type.

    For example: ``world.remove_component(enemy_a, Velocity)`` removes the
    Velocity instance from the Entity *enemy_a*.

    Raises a KeyError if either the given entity or Component type does
    not exist in the database.

    :param entity: The Entity to remove the Component from.
    :param component_type: The type of the Component to remove.
    :return: The Entity ID that was passed in.
    """
    owners = self._components[component_type]
    owners.discard(entity)
    if not owners:
        # No entity holds this type any more; drop the empty owner set.
        del self._components[component_type]

    entity_components = self._entities[entity]
    del entity_components[component_type]
    if not entity_components:
        # The entity has no components left; drop its record entirely.
        del self._entities[entity]

    self.clear_cache()
    return entity
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
    """Yield (entity_id, component) pairs for every owner of *component_type*.

    :param component_type: The Component type to retrieve.
    :return: An iterator of (Entity, Component) tuples.
    """
    entity_db = self._entities
    # Missing types simply yield nothing.
    for owner in self._components.get(component_type, []):
        yield owner, entity_db[owner][component_type]
def _get_components(self, *component_types: Type) -> Iterable[Tuple[int, ...]]:
    """Yield (entity_id, [components...]) for entities owning ALL given types.

    :param component_types: Two or more Component types.
    :return: An iterator of (Entity, [Component1, Component2, ...]) tuples.
    """
    entity_db = self._entities
    comp_db = self._components
    # A KeyError (e.g. a type no entity owns) silently ends the iteration.
    try:
        owner_sets = [comp_db[ct] for ct in component_types]
        for owner in set.intersection(*owner_sets):
            yield owner, [entity_db[owner][ct] for ct in component_types]
    except KeyError:
        pass
@_lru_cache()
def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
    """Return a cached list of (entity_id, component) pairs for *component_type*."""
    return list(self._get_component(component_type))
@_lru_cache()
def get_components(self, *component_types: Type):
    """Return a cached list of (entity_id, [components...]) for all given types."""
    return list(self._get_components(*component_types))
def try_component(self, entity: int, component_type: Type):
    """Try to get a single Component type for an Entity.

    Yields the requested Component if it exists, and yields nothing
    otherwise — a convenient way to access optional Components.

    :param entity: The Entity ID to retrieve the Component for.
    :param component_type: The Component type you wish to retrieve.
    :return: An iterator containing the single Component instance requested,
             which is empty if the Component doesn't exist.
    """
    entity_components = self._entities[entity]
    if component_type in entity_components:
        yield entity_components[component_type]
def _clear_dead_entities(self):
    """Finalize deletion of any Entities that were marked dead.

    In the interest of performance, this duplicates logic from the
    `delete_entity` method; keep the two in sync when changing either.
    """
    for dead in self._dead_entities:
        for component_type in self._entities[dead]:
            owner_set = self._components[component_type]
            owner_set.discard(dead)
            if not owner_set:
                del self._components[component_type]
        del self._entities[dead]

    self._dead_entities.clear()
    self.clear_cache()
def _process(self, *args, **kwargs):
    """Run every Processor once, in priority order, forwarding all arguments."""
    for proc in self._processors:
        proc.process(*args, **kwargs)
def _timed_process(self, *args, **kwargs):
    """Run every Processor while recording execution time for benchmarking."""
    for proc in self._processors:
        started = _time.process_time()
        proc.process(*args, **kwargs)
        # Milliseconds, stored per Processor class name.
        elapsed_ms = int(round((_time.process_time() - started) * 1000, 2))
        self.process_times[proc.__class__.__name__] = elapsed_ms
def process(self, *args, **kwargs):
    """Call *process* on all Processors, in order of their priority.

    Entities marked for deletion since the previous call are finalized
    first, then every assigned Processor runs in priority order.

    :param args: Optional arguments passed through to each Processor's
                 *process* method.
    """
    self._clear_dead_entities()
    self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World.get_processor
|
python
|
def get_processor(self, processor_type: Type[P]) -> P:
for processor in self._processors:
if type(processor) == processor_type:
return processor
|
Get a Processor instance, by type.
This method returns a Processor instance by type. This could be
useful in certain situations, such as wanting to call a method on a
Processor, from within another Processor.
:param processor_type: The type of the Processor you wish to retrieve.
:return: A Processor instance that has previously been added to the World.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L77-L89
| null |
class World:
    def __init__(self, timed=False):
        """A World object keeps track of all Entities, Components, and Processors.

        A World contains a database of all Entity/Component assignments. It also
        handles calling the process method on any Processors assigned to it.

        :param timed: If True, per-Processor execution times (in ms) are
                      recorded in ``self.process_times`` on each process() call.
        """
        self._processors = []  # type: List[Processor]
        self._next_entity_id = 0  # last Entity ID handed out
        self._components = {}  # component type -> set of owning Entity IDs
        self._entities = {}  # type: Dict[int, Any]
        self._dead_entities = set()  # entities awaiting deferred deletion
        if timed:
            self.process_times = {}
            self._process = self._timed_process

    def clear_cache(self) -> None:
        """Invalidate the memoized results of get_component/get_components."""
        self.get_component.cache_clear()
        self.get_components.cache_clear()

    def clear_database(self) -> None:
        """Remove all Entities and Components from the World."""
        self._next_entity_id = 0
        self._dead_entities.clear()
        self._components.clear()
        self._entities.clear()
        self.clear_cache()

    def add_processor(self, processor_instance: Processor, priority=0) -> None:
        """Add a Processor instance to the World.

        :param processor_instance: An instance of a Processor,
               subclassed from the Processor class.
        :param priority: A higher number is processed first.
        """
        assert issubclass(processor_instance.__class__, Processor)
        processor_instance.priority = priority
        processor_instance.world = self
        self._processors.append(processor_instance)
        self._processors.sort(key=lambda proc: proc.priority, reverse=True)

    def remove_processor(self, processor_type: Processor) -> None:
        """Remove a Processor from the World, by type.

        :param processor_type: The class type of the Processor to remove.
        """
        # Iterate over a copy: removing from the list being iterated would
        # silently skip the element that follows each removal.
        for processor in list(self._processors):
            if type(processor) == processor_type:
                processor.world = None
                self._processors.remove(processor)

    def create_entity(self, *components) -> int:
        """Create a new Entity.

        This method returns an Entity ID, which is just a plain integer.
        You can optionally pass one or more Component instances to be
        assigned to the Entity.

        :param components: Optional components to be assigned to the
               entity on creation.
        :return: The next Entity ID in sequence.
        """
        self._next_entity_id += 1
        # TODO: duplicate add_component code here for performance
        for component in components:
            self.add_component(self._next_entity_id, component)
        # self.clear_cache()
        return self._next_entity_id

    def delete_entity(self, entity: int, immediate=False) -> None:
        """Delete an Entity from the World.

        Delete an Entity and all of its assigned Component instances from
        the world. By default, Entity deletion is delayed until the next call
        to *World.process*. You can request immediate deletion, however, by
        passing the "immediate=True" parameter. This should generally not be
        done during Entity iteration (calls to World.get_component/s).

        Raises a KeyError if the given entity does not exist in the database.

        :param entity: The Entity ID you wish to delete.
        :param immediate: If True, delete the Entity immediately.
        """
        if immediate:
            for component_type in self._entities[entity]:
                self._components[component_type].discard(entity)
                if not self._components[component_type]:
                    del self._components[component_type]
            del self._entities[entity]
            self.clear_cache()
        else:
            self._dead_entities.add(entity)

    def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
        """Retrieve a Component instance for a specific Entity.

        In some cases it may be necessary to access a specific Component
        instance — for example, directly modifying a Component to handle
        user input.

        Raises a KeyError if the given Entity and Component do not exist.

        :param entity: The Entity ID to retrieve the Component for.
        :param component_type: The Component type you wish to retrieve.
        :return: The Component instance requested for the given Entity ID.
        """
        return self._entities[entity][component_type]

    def components_for_entity(self, entity: int) -> Tuple[C, ...]:
        """Retrieve all Components for a specific Entity, as a Tuple.

        This method is probably not appropriate to use in Processors, but
        might be useful for saving state, or passing specific Components
        between World instances. Unlike most other methods, it returns all
        Components in one batch instead of a Generator.

        Raises a KeyError if the given entity does not exist in the database.

        :param entity: The Entity ID to retrieve the Components for.
        :return: A tuple of all Component instances assigned to the Entity.
        """
        return tuple(self._entities[entity].values())

    def has_component(self, entity: int, component_type: Any) -> bool:
        """Check if a specific Entity has a Component of a certain type.

        :param entity: The Entity you are querying.
        :param component_type: The type of Component to check for.
        :return: True if the Entity has a Component of this type, else False.
        """
        return component_type in self._entities[entity]

    def add_component(self, entity: int, component_instance: Any) -> None:
        """Add a new Component instance to an Entity.

        If a Component of the same type is already assigned to the Entity,
        it will be replaced.

        :param entity: The Entity to associate the Component with.
        :param component_instance: A Component instance.
        """
        component_type = type(component_instance)
        if component_type not in self._components:
            self._components[component_type] = set()
        self._components[component_type].add(entity)
        if entity not in self._entities:
            self._entities[entity] = {}
        self._entities[entity][component_type] = component_instance
        self.clear_cache()

    def remove_component(self, entity: int, component_type: Any) -> int:
        """Remove a Component instance from an Entity, by type.

        For example: world.remove_component(enemy_a, Velocity) will remove
        the Velocity instance from the Entity enemy_a.

        Raises a KeyError if either the given entity or Component type does
        not exist in the database.

        :param entity: The Entity to remove the Component from.
        :param component_type: The type of the Component to remove.
        :return: The Entity ID that was passed in.
        """
        self._components[component_type].discard(entity)
        if not self._components[component_type]:
            del self._components[component_type]
        del self._entities[entity][component_type]
        if not self._entities[entity]:
            del self._entities[entity]
        self.clear_cache()
        return entity

    def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
        """Get an iterator for Entity, Component pairs.

        :param component_type: The Component type to retrieve.
        :return: An iterator for (Entity, Component) tuples.
        """
        entity_db = self._entities
        for entity in self._components.get(component_type, []):
            yield entity, entity_db[entity][component_type]

    def _get_components(self, *component_types: Type) -> Iterable[Tuple[int, ...]]:
        """Get an iterator for Entity and multiple Component sets.

        :param component_types: Two or more Component types.
        :return: An iterator for (Entity, (Component1, Component2, ...)) tuples.
        """
        entity_db = self._entities
        comp_db = self._components
        # A KeyError (e.g. a type no entity owns) silently ends the iteration.
        try:
            for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
                yield entity, [entity_db[entity][ct] for ct in component_types]
        except KeyError:
            pass

    @_lru_cache()
    def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
        """Return a cached list of (entity_id, component) pairs for *component_type*."""
        return [query for query in self._get_component(component_type)]

    @_lru_cache()
    def get_components(self, *component_types: Type):
        """Return a cached list of (entity_id, [components...]) for all given types."""
        return [query for query in self._get_components(*component_types)]

    def try_component(self, entity: int, component_type: Type):
        """Try to get a single component type for an Entity.

        This method will yield the requested Component if it exists, but
        will pass silently if it does not. This allows a way to access
        optional Components that may or may not exist.

        :param entity: The Entity ID to retrieve the Component for.
        :param component_type: The Component type you wish to retrieve.
        :return: An iterator containing the single Component instance
                 requested, which is empty if the Component doesn't exist.
        """
        if component_type in self._entities[entity]:
            yield self._entities[entity][component_type]
        else:
            return None

    def _clear_dead_entities(self):
        """Finalize deletion of any Entities that are marked dead.

        In the interest of performance, this method duplicates code from the
        `delete_entity` method. If that method is changed, those changes
        should be duplicated here as well.
        """
        for entity in self._dead_entities:
            for component_type in self._entities[entity]:
                self._components[component_type].discard(entity)
                if not self._components[component_type]:
                    del self._components[component_type]
            del self._entities[entity]
        self._dead_entities.clear()
        self.clear_cache()

    def _process(self, *args, **kwargs):
        for processor in self._processors:
            processor.process(*args, **kwargs)

    def _timed_process(self, *args, **kwargs):
        """Track Processor execution time (ms) for benchmarking."""
        for processor in self._processors:
            start_time = _time.process_time()
            processor.process(*args, **kwargs)
            process_time = int(round((_time.process_time() - start_time) * 1000, 2))
            self.process_times[processor.__class__.__name__] = process_time

    def process(self, *args, **kwargs):
        """Call the process method on all Processors, in order of priority.

        Any Entities marked for deletion since the last call to
        *World.process* will be deleted at the start of this method call.

        :param args: Optional arguments that will be passed through to the
               *process* method of all Processors.
        """
        self._clear_dead_entities()
        self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World.create_entity
|
python
|
def create_entity(self, *components) -> int:
self._next_entity_id += 1
# TODO: duplicate add_component code here for performance
for component in components:
self.add_component(self._next_entity_id, component)
# self.clear_cache()
return self._next_entity_id
|
Create a new Entity.
This method returns an Entity ID, which is just a plain integer.
You can optionally pass one or more Component instances to be
assigned to the Entity.
:param components: Optional components to be assigned to the
entity on creation.
:return: The next Entity ID in sequence.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L91-L109
|
[
"def add_component(self, entity: int, component_instance: Any) -> None:\n \"\"\"Add a new Component instance to an Entity.\n\n Add a Component instance to an Entiy. If a Component of the same type\n is already assigned to the Entity, it will be replaced.\n\n :param entity: The Entity to associate the Component with.\n :param component_instance: A Component instance.\n \"\"\"\n component_type = type(component_instance)\n\n if component_type not in self._components:\n self._components[component_type] = set()\n\n self._components[component_type].add(entity)\n\n if entity not in self._entities:\n self._entities[entity] = {}\n\n self._entities[entity][component_type] = component_instance\n self.clear_cache()\n"
] |
class World:
    def __init__(self, timed=False):
        """A World object keeps track of all Entities, Components, and Processors.

        A World contains a database of all Entity/Component assignments. It also
        handles calling the process method on any Processors assigned to it.

        :param timed: If True, per-Processor execution times (in ms) are
                      recorded in ``self.process_times`` on each process() call.
        """
        self._processors = []  # type: List[Processor]
        self._next_entity_id = 0  # last Entity ID handed out
        self._components = {}  # component type -> set of owning Entity IDs
        self._entities = {}  # type: Dict[int, Any]
        self._dead_entities = set()  # entities awaiting deferred deletion
        if timed:
            self.process_times = {}
            self._process = self._timed_process

    def clear_cache(self) -> None:
        """Invalidate the memoized results of get_component/get_components."""
        self.get_component.cache_clear()
        self.get_components.cache_clear()

    def clear_database(self) -> None:
        """Remove all Entities and Components from the World."""
        self._next_entity_id = 0
        self._dead_entities.clear()
        self._components.clear()
        self._entities.clear()
        self.clear_cache()

    def add_processor(self, processor_instance: Processor, priority=0) -> None:
        """Add a Processor instance to the World.

        :param processor_instance: An instance of a Processor,
               subclassed from the Processor class.
        :param priority: A higher number is processed first.
        """
        assert issubclass(processor_instance.__class__, Processor)
        processor_instance.priority = priority
        processor_instance.world = self
        self._processors.append(processor_instance)
        self._processors.sort(key=lambda proc: proc.priority, reverse=True)

    def remove_processor(self, processor_type: Processor) -> None:
        """Remove a Processor from the World, by type.

        :param processor_type: The class type of the Processor to remove.
        """
        # Iterate over a copy: removing from the list being iterated would
        # silently skip the element that follows each removal.
        for processor in list(self._processors):
            if type(processor) == processor_type:
                processor.world = None
                self._processors.remove(processor)

    def get_processor(self, processor_type: Type[P]) -> P:
        """Get a Processor instance, by type.

        This could be useful in certain situations, such as wanting to call
        a method on a Processor, from within another Processor.

        :param processor_type: The type of the Processor you wish to retrieve.
        :return: A Processor instance previously added to the World, or None
                 if no Processor of that type is assigned.
        """
        for processor in self._processors:
            if type(processor) == processor_type:
                return processor

    def delete_entity(self, entity: int, immediate=False) -> None:
        """Delete an Entity from the World.

        Delete an Entity and all of its assigned Component instances from
        the world. By default, Entity deletion is delayed until the next call
        to *World.process*. You can request immediate deletion, however, by
        passing the "immediate=True" parameter. This should generally not be
        done during Entity iteration (calls to World.get_component/s).

        Raises a KeyError if the given entity does not exist in the database.

        :param entity: The Entity ID you wish to delete.
        :param immediate: If True, delete the Entity immediately.
        """
        if immediate:
            for component_type in self._entities[entity]:
                self._components[component_type].discard(entity)
                if not self._components[component_type]:
                    del self._components[component_type]
            del self._entities[entity]
            self.clear_cache()
        else:
            self._dead_entities.add(entity)

    def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
        """Retrieve a Component instance for a specific Entity.

        In some cases it may be necessary to access a specific Component
        instance — for example, directly modifying a Component to handle
        user input.

        Raises a KeyError if the given Entity and Component do not exist.

        :param entity: The Entity ID to retrieve the Component for.
        :param component_type: The Component type you wish to retrieve.
        :return: The Component instance requested for the given Entity ID.
        """
        return self._entities[entity][component_type]

    def components_for_entity(self, entity: int) -> Tuple[C, ...]:
        """Retrieve all Components for a specific Entity, as a Tuple.

        This method is probably not appropriate to use in Processors, but
        might be useful for saving state, or passing specific Components
        between World instances. Unlike most other methods, it returns all
        Components in one batch instead of a Generator.

        Raises a KeyError if the given entity does not exist in the database.

        :param entity: The Entity ID to retrieve the Components for.
        :return: A tuple of all Component instances assigned to the Entity.
        """
        return tuple(self._entities[entity].values())

    def has_component(self, entity: int, component_type: Any) -> bool:
        """Check if a specific Entity has a Component of a certain type.

        :param entity: The Entity you are querying.
        :param component_type: The type of Component to check for.
        :return: True if the Entity has a Component of this type, else False.
        """
        return component_type in self._entities[entity]

    def add_component(self, entity: int, component_instance: Any) -> None:
        """Add a new Component instance to an Entity.

        If a Component of the same type is already assigned to the Entity,
        it will be replaced.

        :param entity: The Entity to associate the Component with.
        :param component_instance: A Component instance.
        """
        component_type = type(component_instance)
        if component_type not in self._components:
            self._components[component_type] = set()
        self._components[component_type].add(entity)
        if entity not in self._entities:
            self._entities[entity] = {}
        self._entities[entity][component_type] = component_instance
        self.clear_cache()

    def remove_component(self, entity: int, component_type: Any) -> int:
        """Remove a Component instance from an Entity, by type.

        For example: world.remove_component(enemy_a, Velocity) will remove
        the Velocity instance from the Entity enemy_a.

        Raises a KeyError if either the given entity or Component type does
        not exist in the database.

        :param entity: The Entity to remove the Component from.
        :param component_type: The type of the Component to remove.
        :return: The Entity ID that was passed in.
        """
        self._components[component_type].discard(entity)
        if not self._components[component_type]:
            del self._components[component_type]
        del self._entities[entity][component_type]
        if not self._entities[entity]:
            del self._entities[entity]
        self.clear_cache()
        return entity

    def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
        """Get an iterator for Entity, Component pairs.

        :param component_type: The Component type to retrieve.
        :return: An iterator for (Entity, Component) tuples.
        """
        entity_db = self._entities
        for entity in self._components.get(component_type, []):
            yield entity, entity_db[entity][component_type]

    def _get_components(self, *component_types: Type) -> Iterable[Tuple[int, ...]]:
        """Get an iterator for Entity and multiple Component sets.

        :param component_types: Two or more Component types.
        :return: An iterator for (Entity, (Component1, Component2, ...)) tuples.
        """
        entity_db = self._entities
        comp_db = self._components
        # A KeyError (e.g. a type no entity owns) silently ends the iteration.
        try:
            for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
                yield entity, [entity_db[entity][ct] for ct in component_types]
        except KeyError:
            pass

    @_lru_cache()
    def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
        """Return a cached list of (entity_id, component) pairs for *component_type*."""
        return [query for query in self._get_component(component_type)]

    @_lru_cache()
    def get_components(self, *component_types: Type):
        """Return a cached list of (entity_id, [components...]) for all given types."""
        return [query for query in self._get_components(*component_types)]

    def try_component(self, entity: int, component_type: Type):
        """Try to get a single component type for an Entity.

        This method will yield the requested Component if it exists, but
        will pass silently if it does not. This allows a way to access
        optional Components that may or may not exist.

        :param entity: The Entity ID to retrieve the Component for.
        :param component_type: The Component type you wish to retrieve.
        :return: An iterator containing the single Component instance
                 requested, which is empty if the Component doesn't exist.
        """
        if component_type in self._entities[entity]:
            yield self._entities[entity][component_type]
        else:
            return None

    def _clear_dead_entities(self):
        """Finalize deletion of any Entities that are marked dead.

        In the interest of performance, this method duplicates code from the
        `delete_entity` method. If that method is changed, those changes
        should be duplicated here as well.
        """
        for entity in self._dead_entities:
            for component_type in self._entities[entity]:
                self._components[component_type].discard(entity)
                if not self._components[component_type]:
                    del self._components[component_type]
            del self._entities[entity]
        self._dead_entities.clear()
        self.clear_cache()

    def _process(self, *args, **kwargs):
        for processor in self._processors:
            processor.process(*args, **kwargs)

    def _timed_process(self, *args, **kwargs):
        """Track Processor execution time (ms) for benchmarking."""
        for processor in self._processors:
            start_time = _time.process_time()
            processor.process(*args, **kwargs)
            process_time = int(round((_time.process_time() - start_time) * 1000, 2))
            self.process_times[processor.__class__.__name__] = process_time

    def process(self, *args, **kwargs):
        """Call the process method on all Processors, in order of priority.

        Any Entities marked for deletion since the last call to
        *World.process* will be deleted at the start of this method call.

        :param args: Optional arguments that will be passed through to the
               *process* method of all Processors.
        """
        self._clear_dead_entities()
        self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World.delete_entity
|
python
|
def delete_entity(self, entity: int, immediate=False) -> None:
if immediate:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self.clear_cache()
else:
self._dead_entities.add(entity)
|
Delete an Entity from the World.
Delete an Entity and all of its assigned Component instances from
the world. By default, Entity deletion is delayed until the next call
to *World.process*. You can request immediate deletion, however, by
passing the "immediate=True" parameter. This should generally not be
done during Entity iteration (calls to World.get_component/s).
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID you wish to delete.
:param immediate: If True, delete the Entity immediately.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L111-L135
|
[
"def clear_cache(self) -> None:\n self.get_component.cache_clear()\n self.get_components.cache_clear()\n"
] |
class World:
def __init__(self, timed=False):
"""A World object keeps track of all Entities, Components, and Processors.
A World contains a database of all Entity/Component assignments. It also
handles calling the process method on any Processors assigned to it.
"""
self._processors = [] # type: List[Processor]
self._next_entity_id = 0
self._components = {}
self._entities = {} # type: Dict[int, Any]
self._dead_entities = set()
if timed:
self.process_times = {}
self._process = self._timed_process
def clear_cache(self) -> None:
self.get_component.cache_clear()
self.get_components.cache_clear()
def clear_database(self) -> None:
"""Remove all Entities and Components from the World."""
self._next_entity_id = 0
self._dead_entities.clear()
self._components.clear()
self._entities.clear()
self.clear_cache()
def add_processor(self, processor_instance: Processor, priority=0) -> None:
"""Add a Processor instance to the World.
:param processor_instance: An instance of a Processor,
subclassed from the Processor class
:param priority: A higher number is processed first.
"""
assert issubclass(processor_instance.__class__, Processor)
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda proc: proc.priority, reverse=True)
def remove_processor(self, processor_type: Processor) -> None:
"""Remove a Processor from the World, by type.
:param processor_type: The class type of the Processor to remove.
"""
for processor in self._processors:
if type(processor) == processor_type:
processor.world = None
self._processors.remove(processor)
def get_processor(self, processor_type: Type[P]) -> P:
"""Get a Processor instance, by type.
This method returns a Processor instance by type. This could be
useful in certain situations, such as wanting to call a method on a
Processor, from within another Processor.
:param processor_type: The type of the Processor you wish to retrieve.
:return: A Processor instance that has previously been added to the World.
"""
for processor in self._processors:
if type(processor) == processor_type:
return processor
def create_entity(self, *components) -> int:
"""Create a new Entity.
This method returns an Entity ID, which is just a plain integer.
You can optionally pass one or more Component instances to be
assigned to the Entity.
:param components: Optional components to be assigned to the
entity on creation.
:return: The next Entity ID in sequence.
"""
self._next_entity_id += 1
# TODO: duplicate add_component code here for performance
for component in components:
self.add_component(self._next_entity_id, component)
# self.clear_cache()
return self._next_entity_id
def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
"""Retrieve a Component instance for a specific Entity.
Retrieve a Component instance for a specific Entity. In some cases,
it may be necessary to access a specific Component instance.
For example: directly modifying a Component to handle user input.
Raises a KeyError if the given Entity and Component do not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: The Component instance requested for the given Entity ID.
"""
return self._entities[entity][component_type]
def components_for_entity(self, entity: int) -> Tuple[C, ...]:
"""Retrieve all Components for a specific Entity, as a Tuple.
Retrieve all Components for a specific Entity. The method is probably
not appropriate to use in your Processors, but might be useful for
saving state, or passing specific Components between World instances.
Unlike most other methods, this returns all of the Components as a
Tuple in one batch, instead of returning a Generator for iteration.
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID to retrieve the Components for.
:return: A tuple of all Component instances that have been
assigned to the passed Entity ID.
"""
return tuple(self._entities[entity].values())
def has_component(self, entity: int, component_type: Any) -> bool:
"""Check if a specific Entity has a Component of a certain type.
:param entity: The Entity you are querying.
:param component_type: The type of Component to check for.
:return: True if the Entity has a Component of this type,
otherwise False
"""
return component_type in self._entities[entity]
def add_component(self, entity: int, component_instance: Any) -> None:
"""Add a new Component instance to an Entity.
        Add a Component instance to an Entity. If a Component of the same type
is already assigned to the Entity, it will be replaced.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance.
"""
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache()
def remove_component(self, entity: int, component_type: Any) -> int:
"""Remove a Component instance from an Entity, by type.
        A Component instance can be removed by providing its type.
For example: world.delete_component(enemy_a, Velocity) will remove
the Velocity instance from the Entity enemy_a.
Raises a KeyError if either the given entity or Component type does
not exist in the database.
:param entity: The Entity to remove the Component from.
:param component_type: The type of the Component to remove.
"""
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity][component_type]
if not self._entities[entity]:
del self._entities[entity]
self.clear_cache()
return entity
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
"""Get an iterator for Entity, Component pairs.
:param component_type: The Component type to retrieve.
:return: An iterator for (Entity, Component) tuples.
"""
entity_db = self._entities
for entity in self._components.get(component_type, []):
yield entity, entity_db[entity][component_type]
def _get_components(self, *component_types: Type)-> Iterable[Tuple[int, ...]]:
"""Get an iterator for Entity and multiple Component sets.
:param component_types: Two or more Component types.
:return: An iterator for Entity, (Component1, Component2, etc)
tuples.
"""
entity_db = self._entities
comp_db = self._components
try:
for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass
@_lru_cache()
def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
return [query for query in self._get_component(component_type)]
@_lru_cache()
def get_components(self, *component_types: Type):
return [query for query in self._get_components(*component_types)]
def try_component(self, entity: int, component_type: Type):
"""Try to get a single component type for an Entity.
This method will return the requested Component if it exists, but
will pass silently if it does not. This allows a way to access optional
Components that may or may not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
        :return: An iterator containing the single Component instance requested,
which is empty if the component doesn't exist.
"""
if component_type in self._entities[entity]:
yield self._entities[entity][component_type]
else:
return None
def _clear_dead_entities(self):
"""Finalize deletion of any Entities that are marked dead.
In the interest of performance, this method duplicates code from the
`delete_entity` method. If that method is changed, those changes should
be duplicated here as well.
"""
for entity in self._dead_entities:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self._dead_entities.clear()
self.clear_cache()
def _process(self, *args, **kwargs):
for processor in self._processors:
processor.process(*args, **kwargs)
def _timed_process(self, *args, **kwargs):
"""Track Processor execution time for benchmarking."""
for processor in self._processors:
start_time = _time.process_time()
processor.process(*args, **kwargs)
process_time = int(round((_time.process_time() - start_time) * 1000, 2))
self.process_times[processor.__class__.__name__] = process_time
def process(self, *args, **kwargs):
"""Call the process method on all Processors, in order of their priority.
Call the *process* method on all assigned Processors, respecting their
optional priority setting. In addition, any Entities that were marked
for deletion since the last call to *World.process*, will be deleted
at the start of this method call.
:param args: Optional arguments that will be passed through to the
*process* method of all Processors.
"""
self._clear_dead_entities()
self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World.component_for_entity
|
python
|
def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
return self._entities[entity][component_type]
|
Retrieve a Component instance for a specific Entity.
Retrieve a Component instance for a specific Entity. In some cases,
it may be necessary to access a specific Component instance.
For example: directly modifying a Component to handle user input.
Raises a KeyError if the given Entity and Component do not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: The Component instance requested for the given Entity ID.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L137-L149
| null |
class World:
def __init__(self, timed=False):
"""A World object keeps track of all Entities, Components, and Processors.
A World contains a database of all Entity/Component assignments. It also
handles calling the process method on any Processors assigned to it.
"""
self._processors = [] # type: List[Processor]
self._next_entity_id = 0
self._components = {}
self._entities = {} # type: Dict[int, Any]
self._dead_entities = set()
if timed:
self.process_times = {}
self._process = self._timed_process
def clear_cache(self) -> None:
self.get_component.cache_clear()
self.get_components.cache_clear()
def clear_database(self) -> None:
"""Remove all Entities and Components from the World."""
self._next_entity_id = 0
self._dead_entities.clear()
self._components.clear()
self._entities.clear()
self.clear_cache()
def add_processor(self, processor_instance: Processor, priority=0) -> None:
"""Add a Processor instance to the World.
:param processor_instance: An instance of a Processor,
subclassed from the Processor class
:param priority: A higher number is processed first.
"""
assert issubclass(processor_instance.__class__, Processor)
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda proc: proc.priority, reverse=True)
def remove_processor(self, processor_type: Processor) -> None:
"""Remove a Processor from the World, by type.
:param processor_type: The class type of the Processor to remove.
"""
for processor in self._processors:
if type(processor) == processor_type:
processor.world = None
self._processors.remove(processor)
def get_processor(self, processor_type: Type[P]) -> P:
"""Get a Processor instance, by type.
This method returns a Processor instance by type. This could be
useful in certain situations, such as wanting to call a method on a
Processor, from within another Processor.
:param processor_type: The type of the Processor you wish to retrieve.
:return: A Processor instance that has previously been added to the World.
"""
for processor in self._processors:
if type(processor) == processor_type:
return processor
def create_entity(self, *components) -> int:
"""Create a new Entity.
This method returns an Entity ID, which is just a plain integer.
You can optionally pass one or more Component instances to be
assigned to the Entity.
:param components: Optional components to be assigned to the
entity on creation.
:return: The next Entity ID in sequence.
"""
self._next_entity_id += 1
# TODO: duplicate add_component code here for performance
for component in components:
self.add_component(self._next_entity_id, component)
# self.clear_cache()
return self._next_entity_id
def delete_entity(self, entity: int, immediate=False) -> None:
"""Delete an Entity from the World.
        Delete an Entity and all of its assigned Component instances from
the world. By default, Entity deletion is delayed until the next call
to *World.process*. You can request immediate deletion, however, by
passing the "immediate=True" parameter. This should generally not be
done during Entity iteration (calls to World.get_component/s).
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID you wish to delete.
:param immediate: If True, delete the Entity immediately.
"""
if immediate:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self.clear_cache()
else:
self._dead_entities.add(entity)
def components_for_entity(self, entity: int) -> Tuple[C, ...]:
"""Retrieve all Components for a specific Entity, as a Tuple.
Retrieve all Components for a specific Entity. The method is probably
not appropriate to use in your Processors, but might be useful for
saving state, or passing specific Components between World instances.
Unlike most other methods, this returns all of the Components as a
Tuple in one batch, instead of returning a Generator for iteration.
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID to retrieve the Components for.
:return: A tuple of all Component instances that have been
assigned to the passed Entity ID.
"""
return tuple(self._entities[entity].values())
def has_component(self, entity: int, component_type: Any) -> bool:
"""Check if a specific Entity has a Component of a certain type.
:param entity: The Entity you are querying.
:param component_type: The type of Component to check for.
:return: True if the Entity has a Component of this type,
otherwise False
"""
return component_type in self._entities[entity]
def add_component(self, entity: int, component_instance: Any) -> None:
"""Add a new Component instance to an Entity.
        Add a Component instance to an Entity. If a Component of the same type
is already assigned to the Entity, it will be replaced.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance.
"""
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache()
def remove_component(self, entity: int, component_type: Any) -> int:
"""Remove a Component instance from an Entity, by type.
        A Component instance can be removed by providing its type.
For example: world.delete_component(enemy_a, Velocity) will remove
the Velocity instance from the Entity enemy_a.
Raises a KeyError if either the given entity or Component type does
not exist in the database.
:param entity: The Entity to remove the Component from.
:param component_type: The type of the Component to remove.
"""
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity][component_type]
if not self._entities[entity]:
del self._entities[entity]
self.clear_cache()
return entity
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
"""Get an iterator for Entity, Component pairs.
:param component_type: The Component type to retrieve.
:return: An iterator for (Entity, Component) tuples.
"""
entity_db = self._entities
for entity in self._components.get(component_type, []):
yield entity, entity_db[entity][component_type]
def _get_components(self, *component_types: Type)-> Iterable[Tuple[int, ...]]:
"""Get an iterator for Entity and multiple Component sets.
:param component_types: Two or more Component types.
:return: An iterator for Entity, (Component1, Component2, etc)
tuples.
"""
entity_db = self._entities
comp_db = self._components
try:
for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass
@_lru_cache()
def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
return [query for query in self._get_component(component_type)]
@_lru_cache()
def get_components(self, *component_types: Type):
return [query for query in self._get_components(*component_types)]
def try_component(self, entity: int, component_type: Type):
"""Try to get a single component type for an Entity.
This method will return the requested Component if it exists, but
will pass silently if it does not. This allows a way to access optional
Components that may or may not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
        :return: An iterator containing the single Component instance requested,
which is empty if the component doesn't exist.
"""
if component_type in self._entities[entity]:
yield self._entities[entity][component_type]
else:
return None
def _clear_dead_entities(self):
"""Finalize deletion of any Entities that are marked dead.
In the interest of performance, this method duplicates code from the
`delete_entity` method. If that method is changed, those changes should
be duplicated here as well.
"""
for entity in self._dead_entities:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self._dead_entities.clear()
self.clear_cache()
def _process(self, *args, **kwargs):
for processor in self._processors:
processor.process(*args, **kwargs)
def _timed_process(self, *args, **kwargs):
"""Track Processor execution time for benchmarking."""
for processor in self._processors:
start_time = _time.process_time()
processor.process(*args, **kwargs)
process_time = int(round((_time.process_time() - start_time) * 1000, 2))
self.process_times[processor.__class__.__name__] = process_time
def process(self, *args, **kwargs):
"""Call the process method on all Processors, in order of their priority.
Call the *process* method on all assigned Processors, respecting their
optional priority setting. In addition, any Entities that were marked
for deletion since the last call to *World.process*, will be deleted
at the start of this method call.
:param args: Optional arguments that will be passed through to the
*process* method of all Processors.
"""
self._clear_dead_entities()
self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World.components_for_entity
|
python
|
def components_for_entity(self, entity: int) -> Tuple[C, ...]:
return tuple(self._entities[entity].values())
|
Retrieve all Components for a specific Entity, as a Tuple.
Retrieve all Components for a specific Entity. The method is probably
not appropriate to use in your Processors, but might be useful for
saving state, or passing specific Components between World instances.
Unlike most other methods, this returns all of the Components as a
Tuple in one batch, instead of returning a Generator for iteration.
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID to retrieve the Components for.
:return: A tuple of all Component instances that have been
assigned to the passed Entity ID.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L151-L165
| null |
class World:
def __init__(self, timed=False):
"""A World object keeps track of all Entities, Components, and Processors.
A World contains a database of all Entity/Component assignments. It also
handles calling the process method on any Processors assigned to it.
"""
self._processors = [] # type: List[Processor]
self._next_entity_id = 0
self._components = {}
self._entities = {} # type: Dict[int, Any]
self._dead_entities = set()
if timed:
self.process_times = {}
self._process = self._timed_process
def clear_cache(self) -> None:
self.get_component.cache_clear()
self.get_components.cache_clear()
def clear_database(self) -> None:
"""Remove all Entities and Components from the World."""
self._next_entity_id = 0
self._dead_entities.clear()
self._components.clear()
self._entities.clear()
self.clear_cache()
def add_processor(self, processor_instance: Processor, priority=0) -> None:
"""Add a Processor instance to the World.
:param processor_instance: An instance of a Processor,
subclassed from the Processor class
:param priority: A higher number is processed first.
"""
assert issubclass(processor_instance.__class__, Processor)
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda proc: proc.priority, reverse=True)
def remove_processor(self, processor_type: Processor) -> None:
"""Remove a Processor from the World, by type.
:param processor_type: The class type of the Processor to remove.
"""
for processor in self._processors:
if type(processor) == processor_type:
processor.world = None
self._processors.remove(processor)
def get_processor(self, processor_type: Type[P]) -> P:
"""Get a Processor instance, by type.
This method returns a Processor instance by type. This could be
useful in certain situations, such as wanting to call a method on a
Processor, from within another Processor.
:param processor_type: The type of the Processor you wish to retrieve.
:return: A Processor instance that has previously been added to the World.
"""
for processor in self._processors:
if type(processor) == processor_type:
return processor
def create_entity(self, *components) -> int:
"""Create a new Entity.
This method returns an Entity ID, which is just a plain integer.
You can optionally pass one or more Component instances to be
assigned to the Entity.
:param components: Optional components to be assigned to the
entity on creation.
:return: The next Entity ID in sequence.
"""
self._next_entity_id += 1
# TODO: duplicate add_component code here for performance
for component in components:
self.add_component(self._next_entity_id, component)
# self.clear_cache()
return self._next_entity_id
def delete_entity(self, entity: int, immediate=False) -> None:
"""Delete an Entity from the World.
        Delete an Entity and all of its assigned Component instances from
the world. By default, Entity deletion is delayed until the next call
to *World.process*. You can request immediate deletion, however, by
passing the "immediate=True" parameter. This should generally not be
done during Entity iteration (calls to World.get_component/s).
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID you wish to delete.
:param immediate: If True, delete the Entity immediately.
"""
if immediate:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self.clear_cache()
else:
self._dead_entities.add(entity)
def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
"""Retrieve a Component instance for a specific Entity.
Retrieve a Component instance for a specific Entity. In some cases,
it may be necessary to access a specific Component instance.
For example: directly modifying a Component to handle user input.
Raises a KeyError if the given Entity and Component do not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: The Component instance requested for the given Entity ID.
"""
return self._entities[entity][component_type]
def has_component(self, entity: int, component_type: Any) -> bool:
"""Check if a specific Entity has a Component of a certain type.
:param entity: The Entity you are querying.
:param component_type: The type of Component to check for.
:return: True if the Entity has a Component of this type,
otherwise False
"""
return component_type in self._entities[entity]
def add_component(self, entity: int, component_instance: Any) -> None:
"""Add a new Component instance to an Entity.
        Add a Component instance to an Entity. If a Component of the same type
is already assigned to the Entity, it will be replaced.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance.
"""
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache()
def remove_component(self, entity: int, component_type: Any) -> int:
"""Remove a Component instance from an Entity, by type.
        A Component instance can be removed by providing its type.
For example: world.delete_component(enemy_a, Velocity) will remove
the Velocity instance from the Entity enemy_a.
Raises a KeyError if either the given entity or Component type does
not exist in the database.
:param entity: The Entity to remove the Component from.
:param component_type: The type of the Component to remove.
"""
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity][component_type]
if not self._entities[entity]:
del self._entities[entity]
self.clear_cache()
return entity
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
"""Get an iterator for Entity, Component pairs.
:param component_type: The Component type to retrieve.
:return: An iterator for (Entity, Component) tuples.
"""
entity_db = self._entities
for entity in self._components.get(component_type, []):
yield entity, entity_db[entity][component_type]
def _get_components(self, *component_types: Type)-> Iterable[Tuple[int, ...]]:
"""Get an iterator for Entity and multiple Component sets.
:param component_types: Two or more Component types.
:return: An iterator for Entity, (Component1, Component2, etc)
tuples.
"""
entity_db = self._entities
comp_db = self._components
try:
for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass
@_lru_cache()
def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
return [query for query in self._get_component(component_type)]
@_lru_cache()
def get_components(self, *component_types: Type):
return [query for query in self._get_components(*component_types)]
def try_component(self, entity: int, component_type: Type):
"""Try to get a single component type for an Entity.
This method will return the requested Component if it exists, but
will pass silently if it does not. This allows a way to access optional
Components that may or may not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
        :return: An iterator containing the single Component instance requested,
which is empty if the component doesn't exist.
"""
if component_type in self._entities[entity]:
yield self._entities[entity][component_type]
else:
return None
def _clear_dead_entities(self):
"""Finalize deletion of any Entities that are marked dead.
In the interest of performance, this method duplicates code from the
`delete_entity` method. If that method is changed, those changes should
be duplicated here as well.
"""
for entity in self._dead_entities:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self._dead_entities.clear()
self.clear_cache()
def _process(self, *args, **kwargs):
for processor in self._processors:
processor.process(*args, **kwargs)
def _timed_process(self, *args, **kwargs):
"""Track Processor execution time for benchmarking."""
for processor in self._processors:
start_time = _time.process_time()
processor.process(*args, **kwargs)
process_time = int(round((_time.process_time() - start_time) * 1000, 2))
self.process_times[processor.__class__.__name__] = process_time
def process(self, *args, **kwargs):
"""Call the process method on all Processors, in order of their priority.
Call the *process* method on all assigned Processors, respecting their
optional priority setting. In addition, any Entities that were marked
for deletion since the last call to *World.process*, will be deleted
at the start of this method call.
:param args: Optional arguments that will be passed through to the
*process* method of all Processors.
"""
self._clear_dead_entities()
self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World.has_component
|
python
|
def has_component(self, entity: int, component_type: Any) -> bool:
return component_type in self._entities[entity]
|
Check if a specific Entity has a Component of a certain type.
:param entity: The Entity you are querying.
:param component_type: The type of Component to check for.
:return: True if the Entity has a Component of this type,
otherwise False
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L167-L175
| null |
class World:
def __init__(self, timed=False):
"""A World object keeps track of all Entities, Components, and Processors.
A World contains a database of all Entity/Component assignments. It also
handles calling the process method on any Processors assigned to it.
"""
self._processors = [] # type: List[Processor]
self._next_entity_id = 0
self._components = {}
self._entities = {} # type: Dict[int, Any]
self._dead_entities = set()
if timed:
self.process_times = {}
self._process = self._timed_process
def clear_cache(self) -> None:
self.get_component.cache_clear()
self.get_components.cache_clear()
def clear_database(self) -> None:
"""Remove all Entities and Components from the World."""
self._next_entity_id = 0
self._dead_entities.clear()
self._components.clear()
self._entities.clear()
self.clear_cache()
def add_processor(self, processor_instance: Processor, priority=0) -> None:
"""Add a Processor instance to the World.
:param processor_instance: An instance of a Processor,
subclassed from the Processor class
:param priority: A higher number is processed first.
"""
assert issubclass(processor_instance.__class__, Processor)
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda proc: proc.priority, reverse=True)
def remove_processor(self, processor_type: Processor) -> None:
"""Remove a Processor from the World, by type.
:param processor_type: The class type of the Processor to remove.
"""
for processor in self._processors:
if type(processor) == processor_type:
processor.world = None
self._processors.remove(processor)
def get_processor(self, processor_type: Type[P]) -> P:
"""Get a Processor instance, by type.
This method returns a Processor instance by type. This could be
useful in certain situations, such as wanting to call a method on a
Processor, from within another Processor.
:param processor_type: The type of the Processor you wish to retrieve.
:return: A Processor instance that has previously been added to the World.
"""
for processor in self._processors:
if type(processor) == processor_type:
return processor
def create_entity(self, *components) -> int:
"""Create a new Entity.
This method returns an Entity ID, which is just a plain integer.
You can optionally pass one or more Component instances to be
assigned to the Entity.
:param components: Optional components to be assigned to the
entity on creation.
:return: The next Entity ID in sequence.
"""
self._next_entity_id += 1
# TODO: duplicate add_component code here for performance
for component in components:
self.add_component(self._next_entity_id, component)
# self.clear_cache()
return self._next_entity_id
def delete_entity(self, entity: int, immediate=False) -> None:
"""Delete an Entity from the World.
        Delete an Entity and all of its assigned Component instances from
the world. By default, Entity deletion is delayed until the next call
to *World.process*. You can request immediate deletion, however, by
passing the "immediate=True" parameter. This should generally not be
done during Entity iteration (calls to World.get_component/s).
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID you wish to delete.
:param immediate: If True, delete the Entity immediately.
"""
if immediate:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self.clear_cache()
else:
self._dead_entities.add(entity)
def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
"""Retrieve a Component instance for a specific Entity.
Retrieve a Component instance for a specific Entity. In some cases,
it may be necessary to access a specific Component instance.
For example: directly modifying a Component to handle user input.
Raises a KeyError if the given Entity and Component do not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: The Component instance requested for the given Entity ID.
"""
return self._entities[entity][component_type]
def components_for_entity(self, entity: int) -> Tuple[C, ...]:
"""Retrieve all Components for a specific Entity, as a Tuple.
Retrieve all Components for a specific Entity. The method is probably
not appropriate to use in your Processors, but might be useful for
saving state, or passing specific Components between World instances.
Unlike most other methods, this returns all of the Components as a
Tuple in one batch, instead of returning a Generator for iteration.
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID to retrieve the Components for.
:return: A tuple of all Component instances that have been
assigned to the passed Entity ID.
"""
return tuple(self._entities[entity].values())
def add_component(self, entity: int, component_instance: Any) -> None:
"""Add a new Component instance to an Entity.
        Add a Component instance to an Entity. If a Component of the same type
is already assigned to the Entity, it will be replaced.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance.
"""
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache()
def remove_component(self, entity: int, component_type: Any) -> int:
"""Remove a Component instance from an Entity, by type.
A Component instance can be removed by providing it's type.
For example: world.delete_component(enemy_a, Velocity) will remove
the Velocity instance from the Entity enemy_a.
Raises a KeyError if either the given entity or Component type does
not exist in the database.
:param entity: The Entity to remove the Component from.
:param component_type: The type of the Component to remove.
"""
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity][component_type]
if not self._entities[entity]:
del self._entities[entity]
self.clear_cache()
return entity
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
"""Get an iterator for Entity, Component pairs.
:param component_type: The Component type to retrieve.
:return: An iterator for (Entity, Component) tuples.
"""
entity_db = self._entities
for entity in self._components.get(component_type, []):
yield entity, entity_db[entity][component_type]
def _get_components(self, *component_types: Type)-> Iterable[Tuple[int, ...]]:
"""Get an iterator for Entity and multiple Component sets.
:param component_types: Two or more Component types.
:return: An iterator for Entity, (Component1, Component2, etc)
tuples.
"""
entity_db = self._entities
comp_db = self._components
try:
for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass
@_lru_cache()
def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
return [query for query in self._get_component(component_type)]
@_lru_cache()
def get_components(self, *component_types: Type):
return [query for query in self._get_components(*component_types)]
def try_component(self, entity: int, component_type: Type):
"""Try to get a single component type for an Entity.
This method will return the requested Component if it exists, but
will pass silently if it does not. This allows a way to access optional
Components that may or may not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: A iterator containg the single Component instance requested,
which is empty if the component doesn't exist.
"""
if component_type in self._entities[entity]:
yield self._entities[entity][component_type]
else:
return None
def _clear_dead_entities(self):
"""Finalize deletion of any Entities that are marked dead.
In the interest of performance, this method duplicates code from the
`delete_entity` method. If that method is changed, those changes should
be duplicated here as well.
"""
for entity in self._dead_entities:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self._dead_entities.clear()
self.clear_cache()
def _process(self, *args, **kwargs):
for processor in self._processors:
processor.process(*args, **kwargs)
def _timed_process(self, *args, **kwargs):
"""Track Processor execution time for benchmarking."""
for processor in self._processors:
start_time = _time.process_time()
processor.process(*args, **kwargs)
process_time = int(round((_time.process_time() - start_time) * 1000, 2))
self.process_times[processor.__class__.__name__] = process_time
def process(self, *args, **kwargs):
"""Call the process method on all Processors, in order of their priority.
Call the *process* method on all assigned Processors, respecting their
optional priority setting. In addition, any Entities that were marked
for deletion since the last call to *World.process*, will be deleted
at the start of this method call.
:param args: Optional arguments that will be passed through to the
*process* method of all Processors.
"""
self._clear_dead_entities()
self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World.add_component
|
python
|
def add_component(self, entity: int, component_instance: Any) -> None:
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache()
|
Add a new Component instance to an Entity.
Add a Component instance to an Entiy. If a Component of the same type
is already assigned to the Entity, it will be replaced.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L177-L197
|
[
"def clear_cache(self) -> None:\n self.get_component.cache_clear()\n self.get_components.cache_clear()\n"
] |
class World:
def __init__(self, timed=False):
"""A World object keeps track of all Entities, Components, and Processors.
A World contains a database of all Entity/Component assignments. It also
handles calling the process method on any Processors assigned to it.
"""
self._processors = [] # type: List[Processor]
self._next_entity_id = 0
self._components = {}
self._entities = {} # type: Dict[int, Any]
self._dead_entities = set()
if timed:
self.process_times = {}
self._process = self._timed_process
def clear_cache(self) -> None:
self.get_component.cache_clear()
self.get_components.cache_clear()
def clear_database(self) -> None:
"""Remove all Entities and Components from the World."""
self._next_entity_id = 0
self._dead_entities.clear()
self._components.clear()
self._entities.clear()
self.clear_cache()
def add_processor(self, processor_instance: Processor, priority=0) -> None:
"""Add a Processor instance to the World.
:param processor_instance: An instance of a Processor,
subclassed from the Processor class
:param priority: A higher number is processed first.
"""
assert issubclass(processor_instance.__class__, Processor)
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda proc: proc.priority, reverse=True)
def remove_processor(self, processor_type: Processor) -> None:
"""Remove a Processor from the World, by type.
:param processor_type: The class type of the Processor to remove.
"""
for processor in self._processors:
if type(processor) == processor_type:
processor.world = None
self._processors.remove(processor)
def get_processor(self, processor_type: Type[P]) -> P:
"""Get a Processor instance, by type.
This method returns a Processor instance by type. This could be
useful in certain situations, such as wanting to call a method on a
Processor, from within another Processor.
:param processor_type: The type of the Processor you wish to retrieve.
:return: A Processor instance that has previously been added to the World.
"""
for processor in self._processors:
if type(processor) == processor_type:
return processor
def create_entity(self, *components) -> int:
"""Create a new Entity.
This method returns an Entity ID, which is just a plain integer.
You can optionally pass one or more Component instances to be
assigned to the Entity.
:param components: Optional components to be assigned to the
entity on creation.
:return: The next Entity ID in sequence.
"""
self._next_entity_id += 1
# TODO: duplicate add_component code here for performance
for component in components:
self.add_component(self._next_entity_id, component)
# self.clear_cache()
return self._next_entity_id
def delete_entity(self, entity: int, immediate=False) -> None:
"""Delete an Entity from the World.
Delete an Entity and all of it's assigned Component instances from
the world. By default, Entity deletion is delayed until the next call
to *World.process*. You can request immediate deletion, however, by
passing the "immediate=True" parameter. This should generally not be
done during Entity iteration (calls to World.get_component/s).
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID you wish to delete.
:param immediate: If True, delete the Entity immediately.
"""
if immediate:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self.clear_cache()
else:
self._dead_entities.add(entity)
def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
"""Retrieve a Component instance for a specific Entity.
Retrieve a Component instance for a specific Entity. In some cases,
it may be necessary to access a specific Component instance.
For example: directly modifying a Component to handle user input.
Raises a KeyError if the given Entity and Component do not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: The Component instance requested for the given Entity ID.
"""
return self._entities[entity][component_type]
def components_for_entity(self, entity: int) -> Tuple[C, ...]:
"""Retrieve all Components for a specific Entity, as a Tuple.
Retrieve all Components for a specific Entity. The method is probably
not appropriate to use in your Processors, but might be useful for
saving state, or passing specific Components between World instances.
Unlike most other methods, this returns all of the Components as a
Tuple in one batch, instead of returning a Generator for iteration.
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID to retrieve the Components for.
:return: A tuple of all Component instances that have been
assigned to the passed Entity ID.
"""
return tuple(self._entities[entity].values())
def has_component(self, entity: int, component_type: Any) -> bool:
"""Check if a specific Entity has a Component of a certain type.
:param entity: The Entity you are querying.
:param component_type: The type of Component to check for.
:return: True if the Entity has a Component of this type,
otherwise False
"""
return component_type in self._entities[entity]
def remove_component(self, entity: int, component_type: Any) -> int:
"""Remove a Component instance from an Entity, by type.
A Component instance can be removed by providing it's type.
For example: world.delete_component(enemy_a, Velocity) will remove
the Velocity instance from the Entity enemy_a.
Raises a KeyError if either the given entity or Component type does
not exist in the database.
:param entity: The Entity to remove the Component from.
:param component_type: The type of the Component to remove.
"""
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity][component_type]
if not self._entities[entity]:
del self._entities[entity]
self.clear_cache()
return entity
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
"""Get an iterator for Entity, Component pairs.
:param component_type: The Component type to retrieve.
:return: An iterator for (Entity, Component) tuples.
"""
entity_db = self._entities
for entity in self._components.get(component_type, []):
yield entity, entity_db[entity][component_type]
def _get_components(self, *component_types: Type)-> Iterable[Tuple[int, ...]]:
"""Get an iterator for Entity and multiple Component sets.
:param component_types: Two or more Component types.
:return: An iterator for Entity, (Component1, Component2, etc)
tuples.
"""
entity_db = self._entities
comp_db = self._components
try:
for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass
@_lru_cache()
def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
return [query for query in self._get_component(component_type)]
@_lru_cache()
def get_components(self, *component_types: Type):
return [query for query in self._get_components(*component_types)]
def try_component(self, entity: int, component_type: Type):
"""Try to get a single component type for an Entity.
This method will return the requested Component if it exists, but
will pass silently if it does not. This allows a way to access optional
Components that may or may not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: A iterator containg the single Component instance requested,
which is empty if the component doesn't exist.
"""
if component_type in self._entities[entity]:
yield self._entities[entity][component_type]
else:
return None
def _clear_dead_entities(self):
"""Finalize deletion of any Entities that are marked dead.
In the interest of performance, this method duplicates code from the
`delete_entity` method. If that method is changed, those changes should
be duplicated here as well.
"""
for entity in self._dead_entities:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self._dead_entities.clear()
self.clear_cache()
def _process(self, *args, **kwargs):
for processor in self._processors:
processor.process(*args, **kwargs)
def _timed_process(self, *args, **kwargs):
"""Track Processor execution time for benchmarking."""
for processor in self._processors:
start_time = _time.process_time()
processor.process(*args, **kwargs)
process_time = int(round((_time.process_time() - start_time) * 1000, 2))
self.process_times[processor.__class__.__name__] = process_time
def process(self, *args, **kwargs):
"""Call the process method on all Processors, in order of their priority.
Call the *process* method on all assigned Processors, respecting their
optional priority setting. In addition, any Entities that were marked
for deletion since the last call to *World.process*, will be deleted
at the start of this method call.
:param args: Optional arguments that will be passed through to the
*process* method of all Processors.
"""
self._clear_dead_entities()
self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World.remove_component
|
python
|
def remove_component(self, entity: int, component_type: Any) -> int:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity][component_type]
if not self._entities[entity]:
del self._entities[entity]
self.clear_cache()
return entity
|
Remove a Component instance from an Entity, by type.
A Component instance can be removed by providing it's type.
For example: world.delete_component(enemy_a, Velocity) will remove
the Velocity instance from the Entity enemy_a.
Raises a KeyError if either the given entity or Component type does
not exist in the database.
:param entity: The Entity to remove the Component from.
:param component_type: The type of the Component to remove.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L199-L222
|
[
"def clear_cache(self) -> None:\n self.get_component.cache_clear()\n self.get_components.cache_clear()\n"
] |
class World:
def __init__(self, timed=False):
"""A World object keeps track of all Entities, Components, and Processors.
A World contains a database of all Entity/Component assignments. It also
handles calling the process method on any Processors assigned to it.
"""
self._processors = [] # type: List[Processor]
self._next_entity_id = 0
self._components = {}
self._entities = {} # type: Dict[int, Any]
self._dead_entities = set()
if timed:
self.process_times = {}
self._process = self._timed_process
def clear_cache(self) -> None:
self.get_component.cache_clear()
self.get_components.cache_clear()
def clear_database(self) -> None:
"""Remove all Entities and Components from the World."""
self._next_entity_id = 0
self._dead_entities.clear()
self._components.clear()
self._entities.clear()
self.clear_cache()
def add_processor(self, processor_instance: Processor, priority=0) -> None:
"""Add a Processor instance to the World.
:param processor_instance: An instance of a Processor,
subclassed from the Processor class
:param priority: A higher number is processed first.
"""
assert issubclass(processor_instance.__class__, Processor)
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda proc: proc.priority, reverse=True)
def remove_processor(self, processor_type: Processor) -> None:
"""Remove a Processor from the World, by type.
:param processor_type: The class type of the Processor to remove.
"""
for processor in self._processors:
if type(processor) == processor_type:
processor.world = None
self._processors.remove(processor)
def get_processor(self, processor_type: Type[P]) -> P:
"""Get a Processor instance, by type.
This method returns a Processor instance by type. This could be
useful in certain situations, such as wanting to call a method on a
Processor, from within another Processor.
:param processor_type: The type of the Processor you wish to retrieve.
:return: A Processor instance that has previously been added to the World.
"""
for processor in self._processors:
if type(processor) == processor_type:
return processor
def create_entity(self, *components) -> int:
"""Create a new Entity.
This method returns an Entity ID, which is just a plain integer.
You can optionally pass one or more Component instances to be
assigned to the Entity.
:param components: Optional components to be assigned to the
entity on creation.
:return: The next Entity ID in sequence.
"""
self._next_entity_id += 1
# TODO: duplicate add_component code here for performance
for component in components:
self.add_component(self._next_entity_id, component)
# self.clear_cache()
return self._next_entity_id
def delete_entity(self, entity: int, immediate=False) -> None:
"""Delete an Entity from the World.
Delete an Entity and all of it's assigned Component instances from
the world. By default, Entity deletion is delayed until the next call
to *World.process*. You can request immediate deletion, however, by
passing the "immediate=True" parameter. This should generally not be
done during Entity iteration (calls to World.get_component/s).
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID you wish to delete.
:param immediate: If True, delete the Entity immediately.
"""
if immediate:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self.clear_cache()
else:
self._dead_entities.add(entity)
def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
"""Retrieve a Component instance for a specific Entity.
Retrieve a Component instance for a specific Entity. In some cases,
it may be necessary to access a specific Component instance.
For example: directly modifying a Component to handle user input.
Raises a KeyError if the given Entity and Component do not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: The Component instance requested for the given Entity ID.
"""
return self._entities[entity][component_type]
def components_for_entity(self, entity: int) -> Tuple[C, ...]:
"""Retrieve all Components for a specific Entity, as a Tuple.
Retrieve all Components for a specific Entity. The method is probably
not appropriate to use in your Processors, but might be useful for
saving state, or passing specific Components between World instances.
Unlike most other methods, this returns all of the Components as a
Tuple in one batch, instead of returning a Generator for iteration.
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID to retrieve the Components for.
:return: A tuple of all Component instances that have been
assigned to the passed Entity ID.
"""
return tuple(self._entities[entity].values())
def has_component(self, entity: int, component_type: Any) -> bool:
"""Check if a specific Entity has a Component of a certain type.
:param entity: The Entity you are querying.
:param component_type: The type of Component to check for.
:return: True if the Entity has a Component of this type,
otherwise False
"""
return component_type in self._entities[entity]
def add_component(self, entity: int, component_instance: Any) -> None:
"""Add a new Component instance to an Entity.
Add a Component instance to an Entiy. If a Component of the same type
is already assigned to the Entity, it will be replaced.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance.
"""
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache()
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
"""Get an iterator for Entity, Component pairs.
:param component_type: The Component type to retrieve.
:return: An iterator for (Entity, Component) tuples.
"""
entity_db = self._entities
for entity in self._components.get(component_type, []):
yield entity, entity_db[entity][component_type]
def _get_components(self, *component_types: Type)-> Iterable[Tuple[int, ...]]:
"""Get an iterator for Entity and multiple Component sets.
:param component_types: Two or more Component types.
:return: An iterator for Entity, (Component1, Component2, etc)
tuples.
"""
entity_db = self._entities
comp_db = self._components
try:
for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass
@_lru_cache()
def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
return [query for query in self._get_component(component_type)]
@_lru_cache()
def get_components(self, *component_types: Type):
return [query for query in self._get_components(*component_types)]
def try_component(self, entity: int, component_type: Type):
"""Try to get a single component type for an Entity.
This method will return the requested Component if it exists, but
will pass silently if it does not. This allows a way to access optional
Components that may or may not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: A iterator containg the single Component instance requested,
which is empty if the component doesn't exist.
"""
if component_type in self._entities[entity]:
yield self._entities[entity][component_type]
else:
return None
def _clear_dead_entities(self):
"""Finalize deletion of any Entities that are marked dead.
In the interest of performance, this method duplicates code from the
`delete_entity` method. If that method is changed, those changes should
be duplicated here as well.
"""
for entity in self._dead_entities:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self._dead_entities.clear()
self.clear_cache()
def _process(self, *args, **kwargs):
for processor in self._processors:
processor.process(*args, **kwargs)
def _timed_process(self, *args, **kwargs):
"""Track Processor execution time for benchmarking."""
for processor in self._processors:
start_time = _time.process_time()
processor.process(*args, **kwargs)
process_time = int(round((_time.process_time() - start_time) * 1000, 2))
self.process_times[processor.__class__.__name__] = process_time
def process(self, *args, **kwargs):
"""Call the process method on all Processors, in order of their priority.
Call the *process* method on all assigned Processors, respecting their
optional priority setting. In addition, any Entities that were marked
for deletion since the last call to *World.process*, will be deleted
at the start of this method call.
:param args: Optional arguments that will be passed through to the
*process* method of all Processors.
"""
self._clear_dead_entities()
self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World._get_component
|
python
|
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
entity_db = self._entities
for entity in self._components.get(component_type, []):
yield entity, entity_db[entity][component_type]
|
Get an iterator for Entity, Component pairs.
:param component_type: The Component type to retrieve.
:return: An iterator for (Entity, Component) tuples.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L224-L233
| null |
class World:
def __init__(self, timed=False):
"""A World object keeps track of all Entities, Components, and Processors.
A World contains a database of all Entity/Component assignments. It also
handles calling the process method on any Processors assigned to it.
"""
self._processors = [] # type: List[Processor]
self._next_entity_id = 0
self._components = {}
self._entities = {} # type: Dict[int, Any]
self._dead_entities = set()
if timed:
self.process_times = {}
self._process = self._timed_process
def clear_cache(self) -> None:
self.get_component.cache_clear()
self.get_components.cache_clear()
def clear_database(self) -> None:
"""Remove all Entities and Components from the World."""
self._next_entity_id = 0
self._dead_entities.clear()
self._components.clear()
self._entities.clear()
self.clear_cache()
def add_processor(self, processor_instance: Processor, priority=0) -> None:
"""Add a Processor instance to the World.
:param processor_instance: An instance of a Processor,
subclassed from the Processor class
:param priority: A higher number is processed first.
"""
assert issubclass(processor_instance.__class__, Processor)
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda proc: proc.priority, reverse=True)
def remove_processor(self, processor_type: Processor) -> None:
"""Remove a Processor from the World, by type.
:param processor_type: The class type of the Processor to remove.
"""
for processor in self._processors:
if type(processor) == processor_type:
processor.world = None
self._processors.remove(processor)
def get_processor(self, processor_type: Type[P]) -> P:
"""Get a Processor instance, by type.
This method returns a Processor instance by type. This could be
useful in certain situations, such as wanting to call a method on a
Processor, from within another Processor.
:param processor_type: The type of the Processor you wish to retrieve.
:return: A Processor instance that has previously been added to the World.
"""
for processor in self._processors:
if type(processor) == processor_type:
return processor
def create_entity(self, *components) -> int:
"""Create a new Entity.
This method returns an Entity ID, which is just a plain integer.
You can optionally pass one or more Component instances to be
assigned to the Entity.
:param components: Optional components to be assigned to the
entity on creation.
:return: The next Entity ID in sequence.
"""
self._next_entity_id += 1
# TODO: duplicate add_component code here for performance
for component in components:
self.add_component(self._next_entity_id, component)
# self.clear_cache()
return self._next_entity_id
def delete_entity(self, entity: int, immediate=False) -> None:
"""Delete an Entity from the World.
Delete an Entity and all of it's assigned Component instances from
the world. By default, Entity deletion is delayed until the next call
to *World.process*. You can request immediate deletion, however, by
passing the "immediate=True" parameter. This should generally not be
done during Entity iteration (calls to World.get_component/s).
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID you wish to delete.
:param immediate: If True, delete the Entity immediately.
"""
if immediate:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self.clear_cache()
else:
self._dead_entities.add(entity)
def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
"""Retrieve a Component instance for a specific Entity.
Retrieve a Component instance for a specific Entity. In some cases,
it may be necessary to access a specific Component instance.
For example: directly modifying a Component to handle user input.
Raises a KeyError if the given Entity and Component do not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: The Component instance requested for the given Entity ID.
"""
return self._entities[entity][component_type]
def components_for_entity(self, entity: int) -> Tuple[C, ...]:
"""Retrieve all Components for a specific Entity, as a Tuple.
Retrieve all Components for a specific Entity. The method is probably
not appropriate to use in your Processors, but might be useful for
saving state, or passing specific Components between World instances.
Unlike most other methods, this returns all of the Components as a
Tuple in one batch, instead of returning a Generator for iteration.
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID to retrieve the Components for.
:return: A tuple of all Component instances that have been
assigned to the passed Entity ID.
"""
return tuple(self._entities[entity].values())
def has_component(self, entity: int, component_type: Any) -> bool:
"""Check if a specific Entity has a Component of a certain type.
:param entity: The Entity you are querying.
:param component_type: The type of Component to check for.
:return: True if the Entity has a Component of this type,
otherwise False
"""
return component_type in self._entities[entity]
def add_component(self, entity: int, component_instance: Any) -> None:
"""Add a new Component instance to an Entity.
Add a Component instance to an Entiy. If a Component of the same type
is already assigned to the Entity, it will be replaced.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance.
"""
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache()
def remove_component(self, entity: int, component_type: Any) -> int:
"""Remove a Component instance from an Entity, by type.
A Component instance can be removed by providing it's type.
For example: world.delete_component(enemy_a, Velocity) will remove
the Velocity instance from the Entity enemy_a.
Raises a KeyError if either the given entity or Component type does
not exist in the database.
:param entity: The Entity to remove the Component from.
:param component_type: The type of the Component to remove.
"""
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity][component_type]
if not self._entities[entity]:
del self._entities[entity]
self.clear_cache()
return entity
def _get_components(self, *component_types: Type)-> Iterable[Tuple[int, ...]]:
"""Get an iterator for Entity and multiple Component sets.
:param component_types: Two or more Component types.
:return: An iterator for Entity, (Component1, Component2, etc)
tuples.
"""
entity_db = self._entities
comp_db = self._components
try:
for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass
@_lru_cache()
def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
    """Return a cached list of (Entity, Component) pairs for one Component type."""
    return list(self._get_component(component_type))
@_lru_cache()
def get_components(self, *component_types: Type):
    """Return a cached list of (Entity, [Components]) pairs for several types."""
    return list(self._get_components(*component_types))
def try_component(self, entity: int, component_type: Type):
    """Yield the Entity's Component of *component_type*, if it exists.

    Unlike component_for_entity, a missing Component is not an error:
    the returned iterator is simply empty, which makes this convenient
    for optional Components that may or may not be present.

    :param entity: The Entity ID to look up.
    :param component_type: The Component type you wish to retrieve.
    :return: An iterator containing at most one Component instance.
    """
    entity_components = self._entities[entity]
    # A generator that yields nothing is the "not found" signal.
    if component_type in entity_components:
        yield entity_components[component_type]
def _clear_dead_entities(self):
"""Finalize deletion of any Entities that are marked dead.
In the interest of performance, this method duplicates code from the
`delete_entity` method. If that method is changed, those changes should
be duplicated here as well.
"""
for entity in self._dead_entities:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self._dead_entities.clear()
self.clear_cache()
def _process(self, *args, **kwargs):
for processor in self._processors:
processor.process(*args, **kwargs)
def _timed_process(self, *args, **kwargs):
"""Track Processor execution time for benchmarking."""
for processor in self._processors:
start_time = _time.process_time()
processor.process(*args, **kwargs)
process_time = int(round((_time.process_time() - start_time) * 1000, 2))
self.process_times[processor.__class__.__name__] = process_time
def process(self, *args, **kwargs):
    """Delete dead Entities, then run all Processors in priority order.

    Entities marked for deletion since the previous call are removed
    first; afterwards each assigned Processor's *process* method is
    invoked, respecting its optional priority.

    :param args: Optional arguments passed through to the *process*
        method of every Processor.
    """
    self._clear_dead_entities()
    self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World._get_components
|
python
|
def _get_components(self, *component_types: Type)-> Iterable[Tuple[int, ...]]:
entity_db = self._entities
comp_db = self._components
try:
for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass
|
Get an iterator for Entity and multiple Component sets.
:param component_types: Two or more Component types.
:return: An iterator for Entity, (Component1, Component2, etc)
tuples.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L235-L249
| null |
class World:
    """Entity/Component database plus the Processors that operate on it.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; verify the layout against the original module.
    """

    def __init__(self, timed=False):
        """A World object keeps track of all Entities, Components, and Processors.

        A World contains a database of all Entity/Component assignments. It also
        handles calling the process method on any Processors assigned to it.
        """
        self._processors = []  # type: List[Processor]
        self._next_entity_id = 0        # last Entity ID handed out by create_entity
        self._components = {}           # Component type -> set of Entity IDs
        self._entities = {}  # type: Dict[int, Any]
        self._dead_entities = set()     # Entities queued for deletion on next process()
        if timed:
            # Benchmarking mode: record per-Processor run times (in ms).
            self.process_times = {}
            self._process = self._timed_process

    def clear_cache(self) -> None:
        # Invalidate the lru_cache-backed query results; called whenever
        # the Entity/Component database changes.
        self.get_component.cache_clear()
        self.get_components.cache_clear()

    def clear_database(self) -> None:
        """Remove all Entities and Components from the World."""
        self._next_entity_id = 0
        self._dead_entities.clear()
        self._components.clear()
        self._entities.clear()
        self.clear_cache()

    def add_processor(self, processor_instance: Processor, priority=0) -> None:
        """Add a Processor instance to the World.

        :param processor_instance: An instance of a Processor,
            subclassed from the Processor class
        :param priority: A higher number is processed first.
        """
        assert issubclass(processor_instance.__class__, Processor)
        processor_instance.priority = priority
        processor_instance.world = self
        self._processors.append(processor_instance)
        # Keep the list sorted so process() can simply iterate it in order.
        self._processors.sort(key=lambda proc: proc.priority, reverse=True)

    def remove_processor(self, processor_type: Processor) -> None:
        """Remove a Processor from the World, by type.

        :param processor_type: The class type of the Processor to remove.
        """
        # NOTE(review): removes from the list being iterated; safe only if
        # at most one Processor of a given type exists -- verify.
        for processor in self._processors:
            if type(processor) == processor_type:
                processor.world = None
                self._processors.remove(processor)

    def get_processor(self, processor_type: Type[P]) -> P:
        """Get a Processor instance, by type.

        This method returns a Processor instance by type. This could be
        useful in certain situations, such as wanting to call a method on a
        Processor, from within another Processor.

        :param processor_type: The type of the Processor you wish to retrieve.
        :return: A Processor instance that has previously been added to the
            World, or None implicitly when no Processor of that type exists.
        """
        for processor in self._processors:
            if type(processor) == processor_type:
                return processor

    def create_entity(self, *components) -> int:
        """Create a new Entity.

        This method returns an Entity ID, which is just a plain integer.
        You can optionally pass one or more Component instances to be
        assigned to the Entity.

        :param components: Optional components to be assigned to the
            entity on creation.
        :return: The next Entity ID in sequence.
        """
        self._next_entity_id += 1
        # TODO: duplicate add_component code here for performance
        for component in components:
            self.add_component(self._next_entity_id, component)
        # self.clear_cache()
        return self._next_entity_id

    def delete_entity(self, entity: int, immediate=False) -> None:
        """Delete an Entity from the World.

        Delete an Entity and all of its assigned Component instances from
        the world. By default, Entity deletion is delayed until the next call
        to *World.process*. You can request immediate deletion, however, by
        passing the "immediate=True" parameter. This should generally not be
        done during Entity iteration (calls to World.get_component/s).

        Raises a KeyError if the given entity does not exist in the database.

        :param entity: The Entity ID you wish to delete.
        :param immediate: If True, delete the Entity immediately.
        """
        if immediate:
            for component_type in self._entities[entity]:
                self._components[component_type].discard(entity)
                # Drop the per-type set once it is empty.
                if not self._components[component_type]:
                    del self._components[component_type]
            del self._entities[entity]
            self.clear_cache()
        else:
            # Deferred path: actually removed by _clear_dead_entities().
            self._dead_entities.add(entity)

    def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
        """Retrieve a Component instance for a specific Entity.

        Retrieve a Component instance for a specific Entity. In some cases,
        it may be necessary to access a specific Component instance.
        For example: directly modifying a Component to handle user input.

        Raises a KeyError if the given Entity and Component do not exist.

        :param entity: The Entity ID to retrieve the Component for.
        :param component_type: The Component type you wish to retrieve.
        :return: The Component instance requested for the given Entity ID.
        """
        return self._entities[entity][component_type]

    def components_for_entity(self, entity: int) -> Tuple[C, ...]:
        """Retrieve all Components for a specific Entity, as a Tuple.

        Retrieve all Components for a specific Entity. The method is probably
        not appropriate to use in your Processors, but might be useful for
        saving state, or passing specific Components between World instances.
        Unlike most other methods, this returns all of the Components as a
        Tuple in one batch, instead of returning a Generator for iteration.

        Raises a KeyError if the given entity does not exist in the database.

        :param entity: The Entity ID to retrieve the Components for.
        :return: A tuple of all Component instances that have been
            assigned to the passed Entity ID.
        """
        return tuple(self._entities[entity].values())

    def has_component(self, entity: int, component_type: Any) -> bool:
        """Check if a specific Entity has a Component of a certain type.

        :param entity: The Entity you are querying.
        :param component_type: The type of Component to check for.
        :return: True if the Entity has a Component of this type,
            otherwise False
        """
        return component_type in self._entities[entity]

    def add_component(self, entity: int, component_instance: Any) -> None:
        """Add a new Component instance to an Entity.

        Add a Component instance to an Entity. If a Component of the same type
        is already assigned to the Entity, it will be replaced.

        :param entity: The Entity to associate the Component with.
        :param component_instance: A Component instance.
        """
        component_type = type(component_instance)
        if component_type not in self._components:
            self._components[component_type] = set()
        self._components[component_type].add(entity)
        if entity not in self._entities:
            self._entities[entity] = {}
        self._entities[entity][component_type] = component_instance
        # Cached query results are now stale.
        self.clear_cache()

    def remove_component(self, entity: int, component_type: Any) -> int:
        """Remove a Component instance from an Entity, by type.

        A Component instance can be removed by providing its type.
        For example: world.remove_component(enemy_a, Velocity) will remove
        the Velocity instance from the Entity enemy_a.

        Raises a KeyError if either the given entity or Component type does
        not exist in the database.

        :param entity: The Entity to remove the Component from.
        :param component_type: The type of the Component to remove.
        :return: The Entity ID that was modified.
        """
        self._components[component_type].discard(entity)
        # Drop empty bookkeeping entries so the databases stay minimal.
        if not self._components[component_type]:
            del self._components[component_type]
        del self._entities[entity][component_type]
        if not self._entities[entity]:
            del self._entities[entity]
        self.clear_cache()
        return entity

    def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
        """Get an iterator for Entity, Component pairs.

        :param component_type: The Component type to retrieve.
        :return: An iterator for (Entity, Component) tuples.
        """
        entity_db = self._entities
        for entity in self._components.get(component_type, []):
            yield entity, entity_db[entity][component_type]

    @_lru_cache()
    def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
        # Cached wrapper around _get_component; invalidated via clear_cache().
        return [query for query in self._get_component(component_type)]

    @_lru_cache()
    def get_components(self, *component_types: Type):
        # NOTE(review): _get_components is not defined in this class as
        # shown -- presumably provided elsewhere; verify.
        return [query for query in self._get_components(*component_types)]

    def try_component(self, entity: int, component_type: Type):
        """Try to get a single component type for an Entity.

        This method will return the requested Component if it exists, but
        will pass silently if it does not. This allows a way to access optional
        Components that may or may not exist.

        :param entity: The Entity ID to retrieve the Component for.
        :param component_type: The Component type you wish to retrieve.
        :return: An iterator containing the single Component instance requested,
            which is empty if the component doesn't exist.
        """
        if component_type in self._entities[entity]:
            yield self._entities[entity][component_type]
        else:
            return None

    def _clear_dead_entities(self):
        """Finalize deletion of any Entities that are marked dead.

        In the interest of performance, this method duplicates code from the
        `delete_entity` method. If that method is changed, those changes should
        be duplicated here as well.
        """
        for entity in self._dead_entities:
            for component_type in self._entities[entity]:
                self._components[component_type].discard(entity)
                if not self._components[component_type]:
                    del self._components[component_type]
            del self._entities[entity]
        self._dead_entities.clear()
        self.clear_cache()

    def _process(self, *args, **kwargs):
        # Run every Processor once, in stored (priority) order.
        for processor in self._processors:
            processor.process(*args, **kwargs)

    def _timed_process(self, *args, **kwargs):
        """Track Processor execution time for benchmarking."""
        for processor in self._processors:
            start_time = _time.process_time()
            processor.process(*args, **kwargs)
            process_time = int(round((_time.process_time() - start_time) * 1000, 2))
            # Keyed by Processor class name; overwritten each call.
            self.process_times[processor.__class__.__name__] = process_time

    def process(self, *args, **kwargs):
        """Call the process method on all Processors, in order of their priority.

        Call the *process* method on all assigned Processors, respecting their
        optional priority setting. In addition, any Entities that were marked
        for deletion since the last call to *World.process*, will be deleted
        at the start of this method call.

        :param args: Optional arguments that will be passed through to the
            *process* method of all Processors.
        """
        self._clear_dead_entities()
        self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World.try_component
|
python
|
def try_component(self, entity: int, component_type: Type):
if component_type in self._entities[entity]:
yield self._entities[entity][component_type]
else:
return None
|
Try to get a single component type for an Entity.
This method will return the requested Component if it exists, but
will pass silently if it does not. This allows a way to access optional
Components that may or may not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: An iterator containing the single Component instance requested,
which is empty if the component doesn't exist.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L259-L274
| null |
class World:
    """Entity/Component database plus the Processors that operate on it.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; verify the layout against the original module.
    """

    def __init__(self, timed=False):
        """A World object keeps track of all Entities, Components, and Processors.

        A World contains a database of all Entity/Component assignments. It also
        handles calling the process method on any Processors assigned to it.
        """
        self._processors = []  # type: List[Processor]
        self._next_entity_id = 0        # last Entity ID handed out by create_entity
        self._components = {}           # Component type -> set of Entity IDs
        self._entities = {}  # type: Dict[int, Any]
        self._dead_entities = set()     # Entities queued for deletion on next process()
        if timed:
            # Benchmarking mode: record per-Processor run times (in ms).
            self.process_times = {}
            self._process = self._timed_process

    def clear_cache(self) -> None:
        # Invalidate the lru_cache-backed query results; called whenever
        # the Entity/Component database changes.
        self.get_component.cache_clear()
        self.get_components.cache_clear()

    def clear_database(self) -> None:
        """Remove all Entities and Components from the World."""
        self._next_entity_id = 0
        self._dead_entities.clear()
        self._components.clear()
        self._entities.clear()
        self.clear_cache()

    def add_processor(self, processor_instance: Processor, priority=0) -> None:
        """Add a Processor instance to the World.

        :param processor_instance: An instance of a Processor,
            subclassed from the Processor class
        :param priority: A higher number is processed first.
        """
        assert issubclass(processor_instance.__class__, Processor)
        processor_instance.priority = priority
        processor_instance.world = self
        self._processors.append(processor_instance)
        # Keep the list sorted so process() can simply iterate it in order.
        self._processors.sort(key=lambda proc: proc.priority, reverse=True)

    def remove_processor(self, processor_type: Processor) -> None:
        """Remove a Processor from the World, by type.

        :param processor_type: The class type of the Processor to remove.
        """
        # NOTE(review): removes from the list being iterated; safe only if
        # at most one Processor of a given type exists -- verify.
        for processor in self._processors:
            if type(processor) == processor_type:
                processor.world = None
                self._processors.remove(processor)

    def get_processor(self, processor_type: Type[P]) -> P:
        """Get a Processor instance, by type.

        This method returns a Processor instance by type. This could be
        useful in certain situations, such as wanting to call a method on a
        Processor, from within another Processor.

        :param processor_type: The type of the Processor you wish to retrieve.
        :return: A Processor instance that has previously been added to the
            World, or None implicitly when no Processor of that type exists.
        """
        for processor in self._processors:
            if type(processor) == processor_type:
                return processor

    def create_entity(self, *components) -> int:
        """Create a new Entity.

        This method returns an Entity ID, which is just a plain integer.
        You can optionally pass one or more Component instances to be
        assigned to the Entity.

        :param components: Optional components to be assigned to the
            entity on creation.
        :return: The next Entity ID in sequence.
        """
        self._next_entity_id += 1
        # TODO: duplicate add_component code here for performance
        for component in components:
            self.add_component(self._next_entity_id, component)
        # self.clear_cache()
        return self._next_entity_id

    def delete_entity(self, entity: int, immediate=False) -> None:
        """Delete an Entity from the World.

        Delete an Entity and all of its assigned Component instances from
        the world. By default, Entity deletion is delayed until the next call
        to *World.process*. You can request immediate deletion, however, by
        passing the "immediate=True" parameter. This should generally not be
        done during Entity iteration (calls to World.get_component/s).

        Raises a KeyError if the given entity does not exist in the database.

        :param entity: The Entity ID you wish to delete.
        :param immediate: If True, delete the Entity immediately.
        """
        if immediate:
            for component_type in self._entities[entity]:
                self._components[component_type].discard(entity)
                # Drop the per-type set once it is empty.
                if not self._components[component_type]:
                    del self._components[component_type]
            del self._entities[entity]
            self.clear_cache()
        else:
            # Deferred path: actually removed by _clear_dead_entities().
            self._dead_entities.add(entity)

    def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
        """Retrieve a Component instance for a specific Entity.

        Retrieve a Component instance for a specific Entity. In some cases,
        it may be necessary to access a specific Component instance.
        For example: directly modifying a Component to handle user input.

        Raises a KeyError if the given Entity and Component do not exist.

        :param entity: The Entity ID to retrieve the Component for.
        :param component_type: The Component type you wish to retrieve.
        :return: The Component instance requested for the given Entity ID.
        """
        return self._entities[entity][component_type]

    def components_for_entity(self, entity: int) -> Tuple[C, ...]:
        """Retrieve all Components for a specific Entity, as a Tuple.

        Retrieve all Components for a specific Entity. The method is probably
        not appropriate to use in your Processors, but might be useful for
        saving state, or passing specific Components between World instances.
        Unlike most other methods, this returns all of the Components as a
        Tuple in one batch, instead of returning a Generator for iteration.

        Raises a KeyError if the given entity does not exist in the database.

        :param entity: The Entity ID to retrieve the Components for.
        :return: A tuple of all Component instances that have been
            assigned to the passed Entity ID.
        """
        return tuple(self._entities[entity].values())

    def has_component(self, entity: int, component_type: Any) -> bool:
        """Check if a specific Entity has a Component of a certain type.

        :param entity: The Entity you are querying.
        :param component_type: The type of Component to check for.
        :return: True if the Entity has a Component of this type,
            otherwise False
        """
        return component_type in self._entities[entity]

    def add_component(self, entity: int, component_instance: Any) -> None:
        """Add a new Component instance to an Entity.

        Add a Component instance to an Entity. If a Component of the same type
        is already assigned to the Entity, it will be replaced.

        :param entity: The Entity to associate the Component with.
        :param component_instance: A Component instance.
        """
        component_type = type(component_instance)
        if component_type not in self._components:
            self._components[component_type] = set()
        self._components[component_type].add(entity)
        if entity not in self._entities:
            self._entities[entity] = {}
        self._entities[entity][component_type] = component_instance
        # Cached query results are now stale.
        self.clear_cache()

    def remove_component(self, entity: int, component_type: Any) -> int:
        """Remove a Component instance from an Entity, by type.

        A Component instance can be removed by providing its type.
        For example: world.remove_component(enemy_a, Velocity) will remove
        the Velocity instance from the Entity enemy_a.

        Raises a KeyError if either the given entity or Component type does
        not exist in the database.

        :param entity: The Entity to remove the Component from.
        :param component_type: The type of the Component to remove.
        :return: The Entity ID that was modified.
        """
        self._components[component_type].discard(entity)
        # Drop empty bookkeeping entries so the databases stay minimal.
        if not self._components[component_type]:
            del self._components[component_type]
        del self._entities[entity][component_type]
        if not self._entities[entity]:
            del self._entities[entity]
        self.clear_cache()
        return entity

    def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
        """Get an iterator for Entity, Component pairs.

        :param component_type: The Component type to retrieve.
        :return: An iterator for (Entity, Component) tuples.
        """
        entity_db = self._entities
        for entity in self._components.get(component_type, []):
            yield entity, entity_db[entity][component_type]

    def _get_components(self, *component_types: Type) -> Iterable[Tuple[int, ...]]:
        """Get an iterator for Entity and multiple Component sets.

        :param component_types: Two or more Component types.
        :return: An iterator for Entity, (Component1, Component2, etc)
            tuples.
        """
        entity_db = self._entities
        comp_db = self._components
        try:
            # Only entities present in every per-type set qualify.
            for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
                yield entity, [entity_db[entity][ct] for ct in component_types]
        except KeyError:
            # An unregistered component type means no entity can match.
            pass

    @_lru_cache()
    def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
        # Cached wrapper around _get_component; invalidated via clear_cache().
        return [query for query in self._get_component(component_type)]

    @_lru_cache()
    def get_components(self, *component_types: Type):
        # Cached wrapper around _get_components; invalidated via clear_cache().
        return [query for query in self._get_components(*component_types)]

    def try_component(self, entity: int, component_type: Type):
        """Try to get a single component type for an Entity.

        This method will return the requested Component if it exists, but
        will pass silently if it does not. This allows a way to access optional
        Components that may or may not exist.

        :param entity: The Entity ID to retrieve the Component for.
        :param component_type: The Component type you wish to retrieve.
        :return: An iterator containing the single Component instance requested,
            which is empty if the component doesn't exist.
        """
        if component_type in self._entities[entity]:
            yield self._entities[entity][component_type]
        else:
            return None

    def _clear_dead_entities(self):
        """Finalize deletion of any Entities that are marked dead.

        In the interest of performance, this method duplicates code from the
        `delete_entity` method. If that method is changed, those changes should
        be duplicated here as well.
        """
        for entity in self._dead_entities:
            for component_type in self._entities[entity]:
                self._components[component_type].discard(entity)
                if not self._components[component_type]:
                    del self._components[component_type]
            del self._entities[entity]
        self._dead_entities.clear()
        self.clear_cache()

    def _process(self, *args, **kwargs):
        # Run every Processor once, in stored (priority) order.
        for processor in self._processors:
            processor.process(*args, **kwargs)

    def _timed_process(self, *args, **kwargs):
        """Track Processor execution time for benchmarking."""
        for processor in self._processors:
            start_time = _time.process_time()
            processor.process(*args, **kwargs)
            process_time = int(round((_time.process_time() - start_time) * 1000, 2))
            # Keyed by Processor class name; overwritten each call.
            self.process_times[processor.__class__.__name__] = process_time

    def process(self, *args, **kwargs):
        """Call the process method on all Processors, in order of their priority.

        Call the *process* method on all assigned Processors, respecting their
        optional priority setting. In addition, any Entities that were marked
        for deletion since the last call to *World.process*, will be deleted
        at the start of this method call.

        :param args: Optional arguments that will be passed through to the
            *process* method of all Processors.
        """
        self._clear_dead_entities()
        self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World._clear_dead_entities
|
python
|
def _clear_dead_entities(self):
for entity in self._dead_entities:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self._dead_entities.clear()
self.clear_cache()
|
Finalize deletion of any Entities that are marked dead.
In the interest of performance, this method duplicates code from the
`delete_entity` method. If that method is changed, those changes should
be duplicated here as well.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L276-L294
|
[
"def clear_cache(self) -> None:\n self.get_component.cache_clear()\n self.get_components.cache_clear()\n"
] |
class World:
def __init__(self, timed=False):
"""A World object keeps track of all Entities, Components, and Processors.
A World contains a database of all Entity/Component assignments. It also
handles calling the process method on any Processors assigned to it.
"""
self._processors = [] # type: List[Processor]
self._next_entity_id = 0
self._components = {}
self._entities = {} # type: Dict[int, Any]
self._dead_entities = set()
if timed:
self.process_times = {}
self._process = self._timed_process
def clear_cache(self) -> None:
self.get_component.cache_clear()
self.get_components.cache_clear()
def clear_database(self) -> None:
"""Remove all Entities and Components from the World."""
self._next_entity_id = 0
self._dead_entities.clear()
self._components.clear()
self._entities.clear()
self.clear_cache()
def add_processor(self, processor_instance: Processor, priority=0) -> None:
"""Add a Processor instance to the World.
:param processor_instance: An instance of a Processor,
subclassed from the Processor class
:param priority: A higher number is processed first.
"""
assert issubclass(processor_instance.__class__, Processor)
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda proc: proc.priority, reverse=True)
def remove_processor(self, processor_type: Processor) -> None:
"""Remove a Processor from the World, by type.
:param processor_type: The class type of the Processor to remove.
"""
for processor in self._processors:
if type(processor) == processor_type:
processor.world = None
self._processors.remove(processor)
def get_processor(self, processor_type: Type[P]) -> P:
"""Get a Processor instance, by type.
This method returns a Processor instance by type. This could be
useful in certain situations, such as wanting to call a method on a
Processor, from within another Processor.
:param processor_type: The type of the Processor you wish to retrieve.
:return: A Processor instance that has previously been added to the World.
"""
for processor in self._processors:
if type(processor) == processor_type:
return processor
def create_entity(self, *components) -> int:
"""Create a new Entity.
This method returns an Entity ID, which is just a plain integer.
You can optionally pass one or more Component instances to be
assigned to the Entity.
:param components: Optional components to be assigned to the
entity on creation.
:return: The next Entity ID in sequence.
"""
self._next_entity_id += 1
# TODO: duplicate add_component code here for performance
for component in components:
self.add_component(self._next_entity_id, component)
# self.clear_cache()
return self._next_entity_id
def delete_entity(self, entity: int, immediate=False) -> None:
"""Delete an Entity from the World.
Delete an Entity and all of it's assigned Component instances from
the world. By default, Entity deletion is delayed until the next call
to *World.process*. You can request immediate deletion, however, by
passing the "immediate=True" parameter. This should generally not be
done during Entity iteration (calls to World.get_component/s).
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID you wish to delete.
:param immediate: If True, delete the Entity immediately.
"""
if immediate:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self.clear_cache()
else:
self._dead_entities.add(entity)
def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
"""Retrieve a Component instance for a specific Entity.
Retrieve a Component instance for a specific Entity. In some cases,
it may be necessary to access a specific Component instance.
For example: directly modifying a Component to handle user input.
Raises a KeyError if the given Entity and Component do not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: The Component instance requested for the given Entity ID.
"""
return self._entities[entity][component_type]
def components_for_entity(self, entity: int) -> Tuple[C, ...]:
"""Retrieve all Components for a specific Entity, as a Tuple.
Retrieve all Components for a specific Entity. The method is probably
not appropriate to use in your Processors, but might be useful for
saving state, or passing specific Components between World instances.
Unlike most other methods, this returns all of the Components as a
Tuple in one batch, instead of returning a Generator for iteration.
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID to retrieve the Components for.
:return: A tuple of all Component instances that have been
assigned to the passed Entity ID.
"""
return tuple(self._entities[entity].values())
def has_component(self, entity: int, component_type: Any) -> bool:
"""Check if a specific Entity has a Component of a certain type.
:param entity: The Entity you are querying.
:param component_type: The type of Component to check for.
:return: True if the Entity has a Component of this type,
otherwise False
"""
return component_type in self._entities[entity]
def add_component(self, entity: int, component_instance: Any) -> None:
"""Add a new Component instance to an Entity.
Add a Component instance to an Entiy. If a Component of the same type
is already assigned to the Entity, it will be replaced.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance.
"""
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache()
def remove_component(self, entity: int, component_type: Any) -> int:
"""Remove a Component instance from an Entity, by type.
A Component instance can be removed by providing it's type.
For example: world.delete_component(enemy_a, Velocity) will remove
the Velocity instance from the Entity enemy_a.
Raises a KeyError if either the given entity or Component type does
not exist in the database.
:param entity: The Entity to remove the Component from.
:param component_type: The type of the Component to remove.
"""
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity][component_type]
if not self._entities[entity]:
del self._entities[entity]
self.clear_cache()
return entity
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
"""Get an iterator for Entity, Component pairs.
:param component_type: The Component type to retrieve.
:return: An iterator for (Entity, Component) tuples.
"""
entity_db = self._entities
for entity in self._components.get(component_type, []):
yield entity, entity_db[entity][component_type]
def _get_components(self, *component_types: Type)-> Iterable[Tuple[int, ...]]:
"""Get an iterator for Entity and multiple Component sets.
:param component_types: Two or more Component types.
:return: An iterator for Entity, (Component1, Component2, etc)
tuples.
"""
entity_db = self._entities
comp_db = self._components
try:
for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass
@_lru_cache()
def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
return [query for query in self._get_component(component_type)]
@_lru_cache()
def get_components(self, *component_types: Type):
return [query for query in self._get_components(*component_types)]
def try_component(self, entity: int, component_type: Type):
"""Try to get a single component type for an Entity.
This method will return the requested Component if it exists, but
will pass silently if it does not. This allows a way to access optional
Components that may or may not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: A iterator containg the single Component instance requested,
which is empty if the component doesn't exist.
"""
if component_type in self._entities[entity]:
yield self._entities[entity][component_type]
else:
return None
def _clear_dead_entities(self):
"""Finalize deletion of any Entities that are marked dead.
In the interest of performance, this method duplicates code from the
`delete_entity` method. If that method is changed, those changes should
be duplicated here as well.
"""
for entity in self._dead_entities:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self._dead_entities.clear()
self.clear_cache()
def _process(self, *args, **kwargs):
for processor in self._processors:
processor.process(*args, **kwargs)
def _timed_process(self, *args, **kwargs):
"""Track Processor execution time for benchmarking."""
for processor in self._processors:
start_time = _time.process_time()
processor.process(*args, **kwargs)
process_time = int(round((_time.process_time() - start_time) * 1000, 2))
self.process_times[processor.__class__.__name__] = process_time
def process(self, *args, **kwargs):
"""Call the process method on all Processors, in order of their priority.
Call the *process* method on all assigned Processors, respecting their
optional priority setting. In addition, any Entities that were marked
for deletion since the last call to *World.process*, will be deleted
at the start of this method call.
:param args: Optional arguments that will be passed through to the
*process* method of all Processors.
"""
self._clear_dead_entities()
self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World._timed_process
|
python
|
def _timed_process(self, *args, **kwargs):
for processor in self._processors:
start_time = _time.process_time()
processor.process(*args, **kwargs)
process_time = int(round((_time.process_time() - start_time) * 1000, 2))
self.process_times[processor.__class__.__name__] = process_time
|
Track Processor execution time for benchmarking.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L300-L306
| null |
class World:
def __init__(self, timed=False):
"""A World object keeps track of all Entities, Components, and Processors.
A World contains a database of all Entity/Component assignments. It also
handles calling the process method on any Processors assigned to it.
"""
self._processors = [] # type: List[Processor]
self._next_entity_id = 0
self._components = {}
self._entities = {} # type: Dict[int, Any]
self._dead_entities = set()
if timed:
self.process_times = {}
self._process = self._timed_process
def clear_cache(self) -> None:
self.get_component.cache_clear()
self.get_components.cache_clear()
def clear_database(self) -> None:
"""Remove all Entities and Components from the World."""
self._next_entity_id = 0
self._dead_entities.clear()
self._components.clear()
self._entities.clear()
self.clear_cache()
def add_processor(self, processor_instance: Processor, priority=0) -> None:
"""Add a Processor instance to the World.
:param processor_instance: An instance of a Processor,
subclassed from the Processor class
:param priority: A higher number is processed first.
"""
assert issubclass(processor_instance.__class__, Processor)
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda proc: proc.priority, reverse=True)
def remove_processor(self, processor_type: Processor) -> None:
"""Remove a Processor from the World, by type.
:param processor_type: The class type of the Processor to remove.
"""
for processor in self._processors:
if type(processor) == processor_type:
processor.world = None
self._processors.remove(processor)
def get_processor(self, processor_type: Type[P]) -> P:
"""Get a Processor instance, by type.
This method returns a Processor instance by type. This could be
useful in certain situations, such as wanting to call a method on a
Processor, from within another Processor.
:param processor_type: The type of the Processor you wish to retrieve.
:return: A Processor instance that has previously been added to the World.
"""
for processor in self._processors:
if type(processor) == processor_type:
return processor
def create_entity(self, *components) -> int:
"""Create a new Entity.
This method returns an Entity ID, which is just a plain integer.
You can optionally pass one or more Component instances to be
assigned to the Entity.
:param components: Optional components to be assigned to the
entity on creation.
:return: The next Entity ID in sequence.
"""
self._next_entity_id += 1
# TODO: duplicate add_component code here for performance
for component in components:
self.add_component(self._next_entity_id, component)
# self.clear_cache()
return self._next_entity_id
def delete_entity(self, entity: int, immediate=False) -> None:
"""Delete an Entity from the World.
Delete an Entity and all of it's assigned Component instances from
the world. By default, Entity deletion is delayed until the next call
to *World.process*. You can request immediate deletion, however, by
passing the "immediate=True" parameter. This should generally not be
done during Entity iteration (calls to World.get_component/s).
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID you wish to delete.
:param immediate: If True, delete the Entity immediately.
"""
if immediate:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self.clear_cache()
else:
self._dead_entities.add(entity)
def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
"""Retrieve a Component instance for a specific Entity.
Retrieve a Component instance for a specific Entity. In some cases,
it may be necessary to access a specific Component instance.
For example: directly modifying a Component to handle user input.
Raises a KeyError if the given Entity and Component do not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: The Component instance requested for the given Entity ID.
"""
return self._entities[entity][component_type]
def components_for_entity(self, entity: int) -> Tuple[C, ...]:
"""Retrieve all Components for a specific Entity, as a Tuple.
Retrieve all Components for a specific Entity. The method is probably
not appropriate to use in your Processors, but might be useful for
saving state, or passing specific Components between World instances.
Unlike most other methods, this returns all of the Components as a
Tuple in one batch, instead of returning a Generator for iteration.
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID to retrieve the Components for.
:return: A tuple of all Component instances that have been
assigned to the passed Entity ID.
"""
return tuple(self._entities[entity].values())
def has_component(self, entity: int, component_type: Any) -> bool:
"""Check if a specific Entity has a Component of a certain type.
:param entity: The Entity you are querying.
:param component_type: The type of Component to check for.
:return: True if the Entity has a Component of this type,
otherwise False
"""
return component_type in self._entities[entity]
def add_component(self, entity: int, component_instance: Any) -> None:
"""Add a new Component instance to an Entity.
Add a Component instance to an Entiy. If a Component of the same type
is already assigned to the Entity, it will be replaced.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance.
"""
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache()
def remove_component(self, entity: int, component_type: Any) -> int:
"""Remove a Component instance from an Entity, by type.
A Component instance can be removed by providing it's type.
For example: world.delete_component(enemy_a, Velocity) will remove
the Velocity instance from the Entity enemy_a.
Raises a KeyError if either the given entity or Component type does
not exist in the database.
:param entity: The Entity to remove the Component from.
:param component_type: The type of the Component to remove.
"""
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity][component_type]
if not self._entities[entity]:
del self._entities[entity]
self.clear_cache()
return entity
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
"""Get an iterator for Entity, Component pairs.
:param component_type: The Component type to retrieve.
:return: An iterator for (Entity, Component) tuples.
"""
entity_db = self._entities
for entity in self._components.get(component_type, []):
yield entity, entity_db[entity][component_type]
def _get_components(self, *component_types: Type)-> Iterable[Tuple[int, ...]]:
"""Get an iterator for Entity and multiple Component sets.
:param component_types: Two or more Component types.
:return: An iterator for Entity, (Component1, Component2, etc)
tuples.
"""
entity_db = self._entities
comp_db = self._components
try:
for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass
@_lru_cache()
def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
return [query for query in self._get_component(component_type)]
@_lru_cache()
def get_components(self, *component_types: Type):
return [query for query in self._get_components(*component_types)]
def try_component(self, entity: int, component_type: Type):
"""Try to get a single component type for an Entity.
This method will return the requested Component if it exists, but
will pass silently if it does not. This allows a way to access optional
Components that may or may not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: A iterator containg the single Component instance requested,
which is empty if the component doesn't exist.
"""
if component_type in self._entities[entity]:
yield self._entities[entity][component_type]
else:
return None
def _clear_dead_entities(self):
"""Finalize deletion of any Entities that are marked dead.
In the interest of performance, this method duplicates code from the
`delete_entity` method. If that method is changed, those changes should
be duplicated here as well.
"""
for entity in self._dead_entities:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self._dead_entities.clear()
self.clear_cache()
def _process(self, *args, **kwargs):
for processor in self._processors:
processor.process(*args, **kwargs)
def process(self, *args, **kwargs):
"""Call the process method on all Processors, in order of their priority.
Call the *process* method on all assigned Processors, respecting their
optional priority setting. In addition, any Entities that were marked
for deletion since the last call to *World.process*, will be deleted
at the start of this method call.
:param args: Optional arguments that will be passed through to the
*process* method of all Processors.
"""
self._clear_dead_entities()
self._process(*args, **kwargs)
|
benmoran56/esper
|
esper.py
|
World.process
|
python
|
def process(self, *args, **kwargs):
self._clear_dead_entities()
self._process(*args, **kwargs)
|
Call the process method on all Processors, in order of their priority.
Call the *process* method on all assigned Processors, respecting their
optional priority setting. In addition, any Entities that were marked
for deletion since the last call to *World.process*, will be deleted
at the start of this method call.
:param args: Optional arguments that will be passed through to the
*process* method of all Processors.
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L308-L320
|
[
"def _clear_dead_entities(self):\n \"\"\"Finalize deletion of any Entities that are marked dead.\n\n In the interest of performance, this method duplicates code from the\n `delete_entity` method. If that method is changed, those changes should\n be duplicated here as well.\n \"\"\"\n for entity in self._dead_entities:\n\n for component_type in self._entities[entity]:\n self._components[component_type].discard(entity)\n\n if not self._components[component_type]:\n del self._components[component_type]\n\n del self._entities[entity]\n\n self._dead_entities.clear()\n self.clear_cache()\n",
"def _process(self, *args, **kwargs):\n for processor in self._processors:\n processor.process(*args, **kwargs)\n"
] |
class World:
def __init__(self, timed=False):
"""A World object keeps track of all Entities, Components, and Processors.
A World contains a database of all Entity/Component assignments. It also
handles calling the process method on any Processors assigned to it.
"""
self._processors = [] # type: List[Processor]
self._next_entity_id = 0
self._components = {}
self._entities = {} # type: Dict[int, Any]
self._dead_entities = set()
if timed:
self.process_times = {}
self._process = self._timed_process
def clear_cache(self) -> None:
self.get_component.cache_clear()
self.get_components.cache_clear()
def clear_database(self) -> None:
"""Remove all Entities and Components from the World."""
self._next_entity_id = 0
self._dead_entities.clear()
self._components.clear()
self._entities.clear()
self.clear_cache()
def add_processor(self, processor_instance: Processor, priority=0) -> None:
"""Add a Processor instance to the World.
:param processor_instance: An instance of a Processor,
subclassed from the Processor class
:param priority: A higher number is processed first.
"""
assert issubclass(processor_instance.__class__, Processor)
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda proc: proc.priority, reverse=True)
def remove_processor(self, processor_type: Processor) -> None:
"""Remove a Processor from the World, by type.
:param processor_type: The class type of the Processor to remove.
"""
for processor in self._processors:
if type(processor) == processor_type:
processor.world = None
self._processors.remove(processor)
def get_processor(self, processor_type: Type[P]) -> P:
"""Get a Processor instance, by type.
This method returns a Processor instance by type. This could be
useful in certain situations, such as wanting to call a method on a
Processor, from within another Processor.
:param processor_type: The type of the Processor you wish to retrieve.
:return: A Processor instance that has previously been added to the World.
"""
for processor in self._processors:
if type(processor) == processor_type:
return processor
def create_entity(self, *components) -> int:
"""Create a new Entity.
This method returns an Entity ID, which is just a plain integer.
You can optionally pass one or more Component instances to be
assigned to the Entity.
:param components: Optional components to be assigned to the
entity on creation.
:return: The next Entity ID in sequence.
"""
self._next_entity_id += 1
# TODO: duplicate add_component code here for performance
for component in components:
self.add_component(self._next_entity_id, component)
# self.clear_cache()
return self._next_entity_id
def delete_entity(self, entity: int, immediate=False) -> None:
"""Delete an Entity from the World.
Delete an Entity and all of it's assigned Component instances from
the world. By default, Entity deletion is delayed until the next call
to *World.process*. You can request immediate deletion, however, by
passing the "immediate=True" parameter. This should generally not be
done during Entity iteration (calls to World.get_component/s).
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID you wish to delete.
:param immediate: If True, delete the Entity immediately.
"""
if immediate:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self.clear_cache()
else:
self._dead_entities.add(entity)
def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
"""Retrieve a Component instance for a specific Entity.
Retrieve a Component instance for a specific Entity. In some cases,
it may be necessary to access a specific Component instance.
For example: directly modifying a Component to handle user input.
Raises a KeyError if the given Entity and Component do not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: The Component instance requested for the given Entity ID.
"""
return self._entities[entity][component_type]
def components_for_entity(self, entity: int) -> Tuple[C, ...]:
"""Retrieve all Components for a specific Entity, as a Tuple.
Retrieve all Components for a specific Entity. The method is probably
not appropriate to use in your Processors, but might be useful for
saving state, or passing specific Components between World instances.
Unlike most other methods, this returns all of the Components as a
Tuple in one batch, instead of returning a Generator for iteration.
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID to retrieve the Components for.
:return: A tuple of all Component instances that have been
assigned to the passed Entity ID.
"""
return tuple(self._entities[entity].values())
def has_component(self, entity: int, component_type: Any) -> bool:
"""Check if a specific Entity has a Component of a certain type.
:param entity: The Entity you are querying.
:param component_type: The type of Component to check for.
:return: True if the Entity has a Component of this type,
otherwise False
"""
return component_type in self._entities[entity]
def add_component(self, entity: int, component_instance: Any) -> None:
"""Add a new Component instance to an Entity.
Add a Component instance to an Entiy. If a Component of the same type
is already assigned to the Entity, it will be replaced.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance.
"""
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache()
def remove_component(self, entity: int, component_type: Any) -> int:
"""Remove a Component instance from an Entity, by type.
A Component instance can be removed by providing it's type.
For example: world.delete_component(enemy_a, Velocity) will remove
the Velocity instance from the Entity enemy_a.
Raises a KeyError if either the given entity or Component type does
not exist in the database.
:param entity: The Entity to remove the Component from.
:param component_type: The type of the Component to remove.
"""
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity][component_type]
if not self._entities[entity]:
del self._entities[entity]
self.clear_cache()
return entity
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
"""Get an iterator for Entity, Component pairs.
:param component_type: The Component type to retrieve.
:return: An iterator for (Entity, Component) tuples.
"""
entity_db = self._entities
for entity in self._components.get(component_type, []):
yield entity, entity_db[entity][component_type]
def _get_components(self, *component_types: Type)-> Iterable[Tuple[int, ...]]:
"""Get an iterator for Entity and multiple Component sets.
:param component_types: Two or more Component types.
:return: An iterator for Entity, (Component1, Component2, etc)
tuples.
"""
entity_db = self._entities
comp_db = self._components
try:
for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass
@_lru_cache()
def get_component(self, component_type: Type[C]) -> List[Tuple[int, C]]:
return [query for query in self._get_component(component_type)]
@_lru_cache()
def get_components(self, *component_types: Type):
return [query for query in self._get_components(*component_types)]
def try_component(self, entity: int, component_type: Type):
"""Try to get a single component type for an Entity.
This method will return the requested Component if it exists, but
will pass silently if it does not. This allows a way to access optional
Components that may or may not exist.
:param entity: The Entity ID to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: A iterator containg the single Component instance requested,
which is empty if the component doesn't exist.
"""
if component_type in self._entities[entity]:
yield self._entities[entity][component_type]
else:
return None
def _clear_dead_entities(self):
"""Finalize deletion of any Entities that are marked dead.
In the interest of performance, this method duplicates code from the
`delete_entity` method. If that method is changed, those changes should
be duplicated here as well.
"""
for entity in self._dead_entities:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self._dead_entities.clear()
self.clear_cache()
def _process(self, *args, **kwargs):
for processor in self._processors:
processor.process(*args, **kwargs)
def _timed_process(self, *args, **kwargs):
"""Track Processor execution time for benchmarking."""
for processor in self._processors:
start_time = _time.process_time()
processor.process(*args, **kwargs)
process_time = int(round((_time.process_time() - start_time) * 1000, 2))
self.process_times[processor.__class__.__name__] = process_time
|
benmoran56/esper
|
examples/pysdl2_example.py
|
texture_from_image
|
python
|
def texture_from_image(renderer, image_name):
soft_surface = ext.load_image(image_name)
texture = SDL_CreateTextureFromSurface(renderer.renderer, soft_surface)
SDL_FreeSurface(soft_surface)
return texture
|
Create an SDL2 Texture from an image file
|
train
|
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/examples/pysdl2_example.py#L76-L81
| null |
from sdl2 import *
import sdl2.ext as ext
import esper
RESOLUTION = 720, 480
##################################
# Define some Components:
##################################
class Velocity:
def __init__(self, x=0.0, y=0.0):
self.x = x
self.y = y
class Renderable:
def __init__(self, texture, width, height, posx, posy):
self.texture = texture
self.x = posx
self.y = posy
self.w = width
self.h = height
################################
# Define some Processors:
################################
class MovementProcessor(esper.Processor):
def __init__(self, minx, maxx, miny, maxy):
super().__init__()
self.minx = minx
self.maxx = maxx
self.miny = miny
self.maxy = maxy
def process(self):
# This will iterate over every Entity that has BOTH of these components:
for ent, (vel, rend) in self.world.get_components(Velocity, Renderable):
# Update the Renderable Component's position by it's Velocity:
rend.x += vel.x
rend.y += vel.y
# An example of keeping the sprite inside screen boundaries. Basically,
# adjust the position back inside screen boundaries if it tries to go outside:
rend.x = max(self.minx, rend.x)
rend.y = max(self.miny, rend.y)
rend.x = min(self.maxx - rend.w, rend.x)
rend.y = min(self.maxy - rend.h, rend.y)
class RenderProcessor(esper.Processor):
def __init__(self, renderer, clear_color=(0, 0, 0)):
super().__init__()
self.renderer = renderer
self.clear_color = clear_color
def process(self):
# Clear the window:
self.renderer.clear(self.clear_color)
# Create a destination Rect for the texture:
destination = SDL_Rect(0, 0, 0, 0)
# This will iterate over every Entity that has this Component, and blit it:
for ent, rend in self.world.get_component(Renderable):
destination.x = int(rend.x)
destination.y = int(rend.y)
destination.w = rend.w
destination.h = rend.h
SDL_RenderCopy(self.renderer.renderer, rend.texture, None, destination)
self.renderer.present()
################################
# Some SDL2 Functions:
################################
################################
# The main core of the program:
################################
def run():
# Initialize PySDL2 stuff
ext.init()
window = ext.Window(title="Esper PySDL2 example", size=RESOLUTION)
renderer = ext.Renderer(target=window)
window.show()
# Initialize Esper world, and create a "player" Entity with a few Components.
world = esper.World()
player = world.create_entity()
world.add_component(player, Velocity(x=0, y=0))
world.add_component(player, Renderable(texture=texture_from_image(renderer, "redsquare.png"),
width=64, height=64, posx=100, posy=100))
# Another motionless Entity:
enemy = world.create_entity()
world.add_component(enemy, Renderable(texture=texture_from_image(renderer, "bluesquare.png"),
width=64, height=64, posx=400, posy=250))
# Create some Processor instances, and asign them to be processed.
render_processor = RenderProcessor(renderer=renderer)
movement_processor = MovementProcessor(minx=0, maxx=RESOLUTION[0], miny=0, maxy=RESOLUTION[1])
world.add_processor(render_processor)
world.add_processor(movement_processor)
# A simple main loop
running = True
while running:
start_time = SDL_GetTicks()
for event in ext.get_events():
if event.type == SDL_QUIT:
running = False
break
if event.type == SDL_KEYDOWN:
if event.key.keysym.sym == SDLK_UP:
# Here is a way to directly access a specific Entity's Velocity
# Component's attribute (y) without making a temporary variable.
world.component_for_entity(player, Velocity).y = -3
elif event.key.keysym.sym == SDLK_DOWN:
# For clarity, here is an alternate way in which a temporary variable
# is created and modified. The previous way above is recommended instead.
player_velocity_component = world.component_for_entity(player, Velocity)
player_velocity_component.y = 3
elif event.key.keysym.sym == SDLK_LEFT:
world.component_for_entity(player, Velocity).x = -3
elif event.key.keysym.sym == SDLK_RIGHT:
world.component_for_entity(player, Velocity).x = 3
elif event.key.keysym.sym == SDLK_ESCAPE:
running = False
break
elif event.type == SDL_KEYUP:
if event.key.keysym.sym in (SDLK_UP, SDLK_DOWN):
world.component_for_entity(player, Velocity).y = 0
if event.key.keysym.sym in (SDLK_LEFT, SDLK_RIGHT):
world.component_for_entity(player, Velocity).x = 0
# A single call to world.process() will update all Processors:
world.process()
# A crude FPS limiter for about 60fps
current_time = SDL_GetTicks()
sleep_time = int(start_time + 16.667 - current_time)
if sleep_time > 0:
SDL_Delay(sleep_time)
if __name__ == "__main__":
run()
ext.quit()
|
mozilla/taar
|
taar/flask_app.py
|
flaskrun
|
python
|
def flaskrun(app, default_host="127.0.0.1", default_port="8000"):
# Set up the command-line options
parser = optparse.OptionParser()
parser.add_option(
"-H",
"--host",
help="Hostname of the Flask app " + "[default %s]" % default_host,
default=default_host,
)
parser.add_option(
"-P",
"--port",
help="Port for the Flask app " + "[default %s]" % default_port,
default=default_port,
)
# Two options useful for debugging purposes, but
# a bit dangerous so not exposed in the help message.
parser.add_option(
"-d", "--debug", action="store_true", dest="debug", help=optparse.SUPPRESS_HELP
)
parser.add_option(
"-p",
"--profile",
action="store_true",
dest="profile",
help=optparse.SUPPRESS_HELP,
)
options, _ = parser.parse_args()
# If the user selects the profiling option, then we need
# to do a little extra setup
if options.profile:
from werkzeug.contrib.profiler import ProfilerMiddleware
app.config["PROFILE"] = True
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
options.debug = True
app.run(debug=options.debug, host=options.host, port=int(options.port))
|
Takes a flask.Flask instance and runs it. Parses
command-line flags to configure the app.
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/flask_app.py#L41-L86
| null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
from flask import Flask
from dockerflow.flask import Dockerflow
import optparse
from decouple import config
import importlib
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
app = Flask(__name__)
dockerflow = Dockerflow(app)
# Hook the application plugin and configure it
PLUGIN = config("TAAR_API_PLUGIN", default=None)
sentry_sdk.init(
dsn=config("SENTRY_DSN", ''),
integrations=[FlaskIntegration()],
)
# There should only be a single registered app for the taar-api
if PLUGIN is None:
sys.stderr.write("No plugin is defined.\n")
sys.exit(1)
# Load the function and configure the application
sys.stdout.write("Loading [{}]\n".format(PLUGIN))
plugin_module = importlib.import_module(PLUGIN)
configure_plugin = importlib.import_module(PLUGIN).configure_plugin
APP_WRAPPER = configure_plugin(app)
if __name__ == "__main__":
flaskrun(app)
|
mozilla/taar
|
taar/recommenders/hybrid_recommender.py
|
CuratedWhitelistCache.get_randomized_guid_sample
|
python
|
def get_randomized_guid_sample(self, item_count):
dataset = self.get_whitelist()
random.shuffle(dataset)
return dataset[:item_count]
|
Fetch a subset of randomzied GUIDs from the whitelist
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/hybrid_recommender.py#L28-L32
|
[
"def get_whitelist(self):\n return self._data.get()[0]\n"
] |
class CuratedWhitelistCache:
"""
This fetches the curated whitelist from S3.
"""
def __init__(self, ctx):
self._ctx = ctx
self._data = LazyJSONLoader(
self._ctx, TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY
)
def get_whitelist(self):
return self._data.get()[0]
|
mozilla/taar
|
taar/recommenders/hybrid_recommender.py
|
CuratedRecommender.can_recommend
|
python
|
def can_recommend(self, client_data, extra_data={}):
    """The curated recommender can always produce a recommendation."""
    result = True
    self.logger.info("Curated can_recommend: {}".format(result))
    return result
|
The Curated recommender will always be able to recommend
something
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/hybrid_recommender.py#L52-L56
| null |
class CuratedRecommender(AbstractRecommender):
"""
The curated recommender just delegates to the whitelist
that is provided by the AMO team.
This recommender simply provides a randomized sample of
pre-approved addons for recommendation. It does not use any other
external data to generate recommendations, nor does it use any
information from the Firefox agent.
"""
def __init__(self, ctx):
self._ctx = ctx
self.logger = self._ctx[IMozLogging].get_logger("taar.curated")
self._curated_wl = CuratedWhitelistCache(self._ctx)
def recommend(self, client_data, limit, extra_data={}):
"""
Curated recommendations are just random selections
"""
guids = self._curated_wl.get_randomized_guid_sample(limit)
results = [(guid, 1.0) for guid in guids]
log_data = (client_data["client_id"], str(guids))
self.logger.info(
"Curated recommendations client_id: [%s], guids: [%s]" % log_data
)
return results
|
mozilla/taar
|
taar/recommenders/hybrid_recommender.py
|
CuratedRecommender.recommend
|
python
|
def recommend(self, client_data, limit, extra_data={}):
    """Return *limit* randomly chosen whitelist addons, each weighted 1.0."""
    sampled = self._curated_wl.get_randomized_guid_sample(limit)
    self.logger.info(
        "Curated recommendations client_id: [%s], guids: [%s]"
        % (client_data["client_id"], str(sampled))
    )
    # Curated picks carry a fixed unit weight.
    return [(g, 1.0) for g in sampled]
|
Curated recommendations are just random selections
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/hybrid_recommender.py#L58-L70
|
[
"def get_randomized_guid_sample(self, item_count):\n \"\"\" Fetch a subset of randomzied GUIDs from the whitelist \"\"\"\n dataset = self.get_whitelist()\n random.shuffle(dataset)\n return dataset[:item_count]\n"
] |
class CuratedRecommender(AbstractRecommender):
"""
The curated recommender just delegates to the whitelist
that is provided by the AMO team.
This recommender simply provides a randomized sample of
pre-approved addons for recommendation. It does not use any other
external data to generate recommendations, nor does it use any
information from the Firefox agent.
"""
def __init__(self, ctx):
self._ctx = ctx
self.logger = self._ctx[IMozLogging].get_logger("taar.curated")
self._curated_wl = CuratedWhitelistCache(self._ctx)
def can_recommend(self, client_data, extra_data={}):
"""The Curated recommender will always be able to recommend
something"""
self.logger.info("Curated can_recommend: {}".format(True))
return True
|
mozilla/taar
|
taar/recommenders/hybrid_recommender.py
|
HybridRecommender.can_recommend
|
python
|
def can_recommend(self, client_data, extra_data={}):
    """Recommend only when BOTH sub-recommenders can recommend.

    Both sub-recommenders are always consulted (no short-circuit), so
    each gets the chance to emit its own diagnostics.
    """
    from_ensemble = self._ensemble_recommender.can_recommend(
        client_data, extra_data
    )
    from_curated = self._curated_recommender.can_recommend(
        client_data, extra_data
    )
    verdict = from_ensemble and from_curated
    self.logger.info("Hybrid can_recommend: {}".format(verdict))
    return verdict
|
The hybrid recommender can recommend only when both the
ensemble and curated recommenders can recommend
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/hybrid_recommender.py#L89-L100
| null |
class HybridRecommender(AbstractRecommender):
"""
The EnsembleRecommender is a collection of recommenders where the
results from each recommendation are amplified or dampened by a
factor. The aggregate results are combined and used to recommend
addons for users.
"""
def __init__(self, ctx):
self._ctx = ctx
self.logger = self._ctx[IMozLogging].get_logger("taar")
self._ensemble_recommender = self._ctx["ensemble_recommender"]
self._curated_recommender = CuratedRecommender(self._ctx.child())
def recommend(self, client_data, limit, extra_data={}):
"""
Hybrid recommendations simply select half recommendations from
the ensemble recommender, and half from the curated one.
Duplicate recommendations are accomodated by rank ordering
by weight.
"""
preinstalled_addon_ids = client_data.get("installed_addons", [])
# Compute an extended limit by adding the length of
# the list of any preinstalled addons.
extended_limit = limit + len(preinstalled_addon_ids)
ensemble_suggestions = self._ensemble_recommender.recommend(
client_data, extended_limit, extra_data
)
curated_suggestions = self._curated_recommender.recommend(
client_data, extended_limit, extra_data
)
# Generate a set of results from each of the composite
# recommenders. We select one item from each recommender
# sequentially so that we do not bias one recommender over the
# other.
merged_results = set()
while (
len(merged_results) < limit
and len(ensemble_suggestions) > 0
and len(curated_suggestions) > 0
):
r1 = ensemble_suggestions.pop()
if r1[0] not in [temp[0] for temp in merged_results]:
merged_results.add(r1)
# Terminate early if we have an odd number for the limit
if not (
len(merged_results) < limit
and len(ensemble_suggestions) > 0
and len(curated_suggestions) > 0
):
break
r2 = curated_suggestions.pop()
if r2[0] not in [temp[0] for temp in merged_results]:
merged_results.add(r2)
if len(merged_results) < limit:
msg = (
"Defaulting to empty results. Insufficient recommendations found for client: %s"
% client_data["client_id"]
)
self.logger.info(msg)
return []
sorted_results = sorted(
list(merged_results), key=op.itemgetter(1), reverse=True
)
log_data = (client_data["client_id"], str([r[0] for r in sorted_results]))
self.logger.info(
"Hybrid recommendations client_id: [%s], guids: [%s]" % log_data
)
return sorted_results
|
mozilla/taar
|
taar/recommenders/hybrid_recommender.py
|
HybridRecommender.recommend
|
python
|
def recommend(self, client_data, limit, extra_data={}):
    """Return up to *limit* suggestions mixing ensemble and curated picks.

    Draws alternately from the ensemble recommender and the curated
    whitelist so neither source dominates, de-duplicates by GUID, and
    returns the merged picks sorted by descending weight.  Returns an
    empty list when fewer than *limit* unique suggestions can be merged.

    :param client_data: client payload; must contain "client_id" and may
        contain "installed_addons" (GUIDs already installed).
    :param limit: number of suggestions to return.
    :param extra_data: passed through to the sub-recommenders.
    """
    preinstalled_addon_ids = client_data.get("installed_addons", [])

    # Compute an extended limit by adding the length of
    # the list of any preinstalled addons.
    extended_limit = limit + len(preinstalled_addon_ids)

    ensemble_suggestions = self._ensemble_recommender.recommend(
        client_data, extended_limit, extra_data
    )
    curated_suggestions = self._curated_recommender.recommend(
        client_data, extended_limit, extra_data
    )

    # Generate a set of results from each of the composite
    # recommenders. We select one item from each recommender
    # sequentially so that we do not bias one recommender over the
    # other.
    # NOTE(review): list.pop() takes from the *end* of each suggestion
    # list; if the sub-recommenders return results ordered best-first,
    # this consumes the lowest-weighted suggestions first — confirm
    # that is intentional.
    merged_results = set()
    while (
        len(merged_results) < limit
        and len(ensemble_suggestions) > 0
        and len(curated_suggestions) > 0
    ):
        r1 = ensemble_suggestions.pop()
        # De-duplicate on GUID (element 0 of each (guid, weight) pair).
        if r1[0] not in [temp[0] for temp in merged_results]:
            merged_results.add(r1)

        # Terminate early if we have an odd number for the limit
        if not (
            len(merged_results) < limit
            and len(ensemble_suggestions) > 0
            and len(curated_suggestions) > 0
        ):
            break

        r2 = curated_suggestions.pop()
        if r2[0] not in [temp[0] for temp in merged_results]:
            merged_results.add(r2)

    if len(merged_results) < limit:
        # All-or-nothing: a partial result set is discarded entirely.
        msg = (
            "Defaulting to empty results. Insufficient recommendations found for client: %s"
            % client_data["client_id"]
        )
        self.logger.info(msg)
        return []

    # Highest weight first (weight is element 1 of each pair).
    sorted_results = sorted(
        list(merged_results), key=op.itemgetter(1), reverse=True
    )

    log_data = (client_data["client_id"], str([r[0] for r in sorted_results]))
    self.logger.info(
        "Hybrid recommendations client_id: [%s], guids: [%s]" % log_data
    )
    return sorted_results
|
Hybrid recommendations simply select half recommendations from
the ensemble recommender, and half from the curated one.
Duplicate recommendations are accommodated by rank ordering
by weight.
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/hybrid_recommender.py#L102-L169
|
[
"def recommend(self, client_data, limit, extra_data={}):\n \"\"\"\n Curated recommendations are just random selections\n \"\"\"\n guids = self._curated_wl.get_randomized_guid_sample(limit)\n\n results = [(guid, 1.0) for guid in guids]\n\n log_data = (client_data[\"client_id\"], str(guids))\n self.logger.info(\n \"Curated recommendations client_id: [%s], guids: [%s]\" % log_data\n )\n return results\n"
] |
class HybridRecommender(AbstractRecommender):
"""
The EnsembleRecommender is a collection of recommenders where the
results from each recommendation are amplified or dampened by a
factor. The aggregate results are combined and used to recommend
addons for users.
"""
def __init__(self, ctx):
self._ctx = ctx
self.logger = self._ctx[IMozLogging].get_logger("taar")
self._ensemble_recommender = self._ctx["ensemble_recommender"]
self._curated_recommender = CuratedRecommender(self._ctx.child())
def can_recommend(self, client_data, extra_data={}):
"""The ensemble recommender is always going to be
available if at least one recommender is available"""
ensemble_recommend = self._ensemble_recommender.can_recommend(
client_data, extra_data
)
curated_recommend = self._curated_recommender.can_recommend(
client_data, extra_data
)
result = ensemble_recommend and curated_recommend
self.logger.info("Hybrid can_recommend: {}".format(result))
return result
|
mozilla/taar
|
taar/recommenders/ensemble_recommender.py
|
EnsembleRecommender.can_recommend
|
python
|
def can_recommend(self, client_data, extra_data={}):
    """Return the sum of the sub-recommenders' can_recommend values.

    Truthy (non-zero) when at least one sub-recommender is available.
    """
    available = 0
    for key in self.RECOMMENDER_KEYS:
        available += self._recommender_map[key].can_recommend(client_data)
    self.logger.info("Ensemble can_recommend: {}".format(available))
    return available
|
The ensemble recommender is always going to be
available if at least one recommender is available
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/ensemble_recommender.py#L54-L64
| null |
class EnsembleRecommender(AbstractRecommender):
"""
The EnsembleRecommender is a collection of recommenders where the
results from each recommendation are amplified or dampened by a
factor. The aggregate results are combined and used to recommend
addons for users.
"""
def __init__(self, ctx):
self.RECOMMENDER_KEYS = ["collaborative", "similarity", "locale"]
self._ctx = ctx
self.logger = self._ctx[IMozLogging].get_logger("taar.ensemble")
assert "recommender_factory" in self._ctx
self._init_from_ctx()
def _init_from_ctx(self):
# Copy the map of the recommenders
self._recommender_map = {}
recommender_factory = self._ctx["recommender_factory"]
for rkey in self.RECOMMENDER_KEYS:
self._recommender_map[rkey] = recommender_factory.create(rkey)
self._weight_cache = WeightCache(self._ctx.child())
self.logger.info("EnsembleRecommender initialized")
def recommend(self, client_data, limit, extra_data={}):
try:
results = self._recommend(client_data, limit, extra_data)
except Exception as e:
results = []
self._weight_cache._weights.force_expiry()
self.logger.exception(
"Ensemble recommender crashed for {}".format(
client_data.get("client_id", "no-client-id")
),
e,
)
return results
def _recommend(self, client_data, limit, extra_data={}):
"""
Ensemble recommendations are aggregated from individual
recommenders. The ensemble recommender applies a weight to
the recommendation outputs of each recommender to reorder the
recommendations to be a better fit.
The intuitive understanding is that the total space of
recommended addons across all recommenders will include the
'true' addons that should be recommended better than any
individual recommender. The ensemble method simply needs to
weight each recommender appropriate so that the ordering is
correct.
"""
self.logger.info("Ensemble recommend invoked")
preinstalled_addon_ids = client_data.get("installed_addons", [])
# Compute an extended limit by adding the length of
# the list of any preinstalled addons.
extended_limit = limit + len(preinstalled_addon_ids)
flattened_results = []
ensemble_weights = self._weight_cache.getWeights()
for rkey in self.RECOMMENDER_KEYS:
recommender = self._recommender_map[rkey]
if recommender.can_recommend(client_data):
raw_results = recommender.recommend(
client_data, extended_limit, extra_data
)
reweighted_results = []
for guid, weight in raw_results:
item = (guid, weight * ensemble_weights[rkey])
reweighted_results.append(item)
flattened_results.extend(reweighted_results)
# Sort the results by the GUID
flattened_results.sort(key=lambda item: item[0])
# group by the guid, sum up the weights for recurring GUID
# suggestions across all recommenders
guid_grouper = itertools.groupby(flattened_results, lambda item: item[0])
ensemble_suggestions = []
for (guid, guid_group) in guid_grouper:
weight_sum = sum([v for (g, v) in guid_group])
item = (guid, weight_sum)
ensemble_suggestions.append(item)
# Sort in reverse order (greatest weight to least)
ensemble_suggestions.sort(key=lambda x: -x[1])
filtered_ensemble_suggestions = [
(guid, weight)
for (guid, weight) in ensemble_suggestions
if guid not in preinstalled_addon_ids
]
results = filtered_ensemble_suggestions[:limit]
log_data = (
client_data["client_id"],
str(ensemble_weights),
str([r[0] for r in results]),
)
self.logger.info(
"client_id: [%s], ensemble_weight: [%s], guids: [%s]" % log_data
)
return results
|
mozilla/taar
|
taar/recommenders/ensemble_recommender.py
|
EnsembleRecommender._recommend
|
python
|
def _recommend(self, client_data, limit, extra_data={}):
    """Aggregate weighted suggestions from all sub-recommenders.

    Each sub-recommender's (guid, weight) output is scaled by that
    recommender's ensemble weight, weights for GUIDs suggested by more
    than one recommender are summed, addons the client already has
    installed are filtered out, and the top *limit* results are
    returned in descending weight order.
    """
    self.logger.info("Ensemble recommend invoked")
    preinstalled_addon_ids = client_data.get("installed_addons", [])

    # Compute an extended limit by adding the length of
    # the list of any preinstalled addons.
    extended_limit = limit + len(preinstalled_addon_ids)

    flattened_results = []
    ensemble_weights = self._weight_cache.getWeights()

    for rkey in self.RECOMMENDER_KEYS:
        recommender = self._recommender_map[rkey]
        if recommender.can_recommend(client_data):
            raw_results = recommender.recommend(
                client_data, extended_limit, extra_data
            )
            reweighted_results = []
            for guid, weight in raw_results:
                # Scale each suggestion by this recommender's weight.
                item = (guid, weight * ensemble_weights[rkey])
                reweighted_results.append(item)
            flattened_results.extend(reweighted_results)

    # Sort the results by the GUID
    # (itertools.groupby only groups *adjacent* equal keys, so the
    # list must be sorted by the same key first).
    flattened_results.sort(key=lambda item: item[0])

    # group by the guid, sum up the weights for recurring GUID
    # suggestions across all recommenders
    guid_grouper = itertools.groupby(flattened_results, lambda item: item[0])

    ensemble_suggestions = []
    for (guid, guid_group) in guid_grouper:
        weight_sum = sum([v for (g, v) in guid_group])
        item = (guid, weight_sum)
        ensemble_suggestions.append(item)

    # Sort in reverse order (greatest weight to least)
    ensemble_suggestions.sort(key=lambda x: -x[1])

    # Never re-suggest addons the client already has installed.
    filtered_ensemble_suggestions = [
        (guid, weight)
        for (guid, weight) in ensemble_suggestions
        if guid not in preinstalled_addon_ids
    ]

    results = filtered_ensemble_suggestions[:limit]

    log_data = (
        client_data["client_id"],
        str(ensemble_weights),
        str([r[0] for r in results]),
    )
    self.logger.info(
        "client_id: [%s], ensemble_weight: [%s], guids: [%s]" % log_data
    )
    return results
|
Ensemble recommendations are aggregated from individual
recommenders. The ensemble recommender applies a weight to
the recommendation outputs of each recommender to reorder the
recommendations to be a better fit.
The intuitive understanding is that the total space of
recommended addons across all recommenders will include the
'true' addons that should be recommended better than any
individual recommender. The ensemble method simply needs to
weight each recommender appropriately so that the ordering is
correct.
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/ensemble_recommender.py#L81-L150
| null |
class EnsembleRecommender(AbstractRecommender):
"""
The EnsembleRecommender is a collection of recommenders where the
results from each recommendation are amplified or dampened by a
factor. The aggregate results are combined and used to recommend
addons for users.
"""
def __init__(self, ctx):
self.RECOMMENDER_KEYS = ["collaborative", "similarity", "locale"]
self._ctx = ctx
self.logger = self._ctx[IMozLogging].get_logger("taar.ensemble")
assert "recommender_factory" in self._ctx
self._init_from_ctx()
def _init_from_ctx(self):
# Copy the map of the recommenders
self._recommender_map = {}
recommender_factory = self._ctx["recommender_factory"]
for rkey in self.RECOMMENDER_KEYS:
self._recommender_map[rkey] = recommender_factory.create(rkey)
self._weight_cache = WeightCache(self._ctx.child())
self.logger.info("EnsembleRecommender initialized")
def can_recommend(self, client_data, extra_data={}):
"""The ensemble recommender is always going to be
available if at least one recommender is available"""
result = sum(
[
self._recommender_map[rkey].can_recommend(client_data)
for rkey in self.RECOMMENDER_KEYS
]
)
self.logger.info("Ensemble can_recommend: {}".format(result))
return result
def recommend(self, client_data, limit, extra_data={}):
try:
results = self._recommend(client_data, limit, extra_data)
except Exception as e:
results = []
self._weight_cache._weights.force_expiry()
self.logger.exception(
"Ensemble recommender crashed for {}".format(
client_data.get("client_id", "no-client-id")
),
e,
)
return results
|
mozilla/taar
|
taar/recommenders/collaborative_recommender.py
|
synchronized
|
python
|
def synchronized(wrapped):
    """Decorator serializing calls on the receiver's ``_lock``.

    Assumes the wrapped callable is a method whose first positional
    argument owns an ``_lock`` context manager (e.g. an RLock).
    """
    @functools.wraps(wrapped)
    def _serialized(*args, **kwargs):
        owner = args[0]
        with owner._lock:
            return wrapped(*args, **kwargs)
    return _serialized
|
Synchronization decorator.
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/collaborative_recommender.py#L20-L29
| null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from srgutil.interfaces import IMozLogging
from .lazys3 import LazyJSONLoader
import numpy as np
import operator as op
import functools
import threading
from .base_recommender import AbstractRecommender
from .s3config import TAAR_ITEM_MATRIX_BUCKET
from .s3config import TAAR_ITEM_MATRIX_KEY
from .s3config import TAAR_ADDON_MAPPING_BUCKET
from .s3config import TAAR_ADDON_MAPPING_KEY
def java_string_hashcode(s):
    """Return Java's String.hashCode() of *s* as a signed 32-bit integer."""
    acc = 0
    for ch in s:
        acc = (acc * 31 + ord(ch)) & 0xFFFFFFFF
    # Reinterpret the unsigned 32-bit accumulator as a signed value.
    return ((acc + 0x80000000) & 0xFFFFFFFF) - 0x80000000


def positive_hash(s):
    """Map *s* to a non-negative hash by keeping the low 23 bits of the
    Java-style hash."""
    return java_string_hashcode(s) & 0x7FFFFF
class CollaborativeRecommender(AbstractRecommender):
""" The addon recommendation interface to the collaborative filtering model.
Usage example::
recommender = CollaborativeRecommender()
dists = recommender.recommend(client_info)
"""
def __init__(self, ctx):
self._ctx = ctx
self._lock = threading.RLock()
self._addon_mapping = LazyJSONLoader(
self._ctx, TAAR_ADDON_MAPPING_BUCKET, TAAR_ADDON_MAPPING_KEY
)
self._raw_item_matrix = LazyJSONLoader(
self._ctx, TAAR_ITEM_MATRIX_BUCKET, TAAR_ITEM_MATRIX_KEY
)
self.logger = self._ctx[IMozLogging].get_logger("taar")
self.model = None
@property
def addon_mapping(self):
return self._addon_mapping.get()[0]
@property
def raw_item_matrix(self):
val, new_copy = self._raw_item_matrix.get()
if val is not None and new_copy:
# Build a dense numpy matrix out of it.
num_rows = len(val)
num_cols = len(val[0]["features"])
self.model = np.zeros(shape=(num_rows, num_cols))
for index, row in enumerate(val):
self.model[index, :] = row["features"]
elif val is None and new_copy:
self.model = None
return val
def _load_json_models(self):
# Download the addon mappings.
if self.addon_mapping is None:
self.logger.error(
"Cannot download the addon mapping file {} {}".format(
TAAR_ADDON_MAPPING_BUCKET, TAAR_ADDON_MAPPING_KEY
)
)
if self.addon_mapping is None:
self.logger.error(
"Cannot download the model file {} {}".format(
TAAR_ITEM_MATRIX_BUCKET, TAAR_ITEM_MATRIX_KEY
)
)
@synchronized
def can_recommend(self, client_data, extra_data={}):
# We can't recommend if we don't have our data files.
if (
self.raw_item_matrix is None
or self.model is None
or self.addon_mapping is None
):
return False
# We only get meaningful recommendation if a client has at least an
# addon installed.
if len(client_data.get("installed_addons", [])) > 0:
return True
return False
def _recommend(self, client_data, limit, extra_data):
installed_addons_as_hashes = [
positive_hash(addon_id)
for addon_id in client_data.get("installed_addons", [])
]
# Build the query vector by setting the position of the queried addons to 1.0
# and the other to 0.0.
query_vector = np.array(
[
1.0 if (entry.get("id") in installed_addons_as_hashes) else 0.0
for entry in self.raw_item_matrix
]
)
# Build the user factors matrix.
user_factors = np.matmul(query_vector, self.model)
user_factors_transposed = np.transpose(user_factors)
# Compute the distance between the user and all the addons in the latent
# space.
distances = {}
for addon in self.raw_item_matrix:
# We don't really need to show the items we requested.
# They will always end up with the greatest score. Also
# filter out legacy addons from the suggestions.
hashed_id = addon.get("id")
str_hashed_id = str(hashed_id)
if (
hashed_id in installed_addons_as_hashes
or str_hashed_id not in self.addon_mapping
or self.addon_mapping[str_hashed_id].get("isWebextension", False)
is False
):
continue
dist = np.dot(user_factors_transposed, addon.get("features"))
# Read the addon ids from the "addon_mapping" looking it
# up by 'id' (which is an hashed value).
addon_id = self.addon_mapping[str_hashed_id].get("id")
distances[addon_id] = dist
# Sort the suggested addons by their score and return the
# sorted list of addon ids.
sorted_dists = sorted(distances.items(), key=op.itemgetter(1), reverse=True)
recommendations = [(s[0], s[1]) for s in sorted_dists[:limit]]
return recommendations
def recommend(self, client_data, limit, extra_data={}):
# Addons identifiers are stored as positive hash values within the model.
with self._lock:
try:
recommendations = self._recommend(client_data, limit, extra_data)
except Exception as e:
recommendations = []
self._addon_mapping.force_expiry()
self._raw_item_matrix.force_expiry()
self.logger.exception(
"Collaborative recommender crashed for {}".format(
client_data.get("client_id", "no-client-id")
),
e,
)
log_data = (client_data["client_id"], str([r[0] for r in recommendations]))
self.logger.info(
"collaborative_recommender_triggered, "
"client_id: [%s], "
"guids: [%s]" % log_data
)
return recommendations
|
mozilla/taar
|
taar/recommenders/lazys3.py
|
LazyJSONLoader.get
|
python
|
def get(self, transform=None):
    """Return (payload, refreshed) for the cached S3 JSON object.

    *refreshed* is False when the still-valid cached copy is returned,
    True when the cache was (re)loaded because it was empty or its TTL
    had lapsed.  *transform* is forwarded to the refresh step.
    """
    cache_is_usable = self._cached_copy is not None and not self.has_expired()
    if cache_is_usable:
        return self._cached_copy, False
    return self._refresh_cache(transform), True
|
Return the JSON defined at the S3 location in the constructor.
The get method will reload the S3 object after the TTL has
expired.
Fetch the JSON object from cache or S3 if necessary
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/lazys3.py#L43-L54
|
[
"def has_expired(self):\n return self._clock.time() > self._expiry_time\n"
] |
class LazyJSONLoader:
def __init__(self, ctx, s3_bucket, s3_key, ttl=14400):
self._ctx = ctx
self.logger = self._ctx[IMozLogging].get_logger("taar")
self._clock = self._ctx[IClock]
self._s3_bucket = s3_bucket
self._s3_key = s3_key
self._ttl = int(ttl)
self._expiry_time = 0
self._key_str = "{}|{}".format(self._s3_bucket, self._s3_key)
self._cached_copy = None
msg = "Cache expiry of {} is set to TTL of {} seconds".format(
self._key_str, self._ttl
)
self.logger.info(msg)
self._lock = threading.RLock()
self.logger.info("{} loader is initialized".format(self._key_str))
def force_expiry(self):
msg = "Existing model for {} reset to 0. Model was:".format(
self._key_str, str(self._cached_copy)
)
self.logger.info(msg)
self._expiry_time = 0
def has_expired(self):
return self._clock.time() > self._expiry_time
def _refresh_cache(self, transform=None):
with self._lock:
# If some requests get stale data while the S3 bucket is
# being reloaded - it's not the end of the world.
#
# Likewise when the TTL expires, it's possible for
# multiple threads to concurrently lock and update the
# cache. Again - not world ending.
#
# Immediately update the expiry time as we don't want other
# threads to wait on the lock while we update the
# cached_copy
#
self._expiry_time = self._clock.time() + self._ttl
raw_data = None
raw_bytes = None
try:
# We need to force a data reload from S3
config = Config(connect_timeout=10, retries={"max_attempts": 3})
s3 = boto3.resource("s3", config=config)
start_load = time.time()
raw_bytes = (
s3.Object(self._s3_bucket, self._s3_key).get()["Body"].read()
)
end_load = time.time()
load_time = end_load - start_load
raw_data = raw_bytes.decode("utf-8")
msg = "Loaded S3: {}. Byte count: {:d}. Time to Load: {:0.3f}"
msg_params = self._key_str, len(raw_bytes), load_time
self.logger.info(msg.format(*msg_params))
# It is possible to have corrupted files in S3, so
# protect against that.
try:
tmp = json.loads(raw_data)
if transform is not None:
tmp = transform(tmp)
self._cached_copy = tmp
except ValueError:
# In the event of an error, we want to try to reload
# the data so force the expiry to 0, but leave the
# existing cached data alone so we can still service
# requests.
self._expiry_time = 0
self.logger.error(
"Cannot parse JSON resource from S3",
extra={"bucket": self._s3_bucket, "key": self._s3_key},
)
return self._cached_copy
except Exception:
# In the event of an error, we want to try to reload
# the data so force the expiry to 0, but leave the
# existing cached data alone so we can still service
# requests.
self._expiry_time = 0
self.logger.exception(
"Failed to download from S3",
extra={"bucket": self._s3_bucket, "key": self._s3_key},
)
return self._cached_copy
|
mozilla/taar
|
bin/pipstrap.py
|
hashed_download
|
python
|
def hashed_download(url, temp, digest):
    """Download *url* into directory *temp*, verify its SHA-256 against
    *digest*, and return the downloaded file's path.

    Raises HashError when the actual digest does not match; the file is
    left on disk so it can be inspected.
    """
    # Based on pip 1.4.1's URLOpener but with cert verification removed
    # NOTE(review): TLS certificates are not verified here; integrity is
    # enforced solely by the SHA-256 check below.
    def opener():
        opener = build_opener(HTTPSHandler())
        # Strip out HTTPHandler to prevent MITM spoof:
        for handler in opener.handlers:
            if isinstance(handler, HTTPHandler):
                opener.handlers.remove(handler)
        return opener

    def read_chunks(response, chunk_size):
        # Yield successive chunk_size-byte pieces until EOF.
        while True:
            chunk = response.read(chunk_size)
            if not chunk:
                break
            yield chunk

    response = opener().open(url)
    # Name the local file after the last path segment of the URL.
    path = join(temp, urlparse(url).path.split('/')[-1])
    actual_hash = sha256()
    with open(path, 'wb') as file:
        for chunk in read_chunks(response, 4096):
            # Hash while writing so the payload is read only once.
            file.write(chunk)
            actual_hash.update(chunk)

    actual_digest = actual_hash.hexdigest()
    if actual_digest != digest:
        raise HashError(url, path, actual_digest, digest)
    return path
|
Download ``url`` to ``temp``, make sure it has the SHA-256 ``digest``,
and return its path.
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/bin/pipstrap.py#L65-L95
|
[
"def opener():\n opener = build_opener(HTTPSHandler())\n # Strip out HTTPHandler to prevent MITM spoof:\n for handler in opener.handlers:\n if isinstance(handler, HTTPHandler):\n opener.handlers.remove(handler)\n return opener\n",
"def read_chunks(response, chunk_size):\n while True:\n chunk = response.read(chunk_size)\n if not chunk:\n break\n yield chunk\n"
] |
#!/usr/bin/env python
"""A small script that can act as a trust root for installing pip 8
Embed this in your project, and your VCS checkout is all you have to trust. In
a post-peep era, this lets you claw your way to a hash-checking version of pip,
with which you can install the rest of your dependencies safely. All it assumes
is Python 2.7 or better and *some* version of pip already installed. If
anything goes wrong, it will exit with a non-zero status code.
"""
# This is here so embedded copies are MIT-compliant:
# Copyright (c) 2016 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
from hashlib import sha256
from os.path import join
from pipes import quote
from shutil import rmtree
from subprocess import check_output
from sys import exit
from tempfile import mkdtemp
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
PACKAGES = [
# Pip has no dependencies, as it vendors everything:
('https://pypi.python.org/packages/source/p/pip/pip-8.0.2.tar.gz',
'46f4bd0d8dfd51125a554568d646fe4200a3c2c6c36b9f2d06d2212148439521'),
# This version of setuptools has only optional dependencies:
('https://pypi.python.org/packages/source/s/setuptools/'
'setuptools-19.4.tar.gz',
'214bf29933f47cf25e6faa569f710731728a07a19cae91ea64f826051f68a8cf'),
# We require Python 2.7 or later because we don't support wheel's
# conditional dep on argparse. This version of wheel has no other
# dependencies:
('https://pypi.python.org/packages/source/w/wheel/wheel-0.26.0.tar.gz',
'eaad353805c180a47545a256e6508835b65a8e830ba1093ed8162f19a50a530c')
]
class HashError(Exception):
    """Raised when a downloaded file's SHA-256 digest does not match.

    args are (url, path, actual_digest, expected_digest).
    """

    def __str__(self):
        # Unpack the positional args set at raise time.
        url, path, actual, expected = self.args
        return ('{url} did not match the expected hash {expected}. Instead, '
                'it was {actual}. The file (left at {path}) may have been '
                'tampered with.'.format(**locals()))
def main():
    """Download and install the pinned pip/setuptools/wheel packages.

    Returns 0 on success and 1 on a hash mismatch; any other exception
    removes the temp directory and propagates.
    """
    temp = mkdtemp(prefix='pipstrap-')
    try:
        downloads = [hashed_download(url, temp, digest)
                     for url, digest in PACKAGES]
        # quote() guards against shell metacharacters in the temp paths.
        check_output('pip install --no-index --no-deps -U ' +
                     ' '.join(quote(d) for d in downloads),
                     shell=True)
    except HashError as exc:
        # Deliberately do NOT rmtree(temp): HashError's message points
        # at the offending file so it can be inspected.
        print(exc)
    except Exception:
        rmtree(temp)
        raise
    else:
        rmtree(temp)
        return 0
    # Reached only on HashError: report failure.
    return 1
if __name__ == '__main__':
exit(main())
|
mozilla/taar
|
taar/recommenders/similarity_recommender.py
|
SimilarityRecommender._build_features_caches
|
python
|
def _build_features_caches(self):
    """Build the continuous- and categorical-feature matrix caches.

    Populates self.continuous_features (num_donors x n_continuous,
    float) and self.categorical_features (num_donors x n_categorical,
    dtype=object) from the donors pool so per-request similarity
    computations can work on dense matrices.  Returns None (leaving
    the caches untouched) when either S3 payload is unavailable.
    """
    _donors_pool = self._donors_pool.get()[0]
    _lr_curves = self._lr_curves.get()[0]

    if _donors_pool is None or _lr_curves is None:
        # We need to have both donors_pool and lr_curves defined
        # to reconstruct the matrices
        return None

    self.num_donors = len(_donors_pool)

    # Build a numpy matrix cache for the continuous features.
    # NOTE(review): assumes every donor dict carries all
    # CONTINUOUS_FEATURES keys with numeric values — a missing key
    # yields None and would break the float row assignment; confirm
    # upstream guarantees this.
    self.continuous_features = np.zeros((self.num_donors, len(CONTINUOUS_FEATURES)))
    for idx, d in enumerate(_donors_pool):
        features = [d.get(specified_key) for specified_key in CONTINUOUS_FEATURES]
        self.continuous_features[idx] = features

    # Build the cache for categorical features.
    self.categorical_features = np.zeros(
        (self.num_donors, len(CATEGORICAL_FEATURES)), dtype="object"
    )
    for idx, d in enumerate(_donors_pool):
        features = [d.get(specified_key) for specified_key in CATEGORICAL_FEATURES]
        # dtype=object keeps heterogeneous values (strings, lists) intact.
        self.categorical_features[idx] = np.array([features], dtype="object")

    self.logger.info("Reconstructed matrices for similarity recommender")
|
This function build two feature cache matrices.
That's the self.categorical_features and
self.continuous_features attributes.
One matrix is for the continuous features and the other is for
the categorical features. This is needed to speed up the similarity
recommendation process.
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/similarity_recommender.py#L103-L136
| null |
class SimilarityRecommender(AbstractRecommender):
""" A recommender class that returns top N addons based on the
client similarity with a set of candidate addon donors.
Several telemetry fields are used to compute pairwise similarity
with the donors and similarities are converted into a likelihood
ratio of being a good match versus not being a good match. These
quantities are then used to rank specific addons for
recommendation.
This will load a json file containing updated list of addon donors
updated periodically by a separate weekly process using
Longitdudinal Telemetry data.
This recommender may provide useful recommendations when
collaborative_recommender may not work.
"""
def __init__(self, ctx):
self._ctx = ctx
if "similarity_donors_pool" in self._ctx:
self._donors_pool = self._ctx["similarity_donors_pool"]
else:
self._donors_pool = LazyJSONLoader(
self._ctx, TAAR_SIMILARITY_BUCKET, TAAR_SIMILARITY_DONOR_KEY
)
if "similarity_lr_curves" in self._ctx:
self._lr_curves = self._ctx["similarity_lr_curves"]
else:
self._lr_curves = LazyJSONLoader(
self._ctx, TAAR_SIMILARITY_BUCKET, TAAR_SIMILARITY_LRCURVES_KEY
)
self.logger = self._ctx[IMozLogging].get_logger("taar")
self._init_from_ctx()
@property
def donors_pool(self):
result, status = self._donors_pool.get()
if status:
# Force a reconstruction of the features cache on new
# donor pool data
self._build_features_caches()
return result
@property
def lr_curves(self):
result, status = self._lr_curves.get()
if status:
# Force a reconstruction of the features cache on new
# curve data
self._build_features_caches()
return result
def _init_from_ctx(self):
# Download the addon donors list.
if self.donors_pool is None:
self.logger.info(
"Similarity donors pool has not been fetched from S3: {}".format(
TAAR_SIMILARITY_DONOR_KEY
)
)
# Download the probability mapping curves from similarity to likelihood of being a good donor.
if self.lr_curves is None:
self.logger.error(
"Similarity LR Curves have not been fetched from S3: {}".format(
TAAR_SIMILARITY_LRCURVES_KEY
)
)
def can_recommend(self, client_data, extra_data={}):
# We can't recommend if we don't have our data files.
if self.donors_pool is None or self.lr_curves is None:
return False
# Check that the client info contains a non-None value for each required
# telemetry field.
REQUIRED_FIELDS = CATEGORICAL_FEATURES + CONTINUOUS_FEATURES
has_fields = all(
[client_data.get(f, None) is not None for f in REQUIRED_FIELDS]
)
if not has_fields:
# Can not add extra info because client_id may not be available.
self.logger.error("Unusable client data encountered")
return has_fields
def get_lr(self, score):
"""Compute a :float: likelihood ratio from a provided similarity score when compared
to two probability density functions which are computed and pre-loaded during init.
The numerator indicates the probability density that a particular similarity score
corresponds to a 'good' addon donor, i.e. a client that is similar in the sense of
telemetry variables. The denominator indicates the probability density that a particular
similarity score corresponds to a 'poor' addon donor
:param score: A similarity score between a pair of objects.
:returns: The approximate float likelihood ratio corresponding to provided score.
"""
# Find the index of the closest value that was precomputed in lr_curves
# This will significantly speed up |get_lr|.
# The lr_curves_cache is a list of scalar distance
# measurements
lr_curves_cache = np.array([s[0] for s in self.lr_curves])
# np.argmin produces the index to the part of the curve
# where distance is the smallest to the score which we are
# inspecting currently.
idx = np.argmin(abs(score - lr_curves_cache))
numer_val = self.lr_curves[idx][1][0]
denum_val = self.lr_curves[idx][1][1]
# Compute LR based on numerator and denominator values
return float(numer_val) / float(denum_val)
# # # CAUTION! # # #
# Any changes to this function must be reflected in the corresponding ETL job.
# https://github.com/mozilla/python_mozetl/blob/master/mozetl/taar/taar_similarity.py
#
def compute_clients_dist(self, client_data):
client_categorical_feats = [
client_data.get(specified_key) for specified_key in CATEGORICAL_FEATURES
]
client_continuous_feats = [
client_data.get(specified_key) for specified_key in CONTINUOUS_FEATURES
]
# Compute the distances between the user and the cached continuous features.
cont_features = distance.cdist(
self.continuous_features, np.array([client_continuous_feats]), "canberra"
)
# Compute the distances between the user and the cached categorical features.
cat_features = np.array(
[
[distance.hamming(x, client_categorical_feats)]
for x in self.categorical_features
]
)
# See the "Note about cdist optimization" in README.md for why we only use cdist once.
# Take the product of similarities to attain a univariate similarity score.
# Note that the addition of 0.001 to the continuous features
# sets a floor value to the distance in continuous similarity
# scores. There is no such floor value set for categorical
# features so this adjustment prioritizes categorical
# similarity over continous similarity
return (cont_features + FLOOR_DISTANCE_ADJUSTMENT) * cat_features
def get_similar_donors(self, client_data):
"""Computes a set of :float: similarity scores between a client and a set of candidate
donors for which comparable variables have been measured.
A custom similarity metric is defined in this function that combines the Hamming distance
for categorical variables with the Canberra distance for continuous variables into a
univariate similarity metric between the client and a set of candidate donors loaded during
init.
:param client_data: a client data payload including a subset fo telemetry fields.
:return: the sorted approximate likelihood ratio (np.array) corresponding to the
internally computed similarity score and a list of indices that link
each LR score with the related donor in the |self.donors_pool|.
"""
# Compute the distance between self and any comparable client.
distances = self.compute_clients_dist(client_data)
# Compute the LR based on precomputed distributions that relate the score
# to a probability of providing good addon recommendations.
lrs_from_scores = np.array(
[self.get_lr(distances[i]) for i in range(self.num_donors)]
)
# Sort the LR values (descending) and return the sorted values together with
# the original indices.
indices = (-lrs_from_scores).argsort()
return lrs_from_scores[indices], indices
def _recommend(self, client_data, limit, extra_data={}):
donor_set_ranking, indices = self.get_similar_donors(client_data)
donor_log_lrs = np.log(donor_set_ranking)
# 1.0 corresponds to a log likelihood ratio of 0 meaning that donors are equally
# likely to be 'good'. A value > 0.0 is sufficient, but we like this to be high.
if donor_log_lrs[0] < 0.1:
self.logger.warning(
"Addons recommended with very low similarity score, perhaps donor set is unrepresentative",
extra={"maximum_similarity": donor_set_ranking[0]},
)
# Retrieve the indices of the highest ranked donors and then append their
# installed addons.
index_lrs_iter = zip(indices[donor_log_lrs > 0.0], donor_log_lrs)
recommendations = []
for (index, lrs) in index_lrs_iter:
for term in self.donors_pool[index]["active_addons"]:
candidate = (term, lrs)
recommendations.append(candidate)
# Sort recommendations on key (guid name)
recommendations = sorted(recommendations, key=lambda x: x[0])
recommendations_out = []
# recommendations must be sorted for this to work.
for guid_key, group in groupby(recommendations, key=lambda x: x[0]):
recommendations_out.append((guid_key, sum(j for i, j in group)))
# now re-sort on the basis of LLR.
recommendations_out = sorted(recommendations_out, key=lambda x: -x[1])
log_data = (
client_data["client_id"],
str([r[0] for r in recommendations_out[:limit]]),
)
self.logger.info(
"similarity_recommender_triggered, "
"client_id: [%s], guids: [%s]" % log_data
)
return recommendations_out
def recommend(self, client_data, limit, extra_data={}):
try:
recommendations_out = self._recommend(client_data, limit, extra_data)
except Exception as e:
recommendations_out = []
self._donors_pool.force_expiry()
self._lr_curves.force_expiry()
self.logger.exception(
"Similarity recommender crashed for {}".format(
client_data.get("client_id", "no-client-id")
),
e,
)
return recommendations_out[:limit]
|
mozilla/taar
|
taar/recommenders/similarity_recommender.py
|
SimilarityRecommender.get_lr
|
python
|
def get_lr(self, score):
# Find the index of the closest value that was precomputed in lr_curves
# This will significantly speed up |get_lr|.
# The lr_curves_cache is a list of scalar distance
# measurements
lr_curves_cache = np.array([s[0] for s in self.lr_curves])
# np.argmin produces the index to the part of the curve
# where distance is the smallest to the score which we are
# inspecting currently.
idx = np.argmin(abs(score - lr_curves_cache))
numer_val = self.lr_curves[idx][1][0]
denum_val = self.lr_curves[idx][1][1]
# Compute LR based on numerator and denominator values
return float(numer_val) / float(denum_val)
|
Compute a :float: likelihood ratio from a provided similarity score when compared
to two probability density functions which are computed and pre-loaded during init.
The numerator indicates the probability density that a particular similarity score
corresponds to a 'good' addon donor, i.e. a client that is similar in the sense of
telemetry variables. The denominator indicates the probability density that a particular
similarity score corresponds to a 'poor' addon donor
:param score: A similarity score between a pair of objects.
:returns: The approximate float likelihood ratio corresponding to provided score.
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/similarity_recommender.py#L155-L183
| null |
class SimilarityRecommender(AbstractRecommender):
""" A recommender class that returns top N addons based on the
client similarity with a set of candidate addon donors.
Several telemetry fields are used to compute pairwise similarity
with the donors and similarities are converted into a likelihood
ratio of being a good match versus not being a good match. These
quantities are then used to rank specific addons for
recommendation.
This will load a json file containing updated list of addon donors
updated periodically by a separate weekly process using
Longitdudinal Telemetry data.
This recommender may provide useful recommendations when
collaborative_recommender may not work.
"""
def __init__(self, ctx):
self._ctx = ctx
if "similarity_donors_pool" in self._ctx:
self._donors_pool = self._ctx["similarity_donors_pool"]
else:
self._donors_pool = LazyJSONLoader(
self._ctx, TAAR_SIMILARITY_BUCKET, TAAR_SIMILARITY_DONOR_KEY
)
if "similarity_lr_curves" in self._ctx:
self._lr_curves = self._ctx["similarity_lr_curves"]
else:
self._lr_curves = LazyJSONLoader(
self._ctx, TAAR_SIMILARITY_BUCKET, TAAR_SIMILARITY_LRCURVES_KEY
)
self.logger = self._ctx[IMozLogging].get_logger("taar")
self._init_from_ctx()
@property
def donors_pool(self):
result, status = self._donors_pool.get()
if status:
# Force a reconstruction of the features cache on new
# donor pool data
self._build_features_caches()
return result
@property
def lr_curves(self):
result, status = self._lr_curves.get()
if status:
# Force a reconstruction of the features cache on new
# curve data
self._build_features_caches()
return result
def _init_from_ctx(self):
# Download the addon donors list.
if self.donors_pool is None:
self.logger.info(
"Similarity donors pool has not been fetched from S3: {}".format(
TAAR_SIMILARITY_DONOR_KEY
)
)
# Download the probability mapping curves from similarity to likelihood of being a good donor.
if self.lr_curves is None:
self.logger.error(
"Similarity LR Curves have not been fetched from S3: {}".format(
TAAR_SIMILARITY_LRCURVES_KEY
)
)
def _build_features_caches(self):
"""This function build two feature cache matrices.
That's the self.categorical_features and
self.continuous_features attributes.
One matrix is for the continuous features and the other is for
the categorical features. This is needed to speed up the similarity
recommendation process."""
_donors_pool = self._donors_pool.get()[0]
_lr_curves = self._lr_curves.get()[0]
if _donors_pool is None or _lr_curves is None:
# We need to have both donors_pool and lr_curves defined
# to reconstruct the matrices
return None
self.num_donors = len(_donors_pool)
# Build a numpy matrix cache for the continuous features.
self.continuous_features = np.zeros((self.num_donors, len(CONTINUOUS_FEATURES)))
for idx, d in enumerate(_donors_pool):
features = [d.get(specified_key) for specified_key in CONTINUOUS_FEATURES]
self.continuous_features[idx] = features
# Build the cache for categorical features.
self.categorical_features = np.zeros(
(self.num_donors, len(CATEGORICAL_FEATURES)), dtype="object"
)
for idx, d in enumerate(_donors_pool):
features = [d.get(specified_key) for specified_key in CATEGORICAL_FEATURES]
self.categorical_features[idx] = np.array([features], dtype="object")
self.logger.info("Reconstructed matrices for similarity recommender")
def can_recommend(self, client_data, extra_data={}):
# We can't recommend if we don't have our data files.
if self.donors_pool is None or self.lr_curves is None:
return False
# Check that the client info contains a non-None value for each required
# telemetry field.
REQUIRED_FIELDS = CATEGORICAL_FEATURES + CONTINUOUS_FEATURES
has_fields = all(
[client_data.get(f, None) is not None for f in REQUIRED_FIELDS]
)
if not has_fields:
# Can not add extra info because client_id may not be available.
self.logger.error("Unusable client data encountered")
return has_fields
# # # CAUTION! # # #
# Any changes to this function must be reflected in the corresponding ETL job.
# https://github.com/mozilla/python_mozetl/blob/master/mozetl/taar/taar_similarity.py
#
def compute_clients_dist(self, client_data):
client_categorical_feats = [
client_data.get(specified_key) for specified_key in CATEGORICAL_FEATURES
]
client_continuous_feats = [
client_data.get(specified_key) for specified_key in CONTINUOUS_FEATURES
]
# Compute the distances between the user and the cached continuous features.
cont_features = distance.cdist(
self.continuous_features, np.array([client_continuous_feats]), "canberra"
)
# Compute the distances between the user and the cached categorical features.
cat_features = np.array(
[
[distance.hamming(x, client_categorical_feats)]
for x in self.categorical_features
]
)
# See the "Note about cdist optimization" in README.md for why we only use cdist once.
# Take the product of similarities to attain a univariate similarity score.
# Note that the addition of 0.001 to the continuous features
# sets a floor value to the distance in continuous similarity
# scores. There is no such floor value set for categorical
# features so this adjustment prioritizes categorical
# similarity over continous similarity
return (cont_features + FLOOR_DISTANCE_ADJUSTMENT) * cat_features
def get_similar_donors(self, client_data):
"""Computes a set of :float: similarity scores between a client and a set of candidate
donors for which comparable variables have been measured.
A custom similarity metric is defined in this function that combines the Hamming distance
for categorical variables with the Canberra distance for continuous variables into a
univariate similarity metric between the client and a set of candidate donors loaded during
init.
:param client_data: a client data payload including a subset fo telemetry fields.
:return: the sorted approximate likelihood ratio (np.array) corresponding to the
internally computed similarity score and a list of indices that link
each LR score with the related donor in the |self.donors_pool|.
"""
# Compute the distance between self and any comparable client.
distances = self.compute_clients_dist(client_data)
# Compute the LR based on precomputed distributions that relate the score
# to a probability of providing good addon recommendations.
lrs_from_scores = np.array(
[self.get_lr(distances[i]) for i in range(self.num_donors)]
)
# Sort the LR values (descending) and return the sorted values together with
# the original indices.
indices = (-lrs_from_scores).argsort()
return lrs_from_scores[indices], indices
def _recommend(self, client_data, limit, extra_data={}):
donor_set_ranking, indices = self.get_similar_donors(client_data)
donor_log_lrs = np.log(donor_set_ranking)
# 1.0 corresponds to a log likelihood ratio of 0 meaning that donors are equally
# likely to be 'good'. A value > 0.0 is sufficient, but we like this to be high.
if donor_log_lrs[0] < 0.1:
self.logger.warning(
"Addons recommended with very low similarity score, perhaps donor set is unrepresentative",
extra={"maximum_similarity": donor_set_ranking[0]},
)
# Retrieve the indices of the highest ranked donors and then append their
# installed addons.
index_lrs_iter = zip(indices[donor_log_lrs > 0.0], donor_log_lrs)
recommendations = []
for (index, lrs) in index_lrs_iter:
for term in self.donors_pool[index]["active_addons"]:
candidate = (term, lrs)
recommendations.append(candidate)
# Sort recommendations on key (guid name)
recommendations = sorted(recommendations, key=lambda x: x[0])
recommendations_out = []
# recommendations must be sorted for this to work.
for guid_key, group in groupby(recommendations, key=lambda x: x[0]):
recommendations_out.append((guid_key, sum(j for i, j in group)))
# now re-sort on the basis of LLR.
recommendations_out = sorted(recommendations_out, key=lambda x: -x[1])
log_data = (
client_data["client_id"],
str([r[0] for r in recommendations_out[:limit]]),
)
self.logger.info(
"similarity_recommender_triggered, "
"client_id: [%s], guids: [%s]" % log_data
)
return recommendations_out
def recommend(self, client_data, limit, extra_data={}):
try:
recommendations_out = self._recommend(client_data, limit, extra_data)
except Exception as e:
recommendations_out = []
self._donors_pool.force_expiry()
self._lr_curves.force_expiry()
self.logger.exception(
"Similarity recommender crashed for {}".format(
client_data.get("client_id", "no-client-id")
),
e,
)
return recommendations_out[:limit]
|
mozilla/taar
|
taar/recommenders/similarity_recommender.py
|
SimilarityRecommender.get_similar_donors
|
python
|
def get_similar_donors(self, client_data):
# Compute the distance between self and any comparable client.
distances = self.compute_clients_dist(client_data)
# Compute the LR based on precomputed distributions that relate the score
# to a probability of providing good addon recommendations.
lrs_from_scores = np.array(
[self.get_lr(distances[i]) for i in range(self.num_donors)]
)
# Sort the LR values (descending) and return the sorted values together with
# the original indices.
indices = (-lrs_from_scores).argsort()
return lrs_from_scores[indices], indices
|
Computes a set of :float: similarity scores between a client and a set of candidate
donors for which comparable variables have been measured.
A custom similarity metric is defined in this function that combines the Hamming distance
for categorical variables with the Canberra distance for continuous variables into a
univariate similarity metric between the client and a set of candidate donors loaded during
init.
:param client_data: a client data payload including a subset fo telemetry fields.
:return: the sorted approximate likelihood ratio (np.array) corresponding to the
internally computed similarity score and a list of indices that link
each LR score with the related donor in the |self.donors_pool|.
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/similarity_recommender.py#L220-L247
|
[
"def compute_clients_dist(self, client_data):\n client_categorical_feats = [\n client_data.get(specified_key) for specified_key in CATEGORICAL_FEATURES\n ]\n client_continuous_feats = [\n client_data.get(specified_key) for specified_key in CONTINUOUS_FEATURES\n ]\n\n # Compute the distances between the user and the cached continuous features.\n cont_features = distance.cdist(\n self.continuous_features, np.array([client_continuous_feats]), \"canberra\"\n )\n\n # Compute the distances between the user and the cached categorical features.\n cat_features = np.array(\n [\n [distance.hamming(x, client_categorical_feats)]\n for x in self.categorical_features\n ]\n )\n\n # See the \"Note about cdist optimization\" in README.md for why we only use cdist once.\n\n # Take the product of similarities to attain a univariate similarity score.\n # Note that the addition of 0.001 to the continuous features\n # sets a floor value to the distance in continuous similarity\n # scores. There is no such floor value set for categorical\n # features so this adjustment prioritizes categorical\n # similarity over continous similarity\n return (cont_features + FLOOR_DISTANCE_ADJUSTMENT) * cat_features\n"
] |
class SimilarityRecommender(AbstractRecommender):
""" A recommender class that returns top N addons based on the
client similarity with a set of candidate addon donors.
Several telemetry fields are used to compute pairwise similarity
with the donors and similarities are converted into a likelihood
ratio of being a good match versus not being a good match. These
quantities are then used to rank specific addons for
recommendation.
This will load a json file containing updated list of addon donors
updated periodically by a separate weekly process using
Longitdudinal Telemetry data.
This recommender may provide useful recommendations when
collaborative_recommender may not work.
"""
def __init__(self, ctx):
self._ctx = ctx
if "similarity_donors_pool" in self._ctx:
self._donors_pool = self._ctx["similarity_donors_pool"]
else:
self._donors_pool = LazyJSONLoader(
self._ctx, TAAR_SIMILARITY_BUCKET, TAAR_SIMILARITY_DONOR_KEY
)
if "similarity_lr_curves" in self._ctx:
self._lr_curves = self._ctx["similarity_lr_curves"]
else:
self._lr_curves = LazyJSONLoader(
self._ctx, TAAR_SIMILARITY_BUCKET, TAAR_SIMILARITY_LRCURVES_KEY
)
self.logger = self._ctx[IMozLogging].get_logger("taar")
self._init_from_ctx()
@property
def donors_pool(self):
result, status = self._donors_pool.get()
if status:
# Force a reconstruction of the features cache on new
# donor pool data
self._build_features_caches()
return result
@property
def lr_curves(self):
result, status = self._lr_curves.get()
if status:
# Force a reconstruction of the features cache on new
# curve data
self._build_features_caches()
return result
def _init_from_ctx(self):
# Download the addon donors list.
if self.donors_pool is None:
self.logger.info(
"Similarity donors pool has not been fetched from S3: {}".format(
TAAR_SIMILARITY_DONOR_KEY
)
)
# Download the probability mapping curves from similarity to likelihood of being a good donor.
if self.lr_curves is None:
self.logger.error(
"Similarity LR Curves have not been fetched from S3: {}".format(
TAAR_SIMILARITY_LRCURVES_KEY
)
)
def _build_features_caches(self):
"""This function build two feature cache matrices.
That's the self.categorical_features and
self.continuous_features attributes.
One matrix is for the continuous features and the other is for
the categorical features. This is needed to speed up the similarity
recommendation process."""
_donors_pool = self._donors_pool.get()[0]
_lr_curves = self._lr_curves.get()[0]
if _donors_pool is None or _lr_curves is None:
# We need to have both donors_pool and lr_curves defined
# to reconstruct the matrices
return None
self.num_donors = len(_donors_pool)
# Build a numpy matrix cache for the continuous features.
self.continuous_features = np.zeros((self.num_donors, len(CONTINUOUS_FEATURES)))
for idx, d in enumerate(_donors_pool):
features = [d.get(specified_key) for specified_key in CONTINUOUS_FEATURES]
self.continuous_features[idx] = features
# Build the cache for categorical features.
self.categorical_features = np.zeros(
(self.num_donors, len(CATEGORICAL_FEATURES)), dtype="object"
)
for idx, d in enumerate(_donors_pool):
features = [d.get(specified_key) for specified_key in CATEGORICAL_FEATURES]
self.categorical_features[idx] = np.array([features], dtype="object")
self.logger.info("Reconstructed matrices for similarity recommender")
def can_recommend(self, client_data, extra_data={}):
# We can't recommend if we don't have our data files.
if self.donors_pool is None or self.lr_curves is None:
return False
# Check that the client info contains a non-None value for each required
# telemetry field.
REQUIRED_FIELDS = CATEGORICAL_FEATURES + CONTINUOUS_FEATURES
has_fields = all(
[client_data.get(f, None) is not None for f in REQUIRED_FIELDS]
)
if not has_fields:
# Can not add extra info because client_id may not be available.
self.logger.error("Unusable client data encountered")
return has_fields
def get_lr(self, score):
"""Compute a :float: likelihood ratio from a provided similarity score when compared
to two probability density functions which are computed and pre-loaded during init.
The numerator indicates the probability density that a particular similarity score
corresponds to a 'good' addon donor, i.e. a client that is similar in the sense of
telemetry variables. The denominator indicates the probability density that a particular
similarity score corresponds to a 'poor' addon donor
:param score: A similarity score between a pair of objects.
:returns: The approximate float likelihood ratio corresponding to provided score.
"""
# Find the index of the closest value that was precomputed in lr_curves
# This will significantly speed up |get_lr|.
# The lr_curves_cache is a list of scalar distance
# measurements
lr_curves_cache = np.array([s[0] for s in self.lr_curves])
# np.argmin produces the index to the part of the curve
# where distance is the smallest to the score which we are
# inspecting currently.
idx = np.argmin(abs(score - lr_curves_cache))
numer_val = self.lr_curves[idx][1][0]
denum_val = self.lr_curves[idx][1][1]
# Compute LR based on numerator and denominator values
return float(numer_val) / float(denum_val)
# # # CAUTION! # # #
# Any changes to this function must be reflected in the corresponding ETL job.
# https://github.com/mozilla/python_mozetl/blob/master/mozetl/taar/taar_similarity.py
#
def compute_clients_dist(self, client_data):
client_categorical_feats = [
client_data.get(specified_key) for specified_key in CATEGORICAL_FEATURES
]
client_continuous_feats = [
client_data.get(specified_key) for specified_key in CONTINUOUS_FEATURES
]
# Compute the distances between the user and the cached continuous features.
cont_features = distance.cdist(
self.continuous_features, np.array([client_continuous_feats]), "canberra"
)
# Compute the distances between the user and the cached categorical features.
cat_features = np.array(
[
[distance.hamming(x, client_categorical_feats)]
for x in self.categorical_features
]
)
# See the "Note about cdist optimization" in README.md for why we only use cdist once.
# Take the product of similarities to attain a univariate similarity score.
# Note that the addition of 0.001 to the continuous features
# sets a floor value to the distance in continuous similarity
# scores. There is no such floor value set for categorical
# features so this adjustment prioritizes categorical
# similarity over continous similarity
return (cont_features + FLOOR_DISTANCE_ADJUSTMENT) * cat_features
def _recommend(self, client_data, limit, extra_data={}):
donor_set_ranking, indices = self.get_similar_donors(client_data)
donor_log_lrs = np.log(donor_set_ranking)
# 1.0 corresponds to a log likelihood ratio of 0 meaning that donors are equally
# likely to be 'good'. A value > 0.0 is sufficient, but we like this to be high.
if donor_log_lrs[0] < 0.1:
self.logger.warning(
"Addons recommended with very low similarity score, perhaps donor set is unrepresentative",
extra={"maximum_similarity": donor_set_ranking[0]},
)
# Retrieve the indices of the highest ranked donors and then append their
# installed addons.
index_lrs_iter = zip(indices[donor_log_lrs > 0.0], donor_log_lrs)
recommendations = []
for (index, lrs) in index_lrs_iter:
for term in self.donors_pool[index]["active_addons"]:
candidate = (term, lrs)
recommendations.append(candidate)
# Sort recommendations on key (guid name)
recommendations = sorted(recommendations, key=lambda x: x[0])
recommendations_out = []
# recommendations must be sorted for this to work.
for guid_key, group in groupby(recommendations, key=lambda x: x[0]):
recommendations_out.append((guid_key, sum(j for i, j in group)))
# now re-sort on the basis of LLR.
recommendations_out = sorted(recommendations_out, key=lambda x: -x[1])
log_data = (
client_data["client_id"],
str([r[0] for r in recommendations_out[:limit]]),
)
self.logger.info(
"similarity_recommender_triggered, "
"client_id: [%s], guids: [%s]" % log_data
)
return recommendations_out
def recommend(self, client_data, limit, extra_data={}):
try:
recommendations_out = self._recommend(client_data, limit, extra_data)
except Exception as e:
recommendations_out = []
self._donors_pool.force_expiry()
self._lr_curves.force_expiry()
self.logger.exception(
"Similarity recommender crashed for {}".format(
client_data.get("client_id", "no-client-id")
),
e,
)
return recommendations_out[:limit]
|
mozilla/taar
|
taar/recommenders/recommendation_manager.py
|
RecommendationManager.recommend
|
python
|
def recommend(self, client_id, limit, extra_data={}):
if client_id in TEST_CLIENT_IDS:
data = self._whitelist_data.get()[0]
random.shuffle(data)
samples = data[:limit]
self.logger.info("Test ID detected [{}]".format(client_id))
return [(s, 1.1) for s in samples]
if client_id in EMPTY_TEST_CLIENT_IDS:
self.logger.info("Empty Test ID detected [{}]".format(client_id))
return []
client_info = self.profile_fetcher.get(client_id)
if client_info is None:
self.logger.info(
"Defaulting to empty results. No client info fetched from dynamo."
)
return []
results = self._ensemble_recommender.recommend(client_info, limit, extra_data)
return results
|
Return recommendations for the given client.
The recommendation logic will go through each recommender and
pick the first one that "can_recommend".
:param client_id: the client unique id.
:param limit: the maximum number of recommendations to return.
:param extra_data: a dictionary with extra client data.
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/recommendation_manager.py#L85-L116
|
[
"def recommend(self, client_data, limit, extra_data={}):\n try:\n results = self._recommend(client_data, limit, extra_data)\n except Exception as e:\n results = []\n self._weight_cache._weights.force_expiry()\n self.logger.exception(\n \"Ensemble recommender crashed for {}\".format(\n client_data.get(\"client_id\", \"no-client-id\")\n ),\n e,\n )\n\n return results\n",
"def get(self, transform=None):\n \"\"\"\n Return the JSON defined at the S3 location in the constructor.\n\n The get method will reload the S3 object after the TTL has\n expired.\n Fetch the JSON object from cache or S3 if necessary\n \"\"\"\n if not self.has_expired() and self._cached_copy is not None:\n return self._cached_copy, False\n\n return self._refresh_cache(transform), True\n"
] |
class RecommendationManager:
"""This class determines which of the set of recommendation
engines will actually be used to generate recommendations."""
def __init__(self, ctx):
"""Initialize the user profile fetcher and the recommenders.
"""
self._ctx = ctx
self.logger = self._ctx[IMozLogging].get_logger("taar")
assert "profile_fetcher" in self._ctx
self.profile_fetcher = ctx["profile_fetcher"]
self._ensemble_recommender = EnsembleRecommender(self._ctx.child())
# The whitelist data is only used for test client IDs
self._whitelist_data = LazyJSONLoader(
self._ctx, TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY
)
|
mozilla/taar
|
taar/profile_fetcher.py
|
ProfileController.get_client_profile
|
python
|
def get_client_profile(self, client_id):
try:
response = self._table.get_item(Key={'client_id': client_id})
compressed_bytes = response['Item']['json_payload'].value
json_byte_data = zlib.decompress(compressed_bytes)
json_str_data = json_byte_data.decode('utf8')
return json.loads(json_str_data)
except KeyError:
# No client ID found - not really an error
return None
except Exception as e:
# Return None on error. The caller in ProfileFetcher will
# handle error logging
msg = "Error loading client data for {}. Error: {}"
self.logger.debug(msg.format(client_id, str(e)))
return None
|
This fetches a single client record out of DynamoDB
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/profile_fetcher.py#L33-L50
| null |
class ProfileController:
"""
This class provides basic read/write access into a AWS DynamoDB
backed datastore. The profile controller and profile fetcher code
should eventually be merged as individually they don't "pull their
weight".
"""
def __init__(self, ctx, region_name, table_name):
"""
Configure access to the DynamoDB instance
"""
self._ctx = ctx
self.logger = self._ctx[IMozLogging].get_logger('taar')
self._ddb = boto3.resource('dynamodb', region_name=region_name)
self._table = self._ddb.Table(table_name)
|
mozilla/taar
|
taar/plugin.py
|
clean_promoted_guids
|
python
|
def clean_promoted_guids(raw_promoted_guids):
valid = True
for row in raw_promoted_guids:
if len(row) != 2:
valid = False
break
if not (
(isinstance(row[0], str) or isinstance(row[0], unicode))
and (isinstance(row[1], int) or isinstance(row[1], float)) # noqa
):
valid = False
break
if valid:
return raw_promoted_guids
return []
|
Verify that the promoted GUIDs are formatted correctly,
otherwise strip it down into an empty list.
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/plugin.py#L32-L52
| null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from decouple import config
from flask import request
import json
# TAAR specific libraries
from taar.context import default_context
from taar.profile_fetcher import ProfileFetcher
from taar import recommenders
# These are configurations that are specific to the TAAR library
TAAR_MAX_RESULTS = config("TAAR_MAX_RESULTS", default=10, cast=int)
class ResourceProxy(object):
def __init__(self):
self._resource = None
def setResource(self, rsrc):
self._resource = rsrc
def getResource(self):
return self._resource
PROXY_MANAGER = ResourceProxy()
def merge_promoted_guids(promoted_guids, recommended_guids):
guids = set()
final = []
tmp = sorted(
promoted_guids + [x for x in recommended_guids],
key=lambda x: x[1],
reverse=True,
)
for guid, weight in tmp:
if guid not in guids:
final.append((guid, weight))
guids.add(guid)
return final
def configure_plugin(app): # noqa: C901
"""
This is a factory function that configures all the routes for
flask given a particular library.
"""
@app.route(
"/v1/api/client_has_addon/<hashed_client_id>/<addon_id>/", methods=["GET"]
)
def client_has_addon(hashed_client_id, addon_id):
# Use the module global PROXY_MANAGER
global PROXY_MANAGER
recommendation_manager = check_proxy_manager(PROXY_MANAGER)
pf = recommendation_manager._ctx["profile_fetcher"]
client_meta = pf.get(hashed_client_id)
if client_meta is None:
# no valid client metadata was found for the given
# clientId
result = {"results": False, 'error': 'No client found'}
response = app.response_class(
response=json.dumps(result), status=200, mimetype="application/json"
)
return response
result = {"results": addon_id in client_meta.get("installed_addons", [])}
response = app.response_class(
response=json.dumps(result), status=200, mimetype="application/json"
)
return response
@app.route("/v1/api/recommendations/<hashed_client_id>/", methods=["GET", "POST"])
def recommendations(hashed_client_id):
"""Return a list of recommendations provided a telemetry client_id."""
# Use the module global PROXY_MANAGER
global PROXY_MANAGER
extra_data = {}
extra_data["options"] = {}
extra_data["options"]["promoted"] = []
try:
if request.method == "POST":
json_data = request.data
# At least Python3.5 returns request.data as bytes
# type instead of a string type.
# Both Python2.7 and Python3.7 return a string type
if type(json_data) == bytes:
json_data = json_data.decode("utf8")
if json_data != "":
post_data = json.loads(json_data)
raw_promoted_guids = post_data.get("options", {}).get(
"promoted", []
)
promoted_guids = clean_promoted_guids(raw_promoted_guids)
extra_data["options"]["promoted"] = promoted_guids
except Exception as e:
jdata = {}
jdata["results"] = []
jdata["error"] = "Invalid JSON in POST: {}".format(e)
return app.response_class(
response=json.dumps(jdata, status=400, mimetype="application/json")
)
# Coerce the uuid.UUID type into a string
client_id = str(hashed_client_id)
locale = request.args.get("locale", None)
if locale is not None:
extra_data["locale"] = locale
platform = request.args.get("platform", None)
if platform is not None:
extra_data["platform"] = platform
recommendation_manager = check_proxy_manager(PROXY_MANAGER)
recommendations = recommendation_manager.recommend(
client_id=client_id, limit=TAAR_MAX_RESULTS, extra_data=extra_data
)
promoted_guids = extra_data.get("options", {}).get("promoted", [])
recommendations = merge_promoted_guids(promoted_guids, recommendations)
# Strip out weights from TAAR results to maintain compatibility
# with TAAR 1.0
jdata = {"results": [x[0] for x in recommendations]}
response = app.response_class(
response=json.dumps(jdata), status=200, mimetype="application/json"
)
return response
def check_proxy_manager(PROXY_MANAGER):
if PROXY_MANAGER.getResource() is None:
ctx = default_context()
profile_fetcher = ProfileFetcher(ctx)
ctx["profile_fetcher"] = profile_fetcher
# Lock the context down after we've got basic bits installed
root_ctx = ctx.child()
r_factory = recommenders.RecommenderFactory(root_ctx)
root_ctx["recommender_factory"] = r_factory
instance = recommenders.RecommendationManager(root_ctx.child())
PROXY_MANAGER.setResource(instance)
return PROXY_MANAGER.getResource()
class MyPlugin:
def set(self, config_options):
"""
This setter is primarily so that we can instrument the
cached RecommendationManager implementation under test.
All plugins should implement this set method to enable
overwriting configuration options with a TAAR library.
"""
global PROXY_MANAGER
if "PROXY_RESOURCE" in config_options:
PROXY_MANAGER._resource = config_options["PROXY_RESOURCE"]
return MyPlugin()
|
mozilla/taar
|
taar/plugin.py
|
configure_plugin
|
python
|
def configure_plugin(app): # noqa: C901
@app.route(
"/v1/api/client_has_addon/<hashed_client_id>/<addon_id>/", methods=["GET"]
)
def client_has_addon(hashed_client_id, addon_id):
# Use the module global PROXY_MANAGER
global PROXY_MANAGER
recommendation_manager = check_proxy_manager(PROXY_MANAGER)
pf = recommendation_manager._ctx["profile_fetcher"]
client_meta = pf.get(hashed_client_id)
if client_meta is None:
# no valid client metadata was found for the given
# clientId
result = {"results": False, 'error': 'No client found'}
response = app.response_class(
response=json.dumps(result), status=200, mimetype="application/json"
)
return response
result = {"results": addon_id in client_meta.get("installed_addons", [])}
response = app.response_class(
response=json.dumps(result), status=200, mimetype="application/json"
)
return response
@app.route("/v1/api/recommendations/<hashed_client_id>/", methods=["GET", "POST"])
def recommendations(hashed_client_id):
"""Return a list of recommendations provided a telemetry client_id."""
# Use the module global PROXY_MANAGER
global PROXY_MANAGER
extra_data = {}
extra_data["options"] = {}
extra_data["options"]["promoted"] = []
try:
if request.method == "POST":
json_data = request.data
# At least Python3.5 returns request.data as bytes
# type instead of a string type.
# Both Python2.7 and Python3.7 return a string type
if type(json_data) == bytes:
json_data = json_data.decode("utf8")
if json_data != "":
post_data = json.loads(json_data)
raw_promoted_guids = post_data.get("options", {}).get(
"promoted", []
)
promoted_guids = clean_promoted_guids(raw_promoted_guids)
extra_data["options"]["promoted"] = promoted_guids
except Exception as e:
jdata = {}
jdata["results"] = []
jdata["error"] = "Invalid JSON in POST: {}".format(e)
return app.response_class(
response=json.dumps(jdata, status=400, mimetype="application/json")
)
# Coerce the uuid.UUID type into a string
client_id = str(hashed_client_id)
locale = request.args.get("locale", None)
if locale is not None:
extra_data["locale"] = locale
platform = request.args.get("platform", None)
if platform is not None:
extra_data["platform"] = platform
recommendation_manager = check_proxy_manager(PROXY_MANAGER)
recommendations = recommendation_manager.recommend(
client_id=client_id, limit=TAAR_MAX_RESULTS, extra_data=extra_data
)
promoted_guids = extra_data.get("options", {}).get("promoted", [])
recommendations = merge_promoted_guids(promoted_guids, recommendations)
# Strip out weights from TAAR results to maintain compatibility
# with TAAR 1.0
jdata = {"results": [x[0] for x in recommendations]}
response = app.response_class(
response=json.dumps(jdata), status=200, mimetype="application/json"
)
return response
def check_proxy_manager(PROXY_MANAGER):
if PROXY_MANAGER.getResource() is None:
ctx = default_context()
profile_fetcher = ProfileFetcher(ctx)
ctx["profile_fetcher"] = profile_fetcher
# Lock the context down after we've got basic bits installed
root_ctx = ctx.child()
r_factory = recommenders.RecommenderFactory(root_ctx)
root_ctx["recommender_factory"] = r_factory
instance = recommenders.RecommendationManager(root_ctx.child())
PROXY_MANAGER.setResource(instance)
return PROXY_MANAGER.getResource()
class MyPlugin:
def set(self, config_options):
"""
This setter is primarily so that we can instrument the
cached RecommendationManager implementation under test.
All plugins should implement this set method to enable
overwriting configuration options with a TAAR library.
"""
global PROXY_MANAGER
if "PROXY_RESOURCE" in config_options:
PROXY_MANAGER._resource = config_options["PROXY_RESOURCE"]
return MyPlugin()
|
This is a factory function that configures all the routes for
flask given a particular library.
|
train
|
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/plugin.py#L70-L192
| null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from decouple import config
from flask import request
import json
# TAAR specific libraries
from taar.context import default_context
from taar.profile_fetcher import ProfileFetcher
from taar import recommenders
# These are configurations that are specific to the TAAR library
TAAR_MAX_RESULTS = config("TAAR_MAX_RESULTS", default=10, cast=int)
class ResourceProxy(object):
def __init__(self):
self._resource = None
def setResource(self, rsrc):
self._resource = rsrc
def getResource(self):
return self._resource
PROXY_MANAGER = ResourceProxy()
def clean_promoted_guids(raw_promoted_guids):
""" Verify that the promoted GUIDs are formatted correctly,
otherwise strip it down into an empty list.
"""
valid = True
for row in raw_promoted_guids:
if len(row) != 2:
valid = False
break
if not (
(isinstance(row[0], str) or isinstance(row[0], unicode))
and (isinstance(row[1], int) or isinstance(row[1], float)) # noqa
):
valid = False
break
if valid:
return raw_promoted_guids
return []
def merge_promoted_guids(promoted_guids, recommended_guids):
guids = set()
final = []
tmp = sorted(
promoted_guids + [x for x in recommended_guids],
key=lambda x: x[1],
reverse=True,
)
for guid, weight in tmp:
if guid not in guids:
final.append((guid, weight))
guids.add(guid)
return final
|
amelchio/pysonos
|
pysonos/services.py
|
Service.iter_actions
|
python
|
def iter_actions(self):
# pylint: disable=too-many-locals
# pylint: disable=invalid-name
ns = '{urn:schemas-upnp-org:service-1-0}'
# get the scpd body as bytes, and feed directly to elementtree
# which likes to receive bytes
scpd_body = requests.get(self.base_url + self.scpd_url).content
tree = XML.fromstring(scpd_body)
# parse the state variables to get the relevant variable types
vartypes = {}
srvStateTables = tree.findall('{}serviceStateTable'.format(ns))
for srvStateTable in srvStateTables:
statevars = srvStateTable.findall('{}stateVariable'.format(ns))
for state in statevars:
name = state.findtext('{}name'.format(ns))
datatype = state.findtext('{}dataType'.format(ns))
default = state.findtext('{}defaultValue'.format(ns))
value_list_elt = state.find('{}allowedValueList'.format(ns))
if value_list_elt is None:
value_list_elt = ()
value_list = [item.text for item in value_list_elt] or None
value_range_elt = state.find('{}allowedValueRange'.format(ns))
if value_range_elt is None:
value_range_elt = ()
value_range = [item.text for item in value_range_elt] or None
vartypes[name] = Vartype(datatype, default, value_list,
value_range)
# find all the actions
actionLists = tree.findall('{}actionList'.format(ns))
for actionList in actionLists:
actions = actionList.findall('{}action'.format(ns))
for i in actions:
action_name = i.findtext('{}name'.format(ns))
argLists = i.findall('{}argumentList'.format(ns))
for argList in argLists:
args_iter = argList.findall('{}argument'.format(ns))
in_args = []
out_args = []
for arg in args_iter:
arg_name = arg.findtext('{}name'.format(ns))
direction = arg.findtext('{}direction'.format(ns))
related_variable = arg.findtext(
'{}relatedStateVariable'.format(ns))
vartype = vartypes[related_variable]
if direction == "in":
in_args.append(Argument(arg_name, vartype))
else:
out_args.append(Argument(arg_name, vartype))
yield Action(action_name, in_args, out_args)
|
Yield the service's actions with their arguments.
Yields:
`Action`: the next action.
Each action is an Action namedtuple, consisting of action_name
(a string), in_args (a list of Argument namedtuples consisting of name
and argtype), and out_args (ditto), eg::
Action(
name='SetFormat',
in_args=[
Argument(name='DesiredTimeFormat', vartype=<Vartype>),
Argument(name='DesiredDateFormat', vartype=<Vartype>)],
out_args=[]
)
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/services.py#L645-L711
| null |
class Service(object):
"""A class representing a UPnP service.
This is the base class for all Sonos Service classes. This class has a
dynamic method dispatcher. Calls to methods which are not explicitly
defined here are dispatched automatically to the service action with the
same name.
"""
# pylint: disable=bad-continuation
soap_body_template = (
'<?xml version="1.0"?>'
'<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
'<s:Body>'
'<u:{action} xmlns:u="urn:schemas-upnp-org:service:'
'{service_type}:{version}">'
'{arguments}'
'</u:{action}>'
'</s:Body>'
'</s:Envelope>') # noqa PEP8
def __init__(self, soco):
"""
Args:
soco (SoCo): A `SoCo` instance to which the UPnP Actions will be
sent
"""
#: `SoCo`: The `SoCo` instance to which UPnP Actions are sent
self.soco = soco
# Some defaults. Some or all these will need to be overridden
# specifically in a sub-class. There is other information we could
# record, but this will do for the moment. Info about a Sonos device is
# available at <IP_address>/xml/device_description.xml in the
# <service> tags
#: str: The UPnP service type.
self.service_type = self.__class__.__name__
#: str: The UPnP service version.
self.version = 1
self.service_id = self.service_type
#: str: The base URL for sending UPnP Actions.
self.base_url = 'http://{}:1400'.format(self.soco.ip_address)
#: str: The UPnP Control URL.
self.control_url = '/{}/Control'.format(self.service_type)
#: str: The service control protocol description URL.
self.scpd_url = '/xml/{}{}.xml'.format(
self.service_type, self.version)
#: str: The service eventing subscription URL.
self.event_subscription_url = '/{}/Event'.format(self.service_type)
#: A cache for storing the result of network calls. By default, this is
#: a `TimedCache` with a default timeout=0.
self.cache = Cache(default_timeout=0)
# Caching variables for actions and event_vars, will be filled when
# they are requested for the first time
self._actions = None
self._event_vars = None
# From table 3.3 in
# http://upnp.org/specs/arch/UPnP-arch-DeviceArchitecture-v1.1.pdf
# This list may not be complete, but should be good enough to be going
# on with. Error codes between 700-799 are defined for particular
# services, and may be overriden in subclasses. Error codes >800
# are generally SONOS specific. NB It may well be that SONOS does not
# use some of these error codes.
# pylint: disable=invalid-name
self.UPNP_ERRORS = {
400: 'Bad Request',
401: 'Invalid Action',
402: 'Invalid Args',
404: 'Invalid Var',
412: 'Precondition Failed',
501: 'Action Failed',
600: 'Argument Value Invalid',
601: 'Argument Value Out of Range',
602: 'Optional Action Not Implemented',
603: 'Out Of Memory',
604: 'Human Intervention Required',
605: 'String Argument Too Long',
606: 'Action Not Authorized',
607: 'Signature Failure',
608: 'Signature Missing',
609: 'Not Encrypted',
610: 'Invalid Sequence',
611: 'Invalid Control URL',
612: 'No Such Session',
}
self.DEFAULT_ARGS = {}
def __getattr__(self, action):
"""Called when a method on the instance cannot be found.
Causes an action to be sent to UPnP server. See also
`object.__getattr__`.
Args:
action (str): The name of the unknown method.
Returns:
callable: The callable to be invoked. .
"""
# Define a function to be invoked as the method, which calls
# send_command.
def _dispatcher(self, *args, **kwargs):
"""Dispatch to send_command."""
return self.send_command(action, *args, **kwargs)
# rename the function so it appears to be the called method. We
# probably don't need this, but it doesn't harm
_dispatcher.__name__ = action
# _dispatcher is now an unbound menthod, but we need a bound method.
# This turns an unbound method into a bound method (i.e. one that
# takes self - an instance of the class - as the first parameter)
# pylint: disable=no-member
method = _dispatcher.__get__(self, self.__class__)
# Now we have a bound method, we cache it on this instance, so that
# next time we don't have to go through this again
setattr(self, action, method)
log.debug("Dispatching method %s", action)
# return our new bound method, which will be called by Python
return method
@staticmethod
def wrap_arguments(args=None):
"""Wrap a list of tuples in xml ready to pass into a SOAP request.
Args:
args (list): a list of (name, value) tuples specifying the
name of each argument and its value, eg
``[('InstanceID', 0), ('Speed', 1)]``. The value
can be a string or something with a string representation. The
arguments are escaped and wrapped in <name> and <value> tags.
Example:
>>> from soco import SoCo
>>> device = SoCo('192.168.1.101')
>>> s = Service(device)
>>> print(s.wrap_arguments([('InstanceID', 0), ('Speed', 1)]))
<InstanceID>0</InstanceID><Speed>1</Speed>'
"""
if args is None:
args = []
tags = []
for name, value in args:
tag = "<{name}>{value}</{name}>".format(
name=name, value=escape("%s" % value, {'"': """}))
# % converts to unicode because we are using unicode literals.
# Avoids use of 'unicode' function which does not exist in python 3
tags.append(tag)
xml = "".join(tags)
return xml
@staticmethod
def unwrap_arguments(xml_response):
"""Extract arguments and their values from a SOAP response.
Args:
xml_response (str): SOAP/xml response text (unicode,
not utf-8).
Returns:
dict: a dict of ``{argument_name: value}`` items.
"""
# A UPnP SOAP response (including headers) looks like this:
# HTTP/1.1 200 OK
# CONTENT-LENGTH: bytes in body
# CONTENT-TYPE: text/xml; charset="utf-8" DATE: when response was
# generated
# EXT:
# SERVER: OS/version UPnP/1.0 product/version
#
# <?xml version="1.0"?>
# <s:Envelope
# xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
# s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
# <s:Body>
# <u:actionNameResponse
# xmlns:u="urn:schemas-upnp-org:service:serviceType:v">
# <argumentName>out arg value</argumentName>
# ... other out args and their values go here, if any
# </u:actionNameResponse>
# </s:Body>
# </s:Envelope>
# Get all tags in order. Elementree (in python 2.x) seems to prefer to
# be fed bytes, rather than unicode
xml_response = xml_response.encode('utf-8')
try:
tree = XML.fromstring(xml_response)
except XML.ParseError:
# Try to filter illegal xml chars (as unicode), in case that is
# the reason for the parse error
filtered = illegal_xml_re.sub('', xml_response.decode('utf-8'))\
.encode('utf-8')
tree = XML.fromstring(filtered)
# Get the first child of the <Body> tag which will be
# <{actionNameResponse}> (depends on what actionName is). Turn the
# children of this into a {tagname, content} dict. XML unescaping
# is carried out for us by elementree.
action_response = tree.find(
"{http://schemas.xmlsoap.org/soap/envelope/}Body")[0]
return dict((i.tag, i.text or "") for i in action_response)
def compose_args(self, action_name, in_argdict):
"""Compose the argument list from an argument dictionary, with
respect for default values.
Args:
action_name (str): The name of the action to be performed.
in_argdict (dict): Arguments as a dict, eg
``{'InstanceID': 0, 'Speed': 1}. The values
can be a string or something with a string representation.
Returns:
list: a list of ``(name, value)`` tuples.
Raises:
`AttributeError`: If this service does not support the action.
`ValueError`: If the argument lists do not match the action
signature.
"""
for action in self.actions:
if action.name == action_name:
# The found 'action' will be visible from outside the loop
break
else:
raise AttributeError('Unknown Action: {0}'.format(action_name))
# Check for given argument names which do not occur in the expected
# argument list
# pylint: disable=undefined-loop-variable
unexpected = set(in_argdict) - \
set(argument.name for argument in action.in_args)
if unexpected:
raise ValueError(
"Unexpected argument '{0}'. Method signature: {1}"
.format(next(iter(unexpected)), str(action))
)
# List the (name, value) tuples for each argument in the argument list
composed = []
for argument in action.in_args:
name = argument.name
if name in in_argdict:
composed.append((name, in_argdict[name]))
continue
if name in self.DEFAULT_ARGS:
composed.append((name, self.DEFAULT_ARGS[name]))
continue
if argument.vartype.default is not None:
composed.append((name, argument.vartype.default))
raise ValueError(
"Missing argument '{0}'. Method signature: {1}"
.format(argument.name, str(action))
)
return composed
def build_command(self, action, args=None):
"""Build a SOAP request.
Args:
action (str): the name of an action (a string as specified in the
service description XML file) to be sent.
args (list, optional): Relevant arguments as a list of (name,
value) tuples.
Returns:
tuple: a tuple containing the POST headers (as a dict) and a
string containing the relevant SOAP body. Does not set
content-length, or host headers, which are completed upon
sending.
"""
# A complete request should look something like this:
# POST path of control URL HTTP/1.1
# HOST: host of control URL:port of control URL
# CONTENT-LENGTH: bytes in body
# CONTENT-TYPE: text/xml; charset="utf-8"
# SOAPACTION: "urn:schemas-upnp-org:service:serviceType:v#actionName"
#
# <?xml version="1.0"?>
# <s:Envelope
# xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
# s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
# <s:Body>
# <u:actionName
# xmlns:u="urn:schemas-upnp-org:service:serviceType:v">
# <argumentName>in arg value</argumentName>
# ... other in args and their values go here, if any
# </u:actionName>
# </s:Body>
# </s:Envelope>
arguments = self.wrap_arguments(args)
body = self.soap_body_template.format(
arguments=arguments, action=action, service_type=self.service_type,
version=self.version)
soap_action_template = \
"urn:schemas-upnp-org:service:{service_type}:{version}#{action}"
soap_action = soap_action_template.format(
service_type=self.service_type, version=self.version,
action=action)
headers = {'Content-Type': 'text/xml; charset="utf-8"',
'SOAPACTION': soap_action}
# Note that although we set the charset to utf-8 here, in fact the
# body is still unicode. It will only be converted to bytes when it
# is set over the network
return (headers, body)
def send_command(self, action, args=None, cache=None, cache_timeout=None,
**kwargs):
"""Send a command to a Sonos device.
Args:
action (str): the name of an action (a string as specified in the
service description XML file) to be sent.
args (list, optional): Relevant arguments as a list of (name,
value) tuples, as an alternative to ``kwargs``.
cache (Cache): A cache is operated so that the result will be
stored for up to ``cache_timeout`` seconds, and a subsequent
call with the same arguments within that period will be
returned from the cache, saving a further network call. The
cache may be invalidated or even primed from another thread
(for example if a UPnP event is received to indicate that
the state of the Sonos device has changed). If
``cache_timeout`` is missing or `None`, the cache will use a
default value (which may be 0 - see `cache`). By default,
the cache identified by the service's `cache` attribute will
be used, but a different cache object may be specified in
the `cache` parameter.
kwargs: Relevant arguments for the command.
Returns:
dict: a dict of ``{argument_name, value}`` items.
Raises:
`AttributeError`: If this service does not support the action.
`ValueError`: If the argument lists do not match the action
signature.
`SoCoUPnPException`: if a SOAP error occurs.
`UnknownSoCoException`: if an unknonwn UPnP error occurs.
`requests.exceptions.HTTPError`: if an http error occurs.
"""
if args is None:
args = self.compose_args(action, kwargs)
if cache is None:
cache = self.cache
result = cache.get(action, args)
if result is not None:
log.debug("Cache hit")
return result
# Cache miss, so go ahead and make a network call
headers, body = self.build_command(action, args)
log.info("Sending %s %s to %s", action, args, self.soco.ip_address)
log.debug("Sending %s, %s", headers, prettify(body))
# Convert the body to bytes, and send it.
try:
response = requests.post(
self.base_url + self.control_url,
headers=headers,
data=body.encode('utf-8'),
timeout=3,
)
except requests.exceptions.RequestException:
raise SoCoException('Connection error')
log.debug("Received %s, %s", response.headers, response.text)
status = response.status_code
log.info(
"Received status %s from %s", status, self.soco.ip_address)
if status == 200:
# The response is good. Get the output params, and return them.
# NB an empty dict is a valid result. It just means that no
# params are returned. By using response.text, we rely upon
# the requests library to convert to unicode for us.
result = self.unwrap_arguments(response.text) or True
# Store in the cache. There is no need to do this if there was an
# error, since we would want to try a network call again.
cache.put(result, action, args, timeout=cache_timeout)
return result
elif status == 500:
# Internal server error. UPnP requires this to be returned if the
# device does not like the action for some reason. The returned
# content will be a SOAP Fault. Parse it and raise an error.
self.handle_upnp_error(response.text)
else:
# Something else has gone wrong. Probably a network error. Let
# Requests handle it
response.raise_for_status()
return None
def handle_upnp_error(self, xml_error):
"""Disect a UPnP error, and raise an appropriate exception.
Args:
xml_error (str): a unicode string containing the body of the
UPnP/SOAP Fault response. Raises an exception containing the
error code.
"""
# An error code looks something like this:
# HTTP/1.1 500 Internal Server Error
# CONTENT-LENGTH: bytes in body
# CONTENT-TYPE: text/xml; charset="utf-8"
# DATE: when response was generated
# EXT:
# SERVER: OS/version UPnP/1.0 product/version
# <?xml version="1.0"?>
# <s:Envelope
# xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
# s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
# <s:Body>
# <s:Fault>
# <faultcode>s:Client</faultcode>
# <faultstring>UPnPError</faultstring>
# <detail>
# <UPnPError xmlns="urn:schemas-upnp-org:control-1-0">
# <errorCode>error code</errorCode>
# <errorDescription>error string</errorDescription>
# </UPnPError>
# </detail>
# </s:Fault>
# </s:Body>
# </s:Envelope>
#
# All that matters for our purposes is the errorCode.
# errorDescription is not required, and Sonos does not seem to use it.
# NB need to encode unicode strings before passing to ElementTree
xml_error = xml_error.encode('utf-8')
error = XML.fromstring(xml_error)
log.debug("Error %s", xml_error)
error_code = error.findtext(
'.//{urn:schemas-upnp-org:control-1-0}errorCode')
if error_code is not None:
description = self.UPNP_ERRORS.get(int(error_code), '')
raise SoCoUPnPException(
message='UPnP Error {} received: {} from {}'.format(
error_code, description, self.soco.ip_address),
error_code=error_code,
error_description=description,
error_xml=xml_error
)
# Unknown error, so just return the entire response
log.error("Unknown error received from %s", self.soco.ip_address)
raise UnknownSoCoException(xml_error)
def subscribe(
self, requested_timeout=None, auto_renew=False, event_queue=None):
"""Subscribe to the service's events.
Args:
requested_timeout (int, optional): If requested_timeout is
provided, a subscription valid for that
number of seconds will be requested, but not guaranteed. Check
`Subscription.timeout` on return to find out what period of
validity is actually allocated.
auto_renew (bool): If auto_renew is `True`, the subscription will
automatically be renewed just before it expires, if possible.
Default is `False`.
event_queue (:class:`~queue.Queue`): a thread-safe queue object on
which received events will be put. If not specified,
a (:class:`~queue.Queue`) will be created and used.
Returns:
`Subscription`: an insance of `Subscription`, representing
the new subscription.
To unsubscribe, call the `unsubscribe` method on the returned object.
"""
subscription = Subscription(
self, event_queue)
subscription.subscribe(
requested_timeout=requested_timeout, auto_renew=auto_renew)
return subscription
def _update_cache_on_event(self, event):
"""Update the cache when an event is received.
This will be called before an event is put onto the event queue. Events
will often indicate that the Sonos device's state has changed, so this
opportunity is made available for the service to update its cache. The
event will be put onto the event queue once this method returns.
`event` is an Event namedtuple: ('sid', 'seq', 'service', 'variables')
.. warning:: This method will not be called from the main thread but
by one or more threads, which handle the events as they come in.
You *must not* access any class, instance or global variables
without appropriate locks. Treat all parameters passed to this
method as read only.
"""
@property
def actions(self):
"""The service's actions with their arguments.
Returns:
list(`Action`): A list of Action namedtuples, consisting of
action_name (str), in_args (list of Argument namedtuples,
consisting of name and argtype), and out_args (ditto).
The return value looks like this::
[
Action(
name='GetMute',
in_args=[
Argument(name='InstanceID', ...),
Argument(
name='Channel',
vartype='string',
list=['Master', 'LF', 'RF', 'SpeakerOnly'],
range=None
)
],
out_args=[
Argument(name='CurrentMute, ...)
]
)
Action(...)
]
Its string representation will look like this::
GetMute(InstanceID: ui4, Channel: [Master, LF, RF, SpeakerOnly]) \
-> {CurrentMute: boolean}
"""
if self._actions is None:
self._actions = list(self.iter_actions())
return self._actions
@property
def event_vars(self):
"""The service's eventable variables.
Returns:
list(tuple): A list of (variable name, data type) tuples.
"""
if self._event_vars is None:
self._event_vars = list(self.iter_event_vars())
return self._event_vars
def iter_event_vars(self):
"""Yield the services eventable variables.
Yields:
`tuple`: a tuple of (variable name, data type).
"""
# pylint: disable=invalid-name
ns = '{urn:schemas-upnp-org:service-1-0}'
scpd_body = requests.get(self.base_url + self.scpd_url).text
tree = XML.fromstring(scpd_body.encode('utf-8'))
# parse the state variables to get the relevant variable types
statevars = tree.findall('{}stateVariable'.format(ns))
for state in statevars:
# We are only interested if 'sendEvents' is 'yes', i.e this
# is an eventable variable
if state.attrib['sendEvents'] == "yes":
name = state.findtext('{}name'.format(ns))
vartype = state.findtext('{}dataType'.format(ns))
yield (name, vartype)
|
amelchio/pysonos
|
pysonos/events.py
|
parse_event_xml
|
python
|
def parse_event_xml(xml_event):
result = {}
tree = XML.fromstring(xml_event)
# property values are just under the propertyset, which
# uses this namespace
properties = tree.findall(
'{urn:schemas-upnp-org:event-1-0}property')
for prop in properties: # pylint: disable=too-many-nested-blocks
for variable in prop:
# Special handling for a LastChange event specially. For details on
# LastChange events, see
# http://upnp.org/specs/av/UPnP-av-RenderingControl-v1-Service.pdf
# and http://upnp.org/specs/av/UPnP-av-AVTransport-v1-Service.pdf
if variable.tag == "LastChange":
last_change_tree = XML.fromstring(
variable.text.encode('utf-8'))
# We assume there is only one InstanceID tag. This is true for
# Sonos, as far as we know.
# InstanceID can be in one of two namespaces, depending on
# whether we are looking at an avTransport event, a
# renderingControl event, or a Queue event
# (there, it is named QueueID)
instance = last_change_tree.find(
"{urn:schemas-upnp-org:metadata-1-0/AVT/}InstanceID")
if instance is None:
instance = last_change_tree.find(
"{urn:schemas-upnp-org:metadata-1-0/RCS/}InstanceID")
if instance is None:
instance = last_change_tree.find(
"{urn:schemas-sonos-com:metadata-1-0/Queue/}QueueID")
# Look at each variable within the LastChange event
for last_change_var in instance:
tag = last_change_var.tag
# Remove any namespaces from the tags
if tag.startswith('{'):
tag = tag.split('}', 1)[1]
# Un-camel case it
tag = camel_to_underscore(tag)
# Now extract the relevant value for the variable.
# The UPnP specs suggest that the value of any variable
# evented via a LastChange Event will be in the 'val'
# attribute, but audio related variables may also have a
# 'channel' attribute. In addition, it seems that Sonos
# sometimes uses a text value instead: see
# http://forums.sonos.com/showthread.php?t=34663
value = last_change_var.get('val')
if value is None:
value = last_change_var.text
# If DIDL metadata is returned, convert it to a music
# library data structure
if value.startswith('<DIDL-Lite'):
# Wrap any parsing exception in a SoCoFault, so the
# user can handle it
try:
didl = from_didl_string(value)
if not didl:
continue
value = didl[0]
except SoCoException as original_exception:
log.debug("Event contains illegal metadata"
"for '%s'.\n"
"Error message: '%s'\n"
"The result will be a SoCoFault.",
tag, str(original_exception))
event_parse_exception = EventParseException(
tag, value, original_exception
)
value = SoCoFault(event_parse_exception)
channel = last_change_var.get('channel')
if channel is not None:
if result.get(tag) is None:
result[tag] = {}
result[tag][channel] = value
else:
result[tag] = value
else:
result[camel_to_underscore(variable.tag)] = variable.text
return result
|
Parse the body of a UPnP event.
Args:
xml_event (bytes): bytes containing the body of the event encoded
with utf-8.
Returns:
dict: A dict with keys representing the evented variables. The
relevant value will usually be a string representation of the
variable's value, but may on occasion be:
* a dict (eg when the volume changes, the value will itself be a
dict containing the volume for each channel:
:code:`{'Volume': {'LF': '100', 'RF': '100', 'Master': '36'}}`)
* an instance of a `DidlObject` subclass (eg if it represents
track metadata).
* a `SoCoFault` (if a variable contains illegal metadata)
Example:
Run this code, and change your volume, tracks etc::
from __future__ import print_function
try:
from queue import Empty
except: # Py2.7
from Queue import Empty
import soco
from pprint import pprint
from soco.events import event_listener
# pick a device at random
device = soco.discover().pop()
print (device.player_name)
sub = device.renderingControl.subscribe()
sub2 = device.avTransport.subscribe()
while True:
try:
event = sub.events.get(timeout=0.5)
pprint (event.variables)
except Empty:
pass
try:
event = sub2.events.get(timeout=0.5)
pprint (event.variables)
except Empty:
pass
except KeyboardInterrupt:
sub.unsubscribe()
sub2.unsubscribe()
event_listener.stop()
break
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/events.py#L37-L170
|
[
"def from_didl_string(string):\n \"\"\"Convert a unicode xml string to a list of `DIDLObjects <DidlObject>`.\n\n Args:\n string (str): A unicode string containing an XML representation of one\n or more DIDL-Lite items (in the form ``'<DIDL-Lite ...>\n ...</DIDL-Lite>'``)\n\n Returns:\n list: A list of one or more instances of `DidlObject` or a subclass\n \"\"\"\n items = []\n root = XML.fromstring(string.encode('utf-8'))\n for elt in root:\n if elt.tag.endswith('item') or elt.tag.endswith('container'):\n item_class = elt.findtext(ns_tag('upnp', 'class'))\n\n # In case this class has an # specified unofficial\n # subclass, ignore it by stripping it from item_class\n if '.#' in item_class:\n item_class = item_class[:item_class.find('.#')]\n\n try:\n cls = _DIDL_CLASS_TO_CLASS[item_class]\n except KeyError:\n raise DIDLMetadataError(\"Unknown UPnP class: %s\" % item_class)\n try:\n item = cls.from_element(elt)\n item = attempt_datastructure_upgrade(item)\n items.append(item)\n except DIDLMetadataError as ex:\n _LOG.info(\"Ignored '%s' on %s\", ex, XML.tostring(elt))\n else:\n # <desc> elements are allowed as an immediate child of <DIDL-Lite>\n # according to the spec, but I have not seen one there in Sonos, so\n # we treat them as illegal. May need to fix this if this\n # causes problems.\n raise DIDLMetadataError(\"Illegal child of DIDL element: <%s>\"\n % elt.tag)\n _LOG.debug(\n 'Created data structures: %.20s (CUT) from Didl string \"%.20s\" (CUT)',\n items, string,\n )\n return items\n",
"def camel_to_underscore(string):\n \"\"\"Convert camelcase to lowercase and underscore.\n\n Recipe from http://stackoverflow.com/a/1176023\n\n Args:\n string (str): The string to convert.\n\n Returns:\n str: The converted string.\n \"\"\"\n string = FIRST_CAP_RE.sub(r'\\1_\\2', string)\n return ALL_CAP_RE.sub(r'\\1_\\2', string).lower()\n"
] |
# -*- coding: utf-8 -*-
# pylint: disable=not-context-manager
# NOTE: The pylint not-content-manager warning is disabled pending the fix of
# a bug in pylint: https://github.com/PyCQA/pylint/issues/782
# Disable while we have Python 2.x compatability
# pylint: disable=useless-object-inheritance
"""Classes to handle Sonos UPnP Events and Subscriptions."""
from __future__ import unicode_literals
import atexit
import logging
import socket
import threading
import time
import weakref
import requests
from . import config
from .compat import (
Queue, BaseHTTPRequestHandler, URLError, socketserver, urlopen
)
from .data_structures_entry import from_didl_string
from .exceptions import SoCoException, SoCoFault, EventParseException
from .utils import camel_to_underscore
from .xml import XML
log = logging.getLogger(__name__) # pylint: disable=C0103
# pylint: disable=too-many-branches
class Event(object):
"""A read-only object representing a received event.
The values of the evented variables can be accessed via the ``variables``
dict, or as attributes on the instance itself. You should treat all
attributes as read-only.
Args:
sid (str): the subscription id.
seq (str): the event sequence number for that subscription.
timestamp (str): the time that the event was received (from Python's
`time.time` function).
service (str): the service which is subscribed to the event.
variables (dict, optional): contains the ``{names: values}`` of the
evented variables. Defaults to `None`. The values may be
`SoCoFault` objects if the metadata could not be parsed.
Raises:
AttributeError: Not all attributes are returned with each event. An
`AttributeError` will be raised if you attempt to access as an
attribute a variable which was not returned in the event.
Example:
>>> print event.variables['transport_state']
'STOPPED'
>>> print event.transport_state
'STOPPED'
"""
# pylint: disable=too-few-public-methods, too-many-arguments
def __init__(self, sid, seq, service, timestamp, variables=None):
# Initialisation has to be done like this, because __setattr__ is
# overridden, and will not allow direct setting of attributes
self.__dict__['sid'] = sid
self.__dict__['seq'] = seq
self.__dict__['timestamp'] = timestamp
self.__dict__['service'] = service
self.__dict__['variables'] = variables if variables is not None else {}
def __getattr__(self, name):
if name in self.variables:
return self.variables[name]
else:
raise AttributeError('No such attribute: %s' % name)
def __setattr__(self, name, value):
"""Disable (most) attempts to set attributes.
This is not completely foolproof. It just acts as a warning! See
`object.__setattr__`.
"""
raise TypeError('Event object does not support attribute assignment')
class EventServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
"""A TCP server which handles each new request in a new thread."""
allow_reuse_address = True
class EventNotifyHandler(BaseHTTPRequestHandler):
"""Handles HTTP ``NOTIFY`` Verbs sent to the listener server."""
def do_NOTIFY(self): # pylint: disable=invalid-name
"""Serve a ``NOTIFY`` request.
A ``NOTIFY`` request will be sent by a Sonos device when a state
variable changes. See the `UPnP Spec §4.3 [pdf]
<http://upnp.org/specs/arch/UPnP-arch
-DeviceArchitecture-v1.1.pdf>`_ for details.
"""
timestamp = time.time()
headers = requests.structures.CaseInsensitiveDict(self.headers)
seq = headers['seq'] # Event sequence number
sid = headers['sid'] # Event Subscription Identifier
content_length = int(headers['content-length'])
content = self.rfile.read(content_length)
# Find the relevant service and queue from the sid
with _subscriptions_lock:
subscription = _subscriptions.get(sid)
# It might have been removed by another thread
if subscription:
service = subscription.service
log.info(
"Event %s received for %s service on thread %s at %s", seq,
service.service_id, threading.current_thread(), timestamp)
log.debug("Event content: %s", content)
variables = parse_event_xml(content)
# Build the Event object
event = Event(sid, seq, service, timestamp, variables)
# pass the event details on to the service so it can update its
# cache.
# pylint: disable=protected-access
service._update_cache_on_event(event)
# Put the event on the queue
subscription.events.put(event)
else:
log.info("No service registered for %s", sid)
self.send_response(200)
self.end_headers()
def log_message(self, fmt, *args): # pylint: disable=arguments-differ
# Divert standard webserver logging to the debug log
log.debug(fmt, *args)
class EventServerThread(threading.Thread):
"""The thread in which the event listener server will run."""
def __init__(self, address):
"""
Args:
address (tuple): The (ip, port) address on which the server
should listen.
"""
super(EventServerThread, self).__init__()
#: `threading.Event`: Used to signal that the server should stop.
self.stop_flag = threading.Event()
#: `tuple`: The (ip, port) address on which the server is
#: configured to listen.
self.address = address
def run(self):
"""Start the server on the local IP at port 1400 (default).
Handling of requests is delegated to an instance of the
`EventNotifyHandler` class.
"""
listener = EventServer(self.address, EventNotifyHandler)
log.info("Event listener running on %s", listener.server_address)
# Listen for events until told to stop
while not self.stop_flag.is_set():
listener.handle_request()
class EventListener(object):
"""The Event Listener.
Runs an http server in a thread which is an endpoint for ``NOTIFY``
requests from Sonos devices.
"""
def __init__(self):
super(EventListener, self).__init__()
#: `bool`: Indicates whether the server is currently running
self.is_running = False
self._start_lock = threading.Lock()
self._listener_thread = None
#: `tuple`: The address (ip, port) on which the server is
#: configured to listen.
# Empty for the moment. (It is set in `start`)
self.address = ()
def start(self, any_zone):
"""Start the event listener listening on the local machine at port 1400
(default)
Make sure that your firewall allows connections to this port
Args:
any_zone (SoCo): Any Sonos device on the network. It does not
matter which device. It is used only to find a local IP address
reachable by the Sonos net.
Note:
The port on which the event listener listens is configurable.
See `config.EVENT_LISTENER_PORT`
"""
# Find our local network IP address which is accessible to the
# Sonos net, see http://stackoverflow.com/q/166506
with self._start_lock:
if not self.is_running:
# Use configured IP address if there is one, else detect
# automatically.
if config.EVENT_LISTENER_IP:
ip_address = config.EVENT_LISTENER_IP
else:
temp_sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
temp_sock.connect((any_zone.ip_address,
config.EVENT_LISTENER_PORT))
ip_address = temp_sock.getsockname()[0]
temp_sock.close()
# Start the event listener server in a separate thread.
self.address = (ip_address, config.EVENT_LISTENER_PORT)
self._listener_thread = EventServerThread(self.address)
self._listener_thread.daemon = True
self._listener_thread.start()
self.is_running = True
log.info("Event listener started")
def stop(self):
"""Stop the event listener."""
# Signal the thread to stop before handling the next request
self._listener_thread.stop_flag.set()
# Send a dummy request in case the http server is currently listening
try:
urlopen(
'http://%s:%s/' % (self.address[0], self.address[1]))
except URLError:
# If the server is already shut down, we receive a socket error,
# which we ignore.
pass
# wait for the thread to finish
self._listener_thread.join()
self.is_running = False
log.info("Event listener stopped")
class Subscription(object):
"""A class representing the subscription to a UPnP event."""
# pylint: disable=too-many-instance-attributes
def __init__(self, service, event_queue=None):
"""
Args:
service (Service): The SoCo `Service` to which the subscription
should be made.
event_queue (:class:`~queue.Queue`): A queue on which received
events will be put. If not specified, a queue will be
created and used.
"""
super(Subscription, self).__init__()
self.service = service
#: `str`: A unique ID for this subscription
self.sid = None
#: `int`: The amount of time in seconds until the subscription expires.
self.timeout = None
#: `bool`: An indication of whether the subscription is subscribed.
self.is_subscribed = False
#: :class:`~queue.Queue`: The queue on which events are placed.
self.events = Queue() if event_queue is None else event_queue
#: `int`: The period (seconds) for which the subscription is requested
self.requested_timeout = None
# A flag to make sure that an unsubscribed instance is not
# resubscribed
self._has_been_unsubscribed = False
# The time when the subscription was made
self._timestamp = None
# Used to keep track of the auto_renew thread
self._auto_renew_thread = None
self._auto_renew_thread_flag = threading.Event()
def subscribe(self, requested_timeout=None, auto_renew=False):
"""Subscribe to the service.
If requested_timeout is provided, a subscription valid for that number
of seconds will be requested, but not guaranteed. Check
`timeout` on return to find out what period of validity is
actually allocated.
Note:
SoCo will try to unsubscribe any subscriptions which are still
subscribed on program termination, but it is good practice for
you to clean up by making sure that you call :meth:`unsubscribe`
yourself.
Args:
requested_timeout(int, optional): The timeout to be requested.
auto_renew (bool, optional): If `True`, renew the subscription
automatically shortly before timeout. Default `False`.
"""
class AutoRenewThread(threading.Thread):
"""Used by the auto_renew code to renew a subscription from within
a thread.
"""
def __init__(self, interval, stop_flag, sub, *args, **kwargs):
super(AutoRenewThread, self).__init__(*args, **kwargs)
self.interval = interval
self.sub = sub
self.stop_flag = stop_flag
self.daemon = True
def run(self):
sub = self.sub
stop_flag = self.stop_flag
interval = self.interval
while not stop_flag.wait(interval):
log.info("Autorenewing subscription %s", sub.sid)
sub.renew()
# TIMEOUT is provided for in the UPnP spec, but it is not clear if
# Sonos pays any attention to it. A timeout of 86400 secs always seems
# to be allocated
self.requested_timeout = requested_timeout
if self._has_been_unsubscribed:
raise SoCoException(
'Cannot resubscribe instance once unsubscribed')
service = self.service
# The event listener must be running, so start it if not
if not event_listener.is_running:
event_listener.start(service.soco)
# an event subscription looks like this:
# SUBSCRIBE publisher path HTTP/1.1
# HOST: publisher host:publisher port
# CALLBACK: <delivery URL>
# NT: upnp:event
# TIMEOUT: Second-requested subscription duration (optional)
# pylint: disable=unbalanced-tuple-unpacking
ip_address, port = event_listener.address
if config.EVENT_ADVERTISE_IP:
ip_address = config.EVENT_ADVERTISE_IP
headers = {
'Callback': '<http://{}:{}>'.format(ip_address, port),
'NT': 'upnp:event'
}
if requested_timeout is not None:
headers["TIMEOUT"] = "Second-{}".format(requested_timeout)
# Lock out EventNotifyHandler during registration
with _subscriptions_lock:
response = requests.request(
'SUBSCRIBE', service.base_url + service.event_subscription_url,
headers=headers)
response.raise_for_status()
self.sid = response.headers['sid']
timeout = response.headers['timeout']
# According to the spec, timeout can be "infinite" or "second-123"
# where 123 is a number of seconds. Sonos uses "Second-123" (with
# a capital letter)
if timeout.lower() == 'infinite':
self.timeout = None
else:
self.timeout = int(timeout.lstrip('Second-'))
self._timestamp = time.time()
self.is_subscribed = True
log.info(
"Subscribed to %s, sid: %s",
service.base_url + service.event_subscription_url, self.sid)
# Add the subscription to the master dict so it can be looked up
# by sid
_subscriptions[self.sid] = self
# Register this subscription to be unsubscribed at exit if still alive
# This will not happen if exit is abnormal (eg in response to a
# signal or fatal interpreter error - see the docs for `atexit`).
atexit.register(self.unsubscribe)
# Set up auto_renew
if not auto_renew:
return
# Autorenew just before expiry, say at 85% of self.timeout seconds
interval = self.timeout * 85 / 100
auto_renew_thread = AutoRenewThread(
interval, self._auto_renew_thread_flag, self)
auto_renew_thread.start()
def renew(self, requested_timeout=None):
"""Renew the event subscription.
You should not try to renew a subscription which has been
unsubscribed, or once it has expired.
Args:
requested_timeout (int, optional): The period for which a renewal
request should be made. If None (the default), use the timeout
requested on subscription.
"""
# NB This code is sometimes called from a separate thread (when
# subscriptions are auto-renewed. Be careful to ensure thread-safety
if self._has_been_unsubscribed:
raise SoCoException(
'Cannot renew subscription once unsubscribed')
if not self.is_subscribed:
raise SoCoException(
'Cannot renew subscription before subscribing')
if self.time_left == 0:
raise SoCoException(
'Cannot renew subscription after expiry')
# SUBSCRIBE publisher path HTTP/1.1
# HOST: publisher host:publisher port
# SID: uuid:subscription UUID
# TIMEOUT: Second-requested subscription duration (optional)
headers = {
'SID': self.sid
}
if requested_timeout is None:
requested_timeout = self.requested_timeout
if requested_timeout is not None:
headers["TIMEOUT"] = "Second-{}".format(requested_timeout)
response = requests.request(
'SUBSCRIBE',
self.service.base_url + self.service.event_subscription_url,
headers=headers)
response.raise_for_status()
timeout = response.headers['timeout']
# According to the spec, timeout can be "infinite" or "second-123"
# where 123 is a number of seconds. Sonos uses "Second-123" (with a
# a capital letter)
if timeout.lower() == 'infinite':
self.timeout = None
else:
self.timeout = int(timeout.lstrip('Second-'))
self._timestamp = time.time()
self.is_subscribed = True
log.info(
"Renewed subscription to %s, sid: %s",
self.service.base_url + self.service.event_subscription_url,
self.sid)
def unsubscribe(self):
"""Unsubscribe from the service's events.
Once unsubscribed, a Subscription instance should not be reused
"""
# Trying to unsubscribe if already unsubscribed, or not yet
# subscribed, fails silently
if self._has_been_unsubscribed or not self.is_subscribed:
return
# Cancel any auto renew
self._auto_renew_thread_flag.set()
# Send an unsubscribe request like this:
# UNSUBSCRIBE publisher path HTTP/1.1
# HOST: publisher host:publisher port
# SID: uuid:subscription UUID
headers = {
'SID': self.sid
}
response = None
try:
response = requests.request(
'UNSUBSCRIBE',
self.service.base_url + self.service.event_subscription_url,
headers=headers,
timeout=3)
except requests.exceptions.RequestException:
pass
self.is_subscribed = False
self._timestamp = None
log.info(
"Unsubscribed from %s, sid: %s",
self.service.base_url + self.service.event_subscription_url,
self.sid)
# remove queue from event queues and sid to service mappings
with _subscriptions_lock:
try:
del _subscriptions[self.sid]
except KeyError:
pass
self._has_been_unsubscribed = True
# Ignore "412 Client Error: Precondition Failed for url:"
# from rebooted speakers.
if response and response.status_code != 412:
response.raise_for_status()
@property
def time_left(self):
"""
`int`: The amount of time left until the subscription expires (seconds)
If the subscription is unsubscribed (or not yet subscribed),
`time_left` is 0.
"""
if self._timestamp is None:
return 0
else:
time_left = self.timeout - (time.time() - self._timestamp)
return time_left if time_left > 0 else 0
def __enter__(self):
if not self.is_subscribed:
self.subscribe()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.unsubscribe()
# pylint: disable=C0103
event_listener = EventListener()
# Thread safe mapping.
# Used to store a mapping of sid to subscription.
_subscriptions = weakref.WeakValueDictionary()
# The lock to go with it
# You must only ever access the mapping in the context of this lock, eg:
# with _subscriptions_lock:
# queue = _subscriptions[sid].events
_subscriptions_lock = threading.Lock()
|
amelchio/pysonos
|
pysonos/events.py
|
Subscription.unsubscribe
|
python
|
def unsubscribe(self):
# Trying to unsubscribe if already unsubscribed, or not yet
# subscribed, fails silently
if self._has_been_unsubscribed or not self.is_subscribed:
return
# Cancel any auto renew
self._auto_renew_thread_flag.set()
# Send an unsubscribe request like this:
# UNSUBSCRIBE publisher path HTTP/1.1
# HOST: publisher host:publisher port
# SID: uuid:subscription UUID
headers = {
'SID': self.sid
}
response = None
try:
response = requests.request(
'UNSUBSCRIBE',
self.service.base_url + self.service.event_subscription_url,
headers=headers,
timeout=3)
except requests.exceptions.RequestException:
pass
self.is_subscribed = False
self._timestamp = None
log.info(
"Unsubscribed from %s, sid: %s",
self.service.base_url + self.service.event_subscription_url,
self.sid)
# remove queue from event queues and sid to service mappings
with _subscriptions_lock:
try:
del _subscriptions[self.sid]
except KeyError:
pass
self._has_been_unsubscribed = True
# Ignore "412 Client Error: Precondition Failed for url:"
# from rebooted speakers.
if response and response.status_code != 412:
response.raise_for_status()
|
Unsubscribe from the service's events.
Once unsubscribed, a Subscription instance should not be reused
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/events.py#L586-L633
| null |
class Subscription(object):
"""A class representing the subscription to a UPnP event."""
# pylint: disable=too-many-instance-attributes
def __init__(self, service, event_queue=None):
"""
Args:
service (Service): The SoCo `Service` to which the subscription
should be made.
event_queue (:class:`~queue.Queue`): A queue on which received
events will be put. If not specified, a queue will be
created and used.
"""
super(Subscription, self).__init__()
self.service = service
#: `str`: A unique ID for this subscription
self.sid = None
#: `int`: The amount of time in seconds until the subscription expires.
self.timeout = None
#: `bool`: An indication of whether the subscription is subscribed.
self.is_subscribed = False
#: :class:`~queue.Queue`: The queue on which events are placed.
self.events = Queue() if event_queue is None else event_queue
#: `int`: The period (seconds) for which the subscription is requested
self.requested_timeout = None
# A flag to make sure that an unsubscribed instance is not
# resubscribed
self._has_been_unsubscribed = False
# The time when the subscription was made
self._timestamp = None
# Used to keep track of the auto_renew thread
self._auto_renew_thread = None
self._auto_renew_thread_flag = threading.Event()
def subscribe(self, requested_timeout=None, auto_renew=False):
"""Subscribe to the service.
If requested_timeout is provided, a subscription valid for that number
of seconds will be requested, but not guaranteed. Check
`timeout` on return to find out what period of validity is
actually allocated.
Note:
SoCo will try to unsubscribe any subscriptions which are still
subscribed on program termination, but it is good practice for
you to clean up by making sure that you call :meth:`unsubscribe`
yourself.
Args:
requested_timeout(int, optional): The timeout to be requested.
auto_renew (bool, optional): If `True`, renew the subscription
automatically shortly before timeout. Default `False`.
"""
class AutoRenewThread(threading.Thread):
"""Used by the auto_renew code to renew a subscription from within
a thread.
"""
def __init__(self, interval, stop_flag, sub, *args, **kwargs):
super(AutoRenewThread, self).__init__(*args, **kwargs)
self.interval = interval
self.sub = sub
self.stop_flag = stop_flag
self.daemon = True
def run(self):
sub = self.sub
stop_flag = self.stop_flag
interval = self.interval
while not stop_flag.wait(interval):
log.info("Autorenewing subscription %s", sub.sid)
sub.renew()
# TIMEOUT is provided for in the UPnP spec, but it is not clear if
# Sonos pays any attention to it. A timeout of 86400 secs always seems
# to be allocated
self.requested_timeout = requested_timeout
if self._has_been_unsubscribed:
raise SoCoException(
'Cannot resubscribe instance once unsubscribed')
service = self.service
# The event listener must be running, so start it if not
if not event_listener.is_running:
event_listener.start(service.soco)
# an event subscription looks like this:
# SUBSCRIBE publisher path HTTP/1.1
# HOST: publisher host:publisher port
# CALLBACK: <delivery URL>
# NT: upnp:event
# TIMEOUT: Second-requested subscription duration (optional)
# pylint: disable=unbalanced-tuple-unpacking
ip_address, port = event_listener.address
if config.EVENT_ADVERTISE_IP:
ip_address = config.EVENT_ADVERTISE_IP
headers = {
'Callback': '<http://{}:{}>'.format(ip_address, port),
'NT': 'upnp:event'
}
if requested_timeout is not None:
headers["TIMEOUT"] = "Second-{}".format(requested_timeout)
# Lock out EventNotifyHandler during registration
with _subscriptions_lock:
response = requests.request(
'SUBSCRIBE', service.base_url + service.event_subscription_url,
headers=headers)
response.raise_for_status()
self.sid = response.headers['sid']
timeout = response.headers['timeout']
# According to the spec, timeout can be "infinite" or "second-123"
# where 123 is a number of seconds. Sonos uses "Second-123" (with
# a capital letter)
if timeout.lower() == 'infinite':
self.timeout = None
else:
self.timeout = int(timeout.lstrip('Second-'))
self._timestamp = time.time()
self.is_subscribed = True
log.info(
"Subscribed to %s, sid: %s",
service.base_url + service.event_subscription_url, self.sid)
# Add the subscription to the master dict so it can be looked up
# by sid
_subscriptions[self.sid] = self
# Register this subscription to be unsubscribed at exit if still alive
# This will not happen if exit is abnormal (eg in response to a
# signal or fatal interpreter error - see the docs for `atexit`).
atexit.register(self.unsubscribe)
# Set up auto_renew
if not auto_renew:
return
# Autorenew just before expiry, say at 85% of self.timeout seconds
interval = self.timeout * 85 / 100
auto_renew_thread = AutoRenewThread(
interval, self._auto_renew_thread_flag, self)
auto_renew_thread.start()
def renew(self, requested_timeout=None):
"""Renew the event subscription.
You should not try to renew a subscription which has been
unsubscribed, or once it has expired.
Args:
requested_timeout (int, optional): The period for which a renewal
request should be made. If None (the default), use the timeout
requested on subscription.
"""
# NB This code is sometimes called from a separate thread (when
# subscriptions are auto-renewed. Be careful to ensure thread-safety
if self._has_been_unsubscribed:
raise SoCoException(
'Cannot renew subscription once unsubscribed')
if not self.is_subscribed:
raise SoCoException(
'Cannot renew subscription before subscribing')
if self.time_left == 0:
raise SoCoException(
'Cannot renew subscription after expiry')
# SUBSCRIBE publisher path HTTP/1.1
# HOST: publisher host:publisher port
# SID: uuid:subscription UUID
# TIMEOUT: Second-requested subscription duration (optional)
headers = {
'SID': self.sid
}
if requested_timeout is None:
requested_timeout = self.requested_timeout
if requested_timeout is not None:
headers["TIMEOUT"] = "Second-{}".format(requested_timeout)
response = requests.request(
'SUBSCRIBE',
self.service.base_url + self.service.event_subscription_url,
headers=headers)
response.raise_for_status()
timeout = response.headers['timeout']
# According to the spec, timeout can be "infinite" or "second-123"
# where 123 is a number of seconds. Sonos uses "Second-123" (with a
# a capital letter)
if timeout.lower() == 'infinite':
self.timeout = None
else:
self.timeout = int(timeout.lstrip('Second-'))
self._timestamp = time.time()
self.is_subscribed = True
log.info(
"Renewed subscription to %s, sid: %s",
self.service.base_url + self.service.event_subscription_url,
self.sid)
@property
def time_left(self):
"""
`int`: The amount of time left until the subscription expires (seconds)
If the subscription is unsubscribed (or not yet subscribed),
`time_left` is 0.
"""
if self._timestamp is None:
return 0
else:
time_left = self.timeout - (time.time() - self._timestamp)
return time_left if time_left > 0 else 0
def __enter__(self):
if not self.is_subscribed:
self.subscribe()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.unsubscribe()
|
amelchio/pysonos
|
pysonos/core.py
|
SoCo.play_mode
|
python
|
def play_mode(self, playmode):
playmode = playmode.upper()
if playmode not in PLAY_MODES.keys():
raise KeyError("'%s' is not a valid play mode" % playmode)
self.avTransport.SetPlayMode([
('InstanceID', 0),
('NewPlayMode', playmode)
])
|
Set the speaker's mode.
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/core.py#L408-L417
| null |
class SoCo(_SocoSingletonBase):
"""A simple class for controlling a Sonos speaker.
For any given set of arguments to __init__, only one instance of this class
may be created. Subsequent attempts to create an instance with the same
arguments will return the previously created instance. This means that all
SoCo instances created with the same ip address are in fact the *same* SoCo
instance, reflecting the real world position.
.. rubric:: Basic Methods
.. autosummary::
play_from_queue
play
play_uri
pause
stop
seek
next
previous
mute
volume
play_mode
cross_fade
ramp_to_volume
get_current_track_info
get_speaker_info
get_current_transport_info
.. rubric:: Queue Management
.. autosummary::
get_queue
queue_size
add_to_queue
add_uri_to_queue
add_multiple_to_queue
remove_from_queue
clear_queue
.. rubric:: Group Management
.. autosummary::
group
partymode
join
unjoin
all_groups
all_zones
visible_zones
.. rubric:: Player Identity and Settings
.. autosummary::
player_name
uid
household_id
is_visible
is_bridge
is_coordinator
is_soundbar
bass
treble
loudness
night_mode
dialog_mode
status_light
.. rubric:: Playlists and Favorites
.. autosummary::
get_sonos_playlists
create_sonos_playlist
create_sonos_playlist_from_queue
remove_sonos_playlist
add_item_to_sonos_playlist
reorder_sonos_playlist
clear_sonos_playlist
move_in_sonos_playlist
remove_from_sonos_playlist
get_sonos_playlist_by_attr
get_favorite_radio_shows
get_favorite_radio_stations
get_sonos_favorites
.. rubric:: Miscellaneous
.. autosummary::
switch_to_line_in
is_playing_radio
is_playing_line_in
is_playing_tv
switch_to_tv
set_sleep_timer
get_sleep_timer
.. warning::
Properties on this object are not generally cached and may obtain
information over the network, so may take longer than expected to set
or return a value. It may be a good idea for you to cache the value in
your own code.
.. note::
Since all methods/properties on this object will result in an UPnP
request, they might result in an exception without it being mentioned
in the Raises section.
In most cases, the exception will be a
:class:`soco.exceptions.SoCoUPnPException`
(if the player returns an UPnP error code), but in special cases
it might also be another :class:`soco.exceptions.SoCoException`
or even a `requests` exception.
"""
_class_group = 'SoCo'
# pylint: disable=super-on-old-class
def __init__(self, ip_address):
    """Initialise a SoCo instance for the speaker at *ip_address*.

    Args:
        ip_address (str): The speaker's IPv4 address, e.g.
            ``'192.168.1.101'``. IPv6 is not supported by Sonos.

    Raises:
        ValueError: If *ip_address* is not a valid IPv4 address string.
    """
    # Note: Creation of a SoCo instance should be as cheap and quick as
    # possible. Do not make any network calls here
    super(SoCo, self).__init__()
    # Check if ip_address is a valid IPv4 representation.
    # Sonos does not (yet) support IPv6
    try:
        socket.inet_aton(ip_address)
    except socket.error:
        raise ValueError("Not a valid IP address string")
    #: The speaker's ip address
    self.ip_address = ip_address
    self.speaker_info = {}  # Stores information about the current speaker
    # The services which we use
    # pylint: disable=invalid-name
    self.avTransport = AVTransport(self)
    self.contentDirectory = ContentDirectory(self)
    self.deviceProperties = DeviceProperties(self)
    self.renderingControl = RenderingControl(self)
    self.zoneGroupTopology = ZoneGroupTopology(self)
    self.alarmClock = AlarmClock(self)
    self.systemProperties = SystemProperties(self)
    self.musicServices = MusicServices(self)
    self.music_library = MusicLibrary(self)
    # Some private attributes
    self._all_zones = set()  # every zone seen in the group topology
    self._groups = set()  # all ZoneGroups in the household
    self._is_bridge = None  # lazily filled from zone group state
    self._is_coordinator = False  # refreshed by _parse_zone_group_state
    self._is_soundbar = None  # lazily derived from the model name
    self._player_name = None  # lazily filled from zone group state
    self._uid = None  # lazily filled from zone group state
    self._household_id = None  # lazily fetched from deviceProperties
    self._visible_zones = set()  # zones not hidden (bridges, pair slaves)
    self._zgs_cache = Cache(default_timeout=5)  # network cache for topology
    self._zgs_result = None  # last raw ZoneGroupState string, for caching
    _LOG.debug("Created SoCo instance for ip: %s", ip_address)
def __str__(self):
    """Return an informal, human-readable description of this zone."""
    return "<{0} object at ip {1}>".format(
        type(self).__name__, self.ip_address)

def __repr__(self):
    """Return an expression that would recreate this instance."""
    return '{0}("{1}")'.format(type(self).__name__, self.ip_address)
@property
def player_name(self):
    """str: The speaker's name."""
    # We could get the name like this:
    # result = self.deviceProperties.GetZoneAttributes()
    # return result["CurrentZoneName"]
    # but it is probably quicker to get it from the group topology
    # and take advantage of any caching
    self._parse_zone_group_state()  # refreshes self._player_name
    return self._player_name

@player_name.setter
def player_name(self, playername):
    """Set the speaker's name."""
    # Icon and configuration are deliberately left empty: the device
    # keeps its current values for them.
    self.deviceProperties.SetZoneAttributes([
        ('DesiredZoneName', playername),
        ('DesiredIcon', ''),
        ('DesiredConfiguration', '')
    ])
@property
def uid(self):
    """str: A unique identifier.

    Looks like: ``'RINCON_000XXXXXXXXXX1400'``
    """
    # Since this does not change over time (?) check whether we already
    # know the answer. If so, there is no need to go further
    if self._uid is not None:
        return self._uid
    # if not, we have to get it from the zone topology, which
    # is probably quicker than any alternative, since the zgt is probably
    # cached. This will set self._uid for us for next time, so we won't
    # have to do this again
    self._parse_zone_group_state()
    return self._uid
    # An alternative way of getting the uid is as follows:
    # self.device_description_url = \
    #    'http://{0}:1400/xml/device_description.xml'.format(
    #     self.ip_address)
    # response = requests.get(self.device_description_url).text
    # tree = XML.fromstring(response.encode('utf-8'))
    # udn = tree.findtext('.//{urn:schemas-upnp-org:device-1-0}UDN')
    # # the udn has a "uuid:" prefix before the uid, so we need to strip it
    # self._uid = uid = udn[5:]
    # return uid
@property
def household_id(self):
    """str: A unique identifier for all players in a household.

    Looks like: ``'Sonos_asahHKgjgJGjgjGjggjJgjJG34'``
    """
    # The household id is stable, so fetch it once and cache it.
    if self._household_id is None:
        result = self.deviceProperties.GetHouseholdID()
        self._household_id = result['CurrentHouseholdID']
    return self._household_id
@property
def is_visible(self):
    """bool: Is this zone visible?

    A zone might be invisible if, for example, it is a bridge, or the slave
    part of stereo pair.
    """
    # We could do this:
    # invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
    # but it is better to do it in the following way, which uses the
    # zone group topology, to capitalise on any caching.
    return self in self.visible_zones

@property
def is_bridge(self):
    """bool: Is this zone a bridge?"""
    # Since this does not change over time (?) check whether we already
    # know the answer. If so, there is no need to go further
    if self._is_bridge is not None:
        return self._is_bridge
    # if not, we have to get it from the zone topology. This will set
    # self._is_bridge for us for next time, so we won't have to do this
    # again
    self._parse_zone_group_state()
    return self._is_bridge

@property
def is_coordinator(self):
    """bool: Is this zone a group coordinator?"""
    # We could do this:
    # invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
    # but it is better to do it in the following way, which uses the
    # zone group topology, to capitalise on any caching.
    # Note: not cached like is_bridge, because coordinatorship can change
    # whenever the group topology changes.
    self._parse_zone_group_state()
    return self._is_coordinator
@property
def is_soundbar(self):
    """bool: Is this zone a soundbar (i.e. has night mode etc.)?"""
    # Derived once from the model name and cached for the lifetime of
    # the instance.
    if self._is_soundbar is None:
        if not self.speaker_info:
            self.get_speaker_info()
        model_name = self.speaker_info['model_name'].lower()
        # str.endswith accepts a tuple of suffixes and tests them all.
        self._is_soundbar = model_name.endswith(tuple(SOUNDBARS))
    return self._is_soundbar
@property
def play_mode(self):
    """str: The queue's play mode.

    Case-insensitive options are:

    *   ``'NORMAL'`` -- Turns off shuffle and repeat.
    *   ``'REPEAT_ALL'`` -- Turns on repeat and turns off shuffle.
    *   ``'SHUFFLE'`` -- Turns on shuffle *and* repeat. (It's
        strange, I know.)
    *   ``'SHUFFLE_NOREPEAT'`` -- Turns on shuffle and turns off
        repeat.
    """
    # The device reports the current mode as part of its transport
    # settings; only the 'PlayMode' field is of interest here.
    result = self.avTransport.GetTransportSettings([
        ('InstanceID', 0),
    ])
    return result['PlayMode']
@play_mode.setter
def play_mode(self, playmode):
    """Set the speaker's mode.

    Args:
        playmode (str): One of the modes listed on the ``play_mode``
            getter (case-insensitive).

    Raises:
        KeyError: If *playmode* is not a valid play mode.
    """
    # BUG FIX: the decorator previously had no function under it, so it
    # wrongly decorated the following ``shuffle`` property and the
    # play_mode setter was missing entirely. The setter body is restored
    # here.
    playmode = playmode.upper()  # Sonos expects the canonical upper-case form
    if playmode not in PLAY_MODES.keys():
        raise KeyError("'%s' is not a valid play mode" % playmode)
    self.avTransport.SetPlayMode([
        ('InstanceID', 0),
        ('NewPlayMode', playmode)
    ])

@property
def shuffle(self):
    """bool: The queue's shuffle option.

    True if enabled, False otherwise.
    """
    # PLAY_MODES maps a mode name to a (shuffle, repeat) pair.
    return PLAY_MODES[self.play_mode][0]

@shuffle.setter
def shuffle(self, shuffle):
    """Set the queue's shuffle option."""
    # Preserve the current repeat setting while changing shuffle.
    repeat = self.repeat
    self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, repeat)]
@property
def repeat(self):
    """bool: The queue's repeat option.

    True if enabled, False otherwise.

    Might also be ``'ONE'`` if repeating the same title is enabled
    (not supported by the official controller).
    """
    # PLAY_MODES maps a mode name to a (shuffle, repeat) pair.
    return PLAY_MODES[self.play_mode][1]

@repeat.setter
def repeat(self, repeat):
    """Set the queue's repeat option"""
    # Preserve the current shuffle setting while changing repeat.
    shuffle = self.shuffle
    self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, repeat)]
@property
@only_on_master  # Only for symmetry with the setter
def cross_fade(self):
    """bool: The speaker's cross fade state.

    True if enabled, False otherwise
    """
    response = self.avTransport.GetCrossfadeMode([
        ('InstanceID', 0),
    ])
    # The device reports '0'/'1'; convert via int to get a bool.
    cross_fade_state = response['CrossfadeMode']
    return bool(int(cross_fade_state))

@cross_fade.setter
@only_on_master
def cross_fade(self, crossfade):
    """Set the speaker's cross fade state."""
    crossfade_value = '1' if crossfade else '0'
    self.avTransport.SetCrossfadeMode([
        ('InstanceID', 0),
        ('CrossfadeMode', crossfade_value)
    ])
def ramp_to_volume(self, volume, ramp_type='SLEEP_TIMER_RAMP_TYPE'):
    """Smoothly change the volume.

    There are three ramp types available:

        * ``'SLEEP_TIMER_RAMP_TYPE'`` (default): Linear ramp from the
          current volume up or down to the new volume. The ramp rate is
          1.25 steps per second. For example: To change from volume 50 to
          volume 30 would take 16 seconds.
        * ``'ALARM_RAMP_TYPE'``: Resets the volume to zero, waits for about
          30 seconds, and then ramps the volume up to the desired value at
          a rate of 2.5 steps per second. For example: Volume 30 would take
          12 seconds for the ramp up (not considering the wait time).
        * ``'AUTOPLAY_RAMP_TYPE'``: Resets the volume to zero and then
          quickly ramps up at a rate of 50 steps per second. For example:
          Volume 30 will take only 0.6 seconds.

    The ramp rate is selected by Sonos based on the chosen ramp type and
    the resulting transition time returned.
    This method is non blocking and has no network overhead once sent.

    Args:
        volume (int): The new volume.
        ramp_type (str, optional): The desired ramp type, as described
            above.

    Returns:
        int: The ramp time in seconds, rounded down. Note that this does
        not include the wait time.
    """
    # ResetVolumeAfter=False keeps the new volume after the ramp;
    # ProgramURI is unused for these ramp types.
    response = self.renderingControl.RampToVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('RampType', ramp_type),
        ('DesiredVolume', volume),
        ('ResetVolumeAfter', False),
        ('ProgramURI', '')
    ])
    return int(response['RampTime'])
@only_on_master
def play_from_queue(self, index, start=True):
    """Play a track from the queue by index.

    The index number is required as an argument, where the first index
    is 0.

    Args:
        index (int): 0-based index of the track to play
        start (bool): If the item that has been set should start playing

    Raises:
        SoCoSlaveException: If the method is called on a non-coordinator
            zone (via the ``only_on_master`` decorator).
    """
    # Grab the speaker's information if we haven't already since we'll need
    # it in the next step.
    if not self.speaker_info:
        self.get_speaker_info()
    # first, set the queue itself as the source URI
    uri = 'x-rincon-queue:{0}#0'.format(self.uid)
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', uri),
        ('CurrentURIMetaData', '')
    ])
    # second, set the track number with a seek command
    # (the Seek TRACK_NR unit is 1-based, hence index + 1)
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'TRACK_NR'),
        ('Target', index + 1)
    ])
    # finally, just play what's set if needed
    if start:
        self.play()
@only_on_master
def play(self):
    """Start playback of the currently selected track."""
    self.avTransport.Play([('InstanceID', 0), ('Speed', 1)])
@only_on_master
# pylint: disable=too-many-arguments
def play_uri(self, uri='', meta='', title='', start=True,
             force_radio=False):
    """Play a URI.

    Playing a URI will replace what was playing with the stream given by
    the URI. For some streams at least a title is required as metadata.
    This can be provided using the `meta` argument or the `title` argument.
    If the `title` argument is provided minimal metadata will be generated.
    If `meta` argument is provided the `title` argument is ignored.

    Args:
        uri (str): URI of the stream to be played.
        meta (str): The metadata to show in the player, DIDL format.
        title (str): The title to show in the player (if no meta).
        start (bool): If the URI that has been set should start playing.
        force_radio (bool): forces a uri to play as a radio stream.

    Returns:
        bool: The return value of ``play()`` when `start` is True,
        otherwise False.

    On a Sonos controller music is shown with one of the following display
    formats and controls:

    * Radio format: Shows the name of the radio station and other available
      data. No seek, next, previous, or voting capability.
      Examples: TuneIn, radioPup
    * Smart Radio:  Shows track name, artist, and album. Limited seek, next
      and sometimes voting capability depending on the Music Service.
      Examples: Amazon Prime Stations, Pandora Radio Stations.
    * Track format: Shows track name, artist, and album the same as when
      playing from a queue. Full seek, next and previous capabilities.
      Examples: Spotify, Napster, Rhapsody.

    How it is displayed is determined by the URI prefix:
    `x-sonosapi-stream:`, `x-sonosapi-radio:`, `x-rincon-mp3radio:`,
    `hls-radio:` default to radio or smart radio format depending on the
    stream. Others default to track format: `x-file-cifs:`, `aac:`,
    `http:`, `https:`, `x-sonos-spotify:` (used by Spotify),
    `x-sonosapi-hls-static:` (Amazon Prime),
    `x-sonos-http:` (Google Play & Napster).

    Some URIs that default to track format could be radio streams,
    typically `http:`, `https:` or `aac:`.
    To force display and controls to Radio format set `force_radio=True`

    .. note:: Other URI prefixes exist but are less common.
       If you have information on these please add to this doc string.

    .. note:: A change in Sonos® (as of at least version 6.4.2) means that
       the devices no longer accepts ordinary `http:` and `https:` URIs for
       radio stations. This method has the option to replaces these
       prefixes with the one that Sonos® expects: `x-rincon-mp3radio:` by
       using the "force_radio=True" parameter.
       A few streams may fail if not forced to to Radio format.
    """
    if meta == '' and title != '':
        meta_template = '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements'\
            '/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" '\
            'xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/" '\
            'xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">'\
            '<item id="R:0/0/0" parentID="R:0/0" restricted="true">'\
            '<dc:title>{title}</dc:title><upnp:class>'\
            'object.item.audioItem.audioBroadcast</upnp:class><desc '\
            'id="cdudn" nameSpace="urn:schemas-rinconnetworks-com:'\
            'metadata-1-0/">{service}</desc></item></DIDL-Lite>'
        tunein_service = 'SA_RINCON65031_'
        # Radio stations need to have at least a title to play
        # (title is XML-escaped because it is embedded in DIDL markup)
        meta = meta_template.format(
            title=escape(title),
            service=tunein_service)
    # change uri prefix to force radio style display and commands
    if force_radio:
        colon = uri.find(':')
        if colon > 0:
            uri = 'x-rincon-mp3radio{0}'.format(uri[colon:])
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', uri),
        ('CurrentURIMetaData', meta)
    ])
    # The track is enqueued, now play it if needed
    if start:
        return self.play()
    return False
@only_on_master
def pause(self):
    """Pause the currently playing track."""
    self.avTransport.Pause([('InstanceID', 0), ('Speed', 1)])

@only_on_master
def stop(self):
    """Stop the currently playing track."""
    self.avTransport.Stop([('InstanceID', 0), ('Speed', 1)])
@only_on_master
def seek(self, timestamp):
    """Seek to a given timestamp in the current track, specified in the
    format of HH:MM:SS or H:MM:SS.

    Raises:
        ValueError: if the given timestamp is invalid.
    """
    # Validate before touching the device: one or two hour digits,
    # then two-digit minutes and seconds.
    if re.match(r'^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$', timestamp) is None:
        raise ValueError('invalid timestamp, use HH:MM:SS format')
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'REL_TIME'),
        ('Target', timestamp),
    ])
@only_on_master
def next(self):
    """Go to the next track.

    Keep in mind that next() can return errors
    for a variety of reasons. For example, if the Sonos is streaming
    Pandora and you call next() several times in quick succession an error
    code will likely be returned (since Pandora has limits on how many
    songs can be skipped).
    """
    self.avTransport.Next([
        ('InstanceID', 0),
        ('Speed', 1)
    ])

@only_on_master
def previous(self):
    """Go back to the previously played track.

    Keep in mind that previous() can return errors
    for a variety of reasons. For example, previous() will return an error
    code (error code 701) if the Sonos is streaming Pandora since you can't
    go back on tracks.
    """
    self.avTransport.Previous([
        ('InstanceID', 0),
        ('Speed', 1)
    ])
@property
def mute(self):
    """bool: The speaker's mute state.

    True if muted, False otherwise.
    """
    response = self.renderingControl.GetMute([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    # The UPnP response carries '0'/'1'; convert via int to get a bool.
    return bool(int(response['CurrentMute']))

@mute.setter
def mute(self, mute):
    """Mute (or unmute) the speaker."""
    self.renderingControl.SetMute([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredMute', '1' if mute else '0')
    ])
@property
def volume(self):
    """int: The speaker's volume.

    An integer between 0 and 100.
    """
    response = self.renderingControl.GetVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(response['CurrentVolume'])

@volume.setter
def volume(self, volume):
    """Set the speaker's volume, clamping it into the range 0..100."""
    clamped = min(max(int(volume), 0), 100)
    self.renderingControl.SetVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredVolume', clamped)
    ])
@property
def bass(self):
    """int: The speaker's bass EQ.

    An integer between -10 and 10.
    """
    response = self.renderingControl.GetBass([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    bass = response['CurrentBass']
    return int(bass)

@bass.setter
def bass(self, bass):
    """Set the speaker's bass."""
    bass = int(bass)
    bass = max(-10, min(bass, 10))  # Coerce in range
    self.renderingControl.SetBass([
        ('InstanceID', 0),
        ('DesiredBass', bass)
    ])
@property
def treble(self):
    """int: The speaker's treble EQ.

    An integer between -10 and 10.
    """
    response = self.renderingControl.GetTreble([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    treble = response['CurrentTreble']
    return int(treble)

@treble.setter
def treble(self, treble):
    """Set the speaker's treble."""
    treble = int(treble)
    treble = max(-10, min(treble, 10))  # Coerce in range
    self.renderingControl.SetTreble([
        ('InstanceID', 0),
        ('DesiredTreble', treble)
    ])
@property
def loudness(self):
    """bool: The Sonos speaker's loudness compensation.

    True if on, False otherwise.

    Loudness is a complicated topic. You can find a nice summary about this
    feature here: http://forums.sonos.com/showthread.php?p=4698#post4698
    """
    response = self.renderingControl.GetLoudness([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    # The device reports '0'/'1'; convert via int to get a bool.
    return bool(int(response["CurrentLoudness"]))

@loudness.setter
def loudness(self, loudness):
    """Switch on/off the speaker's loudness compensation."""
    self.renderingControl.SetLoudness([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredLoudness', '1' if loudness else '0')
    ])
@property
def night_mode(self):
    """bool: The speaker's night mode.

    True if on, False if off, None if not supported.
    """
    # Night mode only exists on soundbar models.
    if not self.is_soundbar:
        return None
    response = self.renderingControl.GetEQ([
        ('InstanceID', 0),
        ('EQType', 'NightMode')
    ])
    return bool(int(response['CurrentValue']))

@night_mode.setter
def night_mode(self, night_mode):
    """Switch on/off the speaker's night mode.

    :param night_mode: Enable or disable night mode
    :type night_mode: bool
    :raises NotSupportedException: If the device does not support
    night mode.
    """
    if not self.is_soundbar:
        message = 'This device does not support night mode'
        raise NotSupportedException(message)
    # int(bool) yields the 0/1 value the device expects.
    self.renderingControl.SetEQ([
        ('InstanceID', 0),
        ('EQType', 'NightMode'),
        ('DesiredValue', int(night_mode))
    ])
@property
def dialog_mode(self):
    """bool: Get the Sonos speaker's dialog mode.

    True if on, False if off, None if not supported.
    """
    # The DialogLevel EQ setting only exists on soundbar models.
    if not self.is_soundbar:
        return None
    response = self.renderingControl.GetEQ([
        ('InstanceID', 0),
        ('EQType', 'DialogLevel')
    ])
    current = response['CurrentValue']
    return bool(int(current))

@dialog_mode.setter
def dialog_mode(self, dialog_mode):
    """Switch on/off the speaker's dialog mode.

    :param dialog_mode: Enable or disable dialog mode
    :type dialog_mode: bool
    :raises NotSupportedException: If the device does not support
    dialog mode.
    """
    if not self.is_soundbar:
        raise NotSupportedException(
            'This device does not support dialog mode')
    self.renderingControl.SetEQ([
        ('InstanceID', 0),
        ('EQType', 'DialogLevel'),
        ('DesiredValue', int(dialog_mode))
    ])
def _parse_zone_group_state(self):
    """The Zone Group State contains a lot of useful information.

    Retrieve and parse it, and populate the relevant properties
    (``_groups``, ``_all_zones``, ``_visible_zones`` and per-zone
    attributes such as ``_uid``, ``_player_name``, ``_is_coordinator``
    and ``_is_bridge``).
    """
    # zoneGroupTopology.GetZoneGroupState()['ZoneGroupState'] returns XML like
    # this:
    #
    # <ZoneGroups>
    #   <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXXX1400:0">
    #     <ZoneGroupMember
    #         BootSeq="33"
    #         Configuration="1"
    #         Icon="x-rincon-roomicon:zoneextender"
    #         Invisible="1"
    #         IsZoneBridge="1"
    #         Location="http://192.168.1.100:1400/xml/device_description.xml"
    #         MinCompatibleVersion="22.0-00000"
    #         SoftwareVersion="24.1-74200"
    #         UUID="RINCON_000ZZZ1400"
    #         ZoneName="BRIDGE"/>
    #   </ZoneGroup>
    #   <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXX1400:46">
    #     <ZoneGroupMember
    #         BootSeq="44"
    #         Configuration="1"
    #         Icon="x-rincon-roomicon:living"
    #         Location="http://192.168.1.101:1400/xml/device_description.xml"
    #         MinCompatibleVersion="22.0-00000"
    #         SoftwareVersion="24.1-74200"
    #         UUID="RINCON_000XXX1400"
    #         ZoneName="Living Room"/>
    #     <ZoneGroupMember
    #         BootSeq="52"
    #         Configuration="1"
    #         Icon="x-rincon-roomicon:kitchen"
    #         Location="http://192.168.1.102:1400/xml/device_description.xml"
    #         MinCompatibleVersion="22.0-00000"
    #         SoftwareVersion="24.1-74200"
    #         UUID="RINCON_000YYY1400"
    #         ZoneName="Kitchen"/>
    #   </ZoneGroup>
    # </ZoneGroups>
    #

    def parse_zone_group_member(member_element):
        """Parse a ZoneGroupMember or Satellite element from Zone Group
        State, create a SoCo instance for the member, set basic attributes
        and return it."""
        # Create a SoCo instance for each member. Because SoCo
        # instances are singletons, this is cheap if they have already
        # been created, and useful if they haven't. We can then
        # update various properties for that instance.
        member_attribs = member_element.attrib
        ip_addr = member_attribs['Location'].\
            split('//')[1].split(':')[0]
        zone = config.SOCO_CLASS(ip_addr)
        # share our cache
        zone._zgs_cache = self._zgs_cache
        # uid doesn't change, but it's not harmful to (re)set it, in case
        # the zone is as yet unseen.
        zone._uid = member_attribs['UUID']
        zone._player_name = member_attribs['ZoneName']
        # add the zone to the set of all members, and to the set
        # of visible members if appropriate
        is_visible = (member_attribs.get('Invisible') != '1')
        if is_visible:
            self._visible_zones.add(zone)
        self._all_zones.add(zone)
        return zone

    # This is called quite frequently, so it is worth optimising it.
    # Maintain a private cache. If the zgt has not changed, there is no
    # need to repeat all the XML parsing. In addition, switch on network
    # caching for a short interval (5 secs).
    zgs = self.zoneGroupTopology.GetZoneGroupState(
        cache=self._zgs_cache)['ZoneGroupState']
    if zgs == self._zgs_result:
        return
    self._zgs_result = zgs
    tree = XML.fromstring(zgs.encode('utf-8'))
    # Empty the set of all zone_groups
    self._groups.clear()
    # and the set of all members
    self._all_zones.clear()
    self._visible_zones.clear()
    # With some versions, the response is wrapped in ZoneGroupState.
    # BUG FIX: this previously used ``tree.find('ZoneGroups') or tree``,
    # which relies on Element truthiness (an Element with no children is
    # falsy) - a documented ElementTree pitfall, deprecated in newer
    # Python versions. Test against None explicitly instead.
    zone_groups = tree.find('ZoneGroups')
    if zone_groups is not None:
        tree = zone_groups
    # Loop over each ZoneGroup Element
    for group_element in tree.findall('ZoneGroup'):
        coordinator_uid = group_element.attrib['Coordinator']
        group_uid = group_element.attrib['ID']
        group_coordinator = None
        members = set()
        for member_element in group_element.findall('ZoneGroupMember'):
            zone = parse_zone_group_member(member_element)
            # Perform extra processing relevant to direct zone group
            # members
            #
            # If this element has the same UUID as the coordinator, it is
            # the coordinator
            if zone._uid == coordinator_uid:
                group_coordinator = zone
                zone._is_coordinator = True
            else:
                zone._is_coordinator = False
            # is_bridge doesn't change, but it does no real harm to
            # set/reset it here, just in case the zone has not been seen
            # before
            zone._is_bridge = (
                member_element.attrib.get('IsZoneBridge') == '1')
            # add the zone to the members for this group
            members.add(zone)
            # Loop over Satellite elements if present, and process as for
            # ZoneGroup elements
            for satellite_element in member_element.findall('Satellite'):
                zone = parse_zone_group_member(satellite_element)
                # Assume a satellite can't be a bridge or coordinator, so
                # no need to check.
                #
                # Add the zone to the members for this group.
                members.add(zone)
            # Now create a ZoneGroup with this info and add it to the list
            # of groups
        self._groups.add(ZoneGroup(group_uid, group_coordinator, members))
@property
def all_groups(self):
    """set of :class:`soco.groups.ZoneGroup`: All available groups."""
    self._parse_zone_group_state()
    # Return a copy so callers cannot mutate our internal state.
    return self._groups.copy()

@property
def group(self):
    """:class:`soco.groups.ZoneGroup`: The Zone Group of which this device
    is a member.

    None if this zone is a slave in a stereo pair.
    """
    for group in self.all_groups:
        if self in group:
            return group
    return None

    # To get the group directly from the network, try the code below
    # though it is probably slower than that above
    # current_group_id = self.zoneGroupTopology.GetZoneGroupAttributes()[
    #     'CurrentZoneGroupID']
    # if current_group_id:
    #     for group in self.all_groups:
    #         if group.uid == current_group_id:
    #             return group
    # else:
    #     return None

@property
def all_zones(self):
    """set of :class:`soco.groups.ZoneGroup`: All available zones."""
    self._parse_zone_group_state()
    # Return a copy so callers cannot mutate our internal state.
    return self._all_zones.copy()

@property
def visible_zones(self):
    """set of :class:`soco.groups.ZoneGroup`: All visible zones."""
    self._parse_zone_group_state()
    # Return a copy so callers cannot mutate our internal state.
    return self._visible_zones.copy()
def partymode(self):
    """Put all the speakers in the network in the same group, a.k.a Party
    Mode.

    This blog shows the initial research responsible for this:
    http://blog.travelmarx.com/2010/06/exploring-sonos-via-upnp.html

    The trick seems to be (only tested on a two-speaker setup) to tell each
    speaker which to join. There's probably a bit more to it if multiple
    groups have been defined.
    """
    # Tell every other visible zone to join this one.
    # (Previously written as a list comprehension executed purely for its
    # side effects, which built and discarded a throwaway list; a plain
    # loop says what is meant.)
    for zone in self.visible_zones:
        if zone is not self:
            zone.join(self)
def join(self, master):
    """Join this speaker to another "master" speaker."""
    # Pointing our transport at the master's rincon URI makes us a
    # group slave of that zone.
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', 'x-rincon:{0}'.format(master.uid)),
        ('CurrentURIMetaData', '')
    ])
    # The group topology has changed: drop the cache and re-read it.
    self._zgs_cache.clear()
    self._parse_zone_group_state()

def unjoin(self):
    """Remove this speaker from a group.

    Seems to work ok even if you remove what was previously the group
    master from it's own group. If the speaker was not in a group also
    returns ok.
    """
    self.avTransport.BecomeCoordinatorOfStandaloneGroup([
        ('InstanceID', 0)
    ])
    # The group topology has changed: drop the cache and re-read it.
    self._zgs_cache.clear()
    self._parse_zone_group_state()
def switch_to_line_in(self, source=None):
    """ Switch the speaker's input to line-in.

    Args:
        source (SoCo): The speaker whose line-in should be played.
            Default is line-in from the speaker itself.
    """
    # Default to our own line-in when no source speaker is given.
    uid = source.uid if source else self.uid
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', 'x-rincon-stream:{0}'.format(uid)),
        ('CurrentURIMetaData', '')
    ])
def _track_uri_matches(self, uri_pattern):
    """Return True if the current track URI matches *uri_pattern*.

    Shared implementation for the ``is_playing_*`` properties, which
    previously carried three copy-pasted bodies.
    """
    response = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    track_uri = response['TrackURI']
    return re.match(uri_pattern, track_uri) is not None

@property
def is_playing_radio(self):
    """bool: Is the speaker playing radio?"""
    return self._track_uri_matches(r'^x-rincon-mp3radio:')

@property
def is_playing_line_in(self):
    """bool: Is the speaker playing line-in?"""
    return self._track_uri_matches(r'^x-rincon-stream:')

@property
def is_playing_tv(self):
    """bool: Is the playbar speaker input from TV?"""
    return self._track_uri_matches(r'^x-sonos-htastream:')
def switch_to_tv(self):
    """Switch the playbar speaker's input to TV."""
    tv_uri = 'x-sonos-htastream:{0}:spdif'.format(self.uid)
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', tv_uri),
        ('CurrentURIMetaData', '')
    ])
@property
def status_light(self):
    """bool: The white Sonos status light between the mute button and the
    volume up button on the speaker.

    True if on, otherwise False.
    """
    result = self.deviceProperties.GetLEDState()
    # The device reports the state as the string "On" or "Off".
    return result["CurrentLEDState"] == "On"

@status_light.setter
def status_light(self, led_on):
    """Switch on/off the speaker's status light."""
    self.deviceProperties.SetLEDState([
        ('DesiredLEDState', 'On' if led_on else 'Off'),
    ])
def get_current_track_info(self):
    """Get information about the currently playing track.

    Returns:
        dict: A dictionary containing information about the currently
        playing track: playlist_position, duration, title, artist, album,
        position and an album_art link.

    If we're unable to return data for a field, we'll return an empty
    string. This can happen for all kinds of reasons so be sure to check
    values. For example, a track may not have complete metadata and be
    missing an album name. In this case track['album'] will be an empty
    string.

    .. note:: Calling this method on a slave in a group will not
        return the track the group is playing, but the last track
        this speaker was playing.
    """
    response = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    track = {'title': '', 'artist': '', 'album': '', 'album_art': '',
             'position': ''}
    track['playlist_position'] = response['Track']
    track['duration'] = response['TrackDuration']
    track['uri'] = response['TrackURI']
    track['position'] = response['RelTime']
    metadata = response['TrackMetaData']
    # Store the entire Metadata entry in the track, this can then be
    # used if needed by the client to restart a given URI
    track['metadata'] = metadata
    # Duration seems to be '0:00:00' when listening to radio
    if metadata != '' and track['duration'] == '0:00:00':
        metadata = XML.fromstring(really_utf8(metadata))
        # Try parse trackinfo
        # (radio streams put "Artist - Title" into streamContent)
        trackinfo = metadata.findtext('.//{urn:schemas-rinconnetworks-com:'
                                      'metadata-1-0/}streamContent') or ''
        index = trackinfo.find(' - ')
        if index > -1:
            track['artist'] = trackinfo[:index]
            track['title'] = trackinfo[index + 3:]
        else:
            # Might find some kind of title anyway in metadata
            track['title'] = metadata.findtext('.//{http://purl.org/dc/'
                                               'elements/1.1/}title')
            if not track['title']:
                # Fall back to the raw stream content string.
                track['title'] = trackinfo
    # If the speaker is playing from the line-in source, querying for track
    # metadata will return "NOT_IMPLEMENTED".
    elif metadata not in ('', 'NOT_IMPLEMENTED', None):
        # Track metadata is returned in DIDL-Lite format
        metadata = XML.fromstring(really_utf8(metadata))
        md_title = metadata.findtext(
            './/{http://purl.org/dc/elements/1.1/}title')
        md_artist = metadata.findtext(
            './/{http://purl.org/dc/elements/1.1/}creator')
        md_album = metadata.findtext(
            './/{urn:schemas-upnp-org:metadata-1-0/upnp/}album')
        # findtext returns None when the element is missing; keep the
        # empty-string defaults in that case.
        track['title'] = ""
        if md_title:
            track['title'] = md_title
        track['artist'] = ""
        if md_artist:
            track['artist'] = md_artist
        track['album'] = ""
        if md_album:
            track['album'] = md_album
        album_art_url = metadata.findtext(
            './/{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')
        if album_art_url is not None:
            track['album_art'] = \
                self.music_library.build_album_art_full_uri(album_art_url)
    return track
    def get_speaker_info(self, refresh=False, timeout=None):
        """Get information about the Sonos speaker.
        Arguments:
            refresh(bool): Refresh the speaker info cache.
            timeout: How long to wait for the server to send
                data before giving up, as a float, or a
                `(connect timeout, read timeout)` tuple
                e.g. (3, 5). Default is no timeout.
        Returns:
            dict: Information about the Sonos speaker, such as the UID,
                MAC Address, and Zone Name. Returns ``None`` if the
                device description contains no device element.
        """
        if self.speaker_info and refresh is False:
            return self.speaker_info
        else:
            # Fetch the player's UPnP device description document directly
            # over HTTP (port 1400 is the player's web interface).
            response = requests.get('http://' + self.ip_address +
                                    ':1400/xml/device_description.xml',
                                    timeout=timeout)
            dom = XML.fromstring(response.content)
            device = dom.find('{urn:schemas-upnp-org:device-1-0}device')
            if device is not None:
                self.speaker_info['zone_name'] = device.findtext(
                    '{urn:schemas-upnp-org:device-1-0}roomName')
                # no zone icon in device_description.xml -> player icon
                self.speaker_info['player_icon'] = device.findtext(
                    '{urn:schemas-upnp-org:device-1-0}iconList/'
                    '{urn:schemas-upnp-org:device-1-0}icon/'
                    '{urn:schemas-upnp-org:device-1-0}url'
                )
                self.speaker_info['uid'] = self.uid
                self.speaker_info['serial_number'] = device.findtext(
                    '{urn:schemas-upnp-org:device-1-0}serialNum')
                self.speaker_info['software_version'] = device.findtext(
                    '{urn:schemas-upnp-org:device-1-0}softwareVersion')
                self.speaker_info['hardware_version'] = device.findtext(
                    '{urn:schemas-upnp-org:device-1-0}hardwareVersion')
                self.speaker_info['model_number'] = device.findtext(
                    '{urn:schemas-upnp-org:device-1-0}modelNumber')
                self.speaker_info['model_name'] = device.findtext(
                    '{urn:schemas-upnp-org:device-1-0}modelName')
                self.speaker_info['display_version'] = device.findtext(
                    '{urn:schemas-upnp-org:device-1-0}displayVersion')
                # no mac address - extract from serial number
                mac = self.speaker_info['serial_number'].split(':')[0]
                self.speaker_info['mac_address'] = mac
                return self.speaker_info
            # NOTE(review): falls through to None when <device> is missing;
            # callers should be prepared for a None return.
            return None
def get_current_transport_info(self):
"""Get the current playback state.
Returns:
dict: The following information about the
speaker's playing state:
* current_transport_state (``PLAYING``, ``TRANSITIONING``,
``PAUSED_PLAYBACK``, ``STOPPED``)
* current_transport_status (OK, ?)
* current_speed(1, ?)
This allows us to know if speaker is playing or not. Don't know other
states of CurrentTransportStatus and CurrentSpeed.
"""
response = self.avTransport.GetTransportInfo([
('InstanceID', 0),
])
playstate = {
'current_transport_status': '',
'current_transport_state': '',
'current_transport_speed': ''
}
playstate['current_transport_state'] = \
response['CurrentTransportState']
playstate['current_transport_status'] = \
response['CurrentTransportStatus']
playstate['current_transport_speed'] = response['CurrentSpeed']
return playstate
    def get_queue(self, start=0, max_items=100, full_album_art_uri=False):
        """Get information about the queue.
        :param start: Starting number of returned matches
        :param max_items: Maximum number of returned matches
        :param full_album_art_uri: If the album art URI should include the
            IP address
        :returns: A :py:class:`~.soco.data_structures.Queue` object
        This method is heavily based on Sam Soffes (aka soffes) ruby
        implementation
        """
        queue = []
        response = self.contentDirectory.Browse([
            ('ObjectID', 'Q:0'),
            ('BrowseFlag', 'BrowseDirectChildren'),
            ('Filter', '*'),
            ('StartingIndex', start),
            ('RequestedCount', max_items),
            ('SortCriteria', '')
        ])
        result = response['Result']
        # Collect the paging metadata (returned count, total matches and
        # update id) so it can be attached to the Queue object below.
        metadata = {}
        for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
            metadata[camel_to_underscore(tag)] = int(response[tag])
        # I'm not sure this necessary (any more). Even with an empty queue,
        # there is still a result object. This should be investigated.
        if not result:
            # pylint: disable=star-args
            return Queue(queue, **metadata)
        items = from_didl_string(result)
        for item in items:
            # Check if the album art URI should be fully qualified
            if full_album_art_uri:
                self.music_library._update_album_art_to_full_uri(item)
            queue.append(item)
        # pylint: disable=star-args
        return Queue(queue, **metadata)
@property
def queue_size(self):
"""int: Size of the queue."""
response = self.contentDirectory.Browse([
('ObjectID', 'Q:0'),
('BrowseFlag', 'BrowseMetadata'),
('Filter', '*'),
('StartingIndex', 0),
('RequestedCount', 1),
('SortCriteria', '')
])
dom = XML.fromstring(really_utf8(response['Result']))
queue_size = None
container = dom.find(
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container')
if container is not None:
child_count = container.get('childCount')
if child_count is not None:
queue_size = int(child_count)
return queue_size
def get_sonos_playlists(self, *args, **kwargs):
"""Convenience method for
`get_music_library_information('sonos_playlists')`.
Refer to the docstring for that method
"""
args = tuple(['sonos_playlists'] + list(args))
return self.music_library.get_music_library_information(*args,
**kwargs)
    @only_on_master
    def add_uri_to_queue(self, uri, position=0, as_next=False):
        """Add the URI to the queue.
        For arguments and return value see `add_to_queue`.
        """
        # FIXME: The res.protocol_info should probably represent the mime type
        # etc of the uri. But this seems OK.
        res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
        # Wrap the bare URI in a minimal DIDL item so it can be queued
        # through the normal add_to_queue path.
        item = DidlObject(resources=res, title='', parent_id='', item_id='')
        return self.add_to_queue(item, position, as_next)
    @only_on_master
    def add_to_queue(self, queueable_item, position=0, as_next=False):
        """Add a queueable item to the queue.
        Args:
            queueable_item (DidlObject or MusicServiceItem): The item to be
                added to the queue
            position (int): The index (1-based) at which the URI should be
                added. Default is 0 (add URI at the end of the queue).
            as_next (bool): Whether this URI should be played as the next
                track in shuffle mode. This only works if `play_mode=SHUFFLE`.
        Returns:
            int: The index of the new item in the queue.
        """
        metadata = to_didl_string(queueable_item)
        response = self.avTransport.AddURIToQueue([
            ('InstanceID', 0),
            ('EnqueuedURI', queueable_item.resources[0].uri),
            ('EnqueuedURIMetaData', metadata),
            ('DesiredFirstTrackNumberEnqueued', position),
            ('EnqueueAsNext', int(as_next))
        ])
        # The player reports the (1-based) queue position of the added item.
        qnumber = response['FirstTrackNumberEnqueued']
        return int(qnumber)
def add_multiple_to_queue(self, items, container=None):
"""Add a sequence of items to the queue.
Args:
items (list): A sequence of items to the be added to the queue
container (DidlObject, optional): A container object which
includes the items.
"""
if container is not None:
container_uri = container.resources[0].uri
container_metadata = to_didl_string(container)
else:
container_uri = '' # Sonos seems to accept this as well
container_metadata = '' # pylint: disable=redefined-variable-type
chunk_size = 16 # With each request, we can only add 16 items
item_list = list(items) # List for slicing
for index in range(0, len(item_list), chunk_size):
chunk = item_list[index:index + chunk_size]
uris = ' '.join([item.resources[0].uri for item in chunk])
uri_metadata = ' '.join([to_didl_string(item) for item in chunk])
self.avTransport.AddMultipleURIsToQueue([
('InstanceID', 0),
('UpdateID', 0),
('NumberOfURIs', len(chunk)),
('EnqueuedURIs', uris),
('EnqueuedURIsMetaData', uri_metadata),
('ContainerURI', container_uri),
('ContainerMetaData', container_metadata),
('DesiredFirstTrackNumberEnqueued', 0),
('EnqueueAsNext', 0)
])
@only_on_master
def remove_from_queue(self, index):
"""Remove a track from the queue by index. The index number is
required as an argument, where the first index is 0.
Args:
index (int): The (0-based) index of the track to remove
"""
# TODO: what do these parameters actually do?
updid = '0'
objid = 'Q:0/' + str(index + 1)
self.avTransport.RemoveTrackFromQueue([
('InstanceID', 0),
('ObjectID', objid),
('UpdateID', updid),
])
@only_on_master
def clear_queue(self):
"""Remove all tracks from the queue."""
self.avTransport.RemoveAllTracksFromQueue([
('InstanceID', 0),
])
    @deprecated('0.13', "soco.music_library.get_favorite_radio_shows", '0.15')
    def get_favorite_radio_shows(self, start=0, max_items=100):
        """Get favorite radio shows from Sonos' Radio app.
        Args:
            start (int): Number to start the retrieval from (for paging).
            max_items (int): The maximum number of results to return.
        Returns:
            dict: A dictionary containing the total number of favorites, the
            number of favorites returned, and the actual list of favorite radio
            shows, represented as a dictionary with `title` and `uri` keys.
        Depending on what you're building, you'll want to check to see if the
        total number of favorites is greater than the amount you
        requested (`max_items`), if it is, use `start` to page through and
        get the entire list of favorites.
        """
        message = 'The output type of this method will probably change in '\
                  'the future to use SoCo data structures'
        warnings.warn(message, stacklevel=2)
        return self.__get_favorites(RADIO_SHOWS, start, max_items)
    @deprecated('0.13', "soco.music_library.get_favorite_radio_stations",
                '0.15')
    def get_favorite_radio_stations(self, start=0, max_items=100):
        """Get favorite radio stations from Sonos' Radio app.
        Args:
            start (int): Number to start the retrieval from (for paging).
            max_items (int): The maximum number of results to return.
        See :meth:`get_favorite_radio_shows` for return type and remarks.
        """
        message = 'The output type of this method will probably change in '\
                  'the future to use SoCo data structures'
        warnings.warn(message, stacklevel=2)
        return self.__get_favorites(RADIO_STATIONS, start, max_items)
    @deprecated('0.13', "soco.music_library.get_sonos_favorites", '0.15')
    def get_sonos_favorites(self, start=0, max_items=100):
        """Get Sonos favorites.
        Args:
            start (int): Number to start the retrieval from (for paging).
            max_items (int): The maximum number of results to return.
        See :meth:`get_favorite_radio_shows` for return type and remarks.
        """
        message = 'The output type of this method will probably change in '\
                  'the future to use SoCo data structures'
        warnings.warn(message, stacklevel=2)
        return self.__get_favorites(SONOS_FAVORITES, start, max_items)
def __get_favorites(self, favorite_type, start=0, max_items=100):
""" Helper method for `get_favorite_radio_*` methods.
Args:
favorite_type (str): Specify either `RADIO_STATIONS` or
`RADIO_SHOWS`.
start (int): Which number to start the retrieval from. Used for
paging.
max_items (int): The total number of results to return.
"""
if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
favorite_type = SONOS_FAVORITES
response = self.contentDirectory.Browse([
('ObjectID',
'FV:2' if favorite_type is SONOS_FAVORITES
else 'R:0/{0}'.format(favorite_type)),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', start),
('RequestedCount', max_items),
('SortCriteria', '')
])
result = {}
favorites = []
results_xml = response['Result']
if results_xml != '':
# Favorites are returned in DIDL-Lite format
metadata = XML.fromstring(really_utf8(results_xml))
for item in metadata.findall(
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container'
if favorite_type == RADIO_SHOWS else
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'):
favorite = {}
favorite['title'] = item.findtext(
'{http://purl.org/dc/elements/1.1/}title')
favorite['uri'] = item.findtext(
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res')
if favorite_type == SONOS_FAVORITES:
favorite['meta'] = item.findtext(
'{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD')
favorites.append(favorite)
result['total'] = response['TotalMatches']
result['returned'] = len(favorites)
result['favorites'] = favorites
return result
    def create_sonos_playlist(self, title):
        """Create a new empty Sonos playlist.
        Args:
            title: Name of the playlist
        :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
        """
        response = self.avTransport.CreateSavedQueue([
            ('InstanceID', 0),
            ('Title', title),
            ('EnqueuedURI', ''),
            ('EnqueuedURIMetaData', ''),
        ])
        item_id = response['AssignedObjectID']
        # The assigned id looks like 'SQ:<n>'; the numeric part is used to
        # build the playlist's file URI on the player.
        obj_id = item_id.split(':', 2)[1]
        uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(obj_id)
        res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
        return DidlPlaylistContainer(
            resources=res, title=title, parent_id='SQ:', item_id=item_id)
    @only_on_master
    # pylint: disable=invalid-name
    def create_sonos_playlist_from_queue(self, title):
        """Create a new Sonos playlist from the current queue.
        Args:
            title: Name of the playlist
        :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
        """
        # Note: probably same as Queue service method SaveAsSonosPlaylist
        # but this has not been tested. This method is what the
        # controller uses.
        response = self.avTransport.SaveQueue([
            ('InstanceID', 0),
            ('Title', title),
            ('ObjectID', '')
        ])
        item_id = response['AssignedObjectID']
        # As in create_sonos_playlist: 'SQ:<n>' -> playlist file URI.
        obj_id = item_id.split(':', 2)[1]
        uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(obj_id)
        res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
        return DidlPlaylistContainer(
            resources=res, title=title, parent_id='SQ:', item_id=item_id)
@only_on_master
def remove_sonos_playlist(self, sonos_playlist):
"""Remove a Sonos playlist.
Args:
sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove
or the item_id (str).
Returns:
bool: True if succesful, False otherwise
Raises:
SoCoUPnPException: If sonos_playlist does not point to a valid
object.
"""
object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
return self.contentDirectory.DestroyObject([('ObjectID', object_id)])
    def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
        """Adds a queueable item to a Sonos' playlist.
        Args:
            queueable_item (DidlObject): the item to add to the Sonos' playlist
            sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to
                which the item should be added
        """
        # Get the current update_id for the playlist; the edit call below
        # requires it to detect concurrent modifications.
        response, _ = self.music_library._music_lib_search(
            sonos_playlist.item_id, 0, 1)
        update_id = response['UpdateID']
        # Form the metadata for queueable_item
        metadata = to_didl_string(queueable_item)
        # Make the request
        self.avTransport.AddURIToSavedQueue([
            ('InstanceID', 0),
            ('UpdateID', update_id),
            ('ObjectID', sonos_playlist.item_id),
            ('EnqueuedURI', queueable_item.resources[0].uri),
            ('EnqueuedURIMetaData', metadata),
            # 2 ** 32 - 1 = 4294967295, this field has always this value. Most
            # likely, playlist positions are represented as a 32 bit uint and
            # this is therefore the largest index possible. Asking to add at
            # this index therefore probably amounts to adding it "at the end"
            ('AddAtIndex', 4294967295)
        ])
@only_on_master
def set_sleep_timer(self, sleep_time_seconds):
"""Sets the sleep timer.
Args:
sleep_time_seconds (int or NoneType): How long to wait before
turning off speaker in seconds, None to cancel a sleep timer.
Maximum value of 86399
Raises:
SoCoException: Upon errors interacting with Sonos controller
ValueError: Argument/Syntax errors
"""
# Note: A value of None for sleep_time_seconds is valid, and needs to
# be preserved distinctly separate from 0. 0 means go to sleep now,
# which will immediately start the sound tappering, and could be a
# useful feature, while None means cancel the current timer
try:
if sleep_time_seconds is None:
sleep_time = ''
else:
sleep_time = format(
datetime.timedelta(seconds=int(sleep_time_seconds))
)
self.avTransport.ConfigureSleepTimer([
('InstanceID', 0),
('NewSleepTimerDuration', sleep_time),
])
except SoCoUPnPException as err:
if 'Error 402 received' in str(err):
raise ValueError('invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None')
raise
except ValueError:
raise ValueError('invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None')
@only_on_master
def get_sleep_timer(self):
"""Retrieves remaining sleep time, if any
Returns:
int or NoneType: Number of seconds left in timer. If there is no
sleep timer currently set it will return None.
"""
resp = self.avTransport.GetRemainingSleepTimerDuration([
('InstanceID', 0),
])
if resp['RemainingSleepTimerDuration']:
times = resp['RemainingSleepTimerDuration'].split(':')
return (int(times[0]) * 3600 +
int(times[1]) * 60 +
int(times[2]))
else:
return None
    @only_on_master
    def reorder_sonos_playlist(self, sonos_playlist, tracks, new_pos,
                               update_id=0):
        """Reorder and/or Remove tracks in a Sonos playlist.
        The underlying call is quite complex as it can both move a track
        within the list or delete a track from the playlist. All of this
        depends on what tracks and new_pos specify.
        If a list is specified for tracks, then a list must be used for
        new_pos. Each list element is a discrete modification and the next
        list operation must anticipate the new state of the playlist.
        If a comma formatted string to tracks is specified, then use
        a similiar string to specify new_pos. Those operations should be
        ordered from the end of the list to the beginning
        See the helper methods
        :py:meth:`clear_sonos_playlist`, :py:meth:`move_in_sonos_playlist`,
        :py:meth:`remove_from_sonos_playlist` for simplified usage.
        update_id - If you have a series of operations, tracking the update_id
        and setting it, will save a lookup operation.
        Examples:
          To reorder the first two tracks::
            # sonos_playlist specified by the DidlPlaylistContainer object
            sonos_playlist = device.get_sonos_playlists()[0]
            device.reorder_sonos_playlist(sonos_playlist,
                                          tracks=[0, ], new_pos=[1, ])
            # OR specified by the item_id
            device.reorder_sonos_playlist('SQ:0', tracks=[0, ], new_pos=[1, ])
          To delete the second track::
            # tracks/new_pos are a list of int
            device.reorder_sonos_playlist(sonos_playlist,
                                          tracks=[1, ], new_pos=[None, ])
            # OR tracks/new_pos are a list of int-like
            device.reorder_sonos_playlist(sonos_playlist,
                                          tracks=['1', ], new_pos=['', ])
            # OR tracks/new_pos are strings - no transform is done
            device.reorder_sonos_playlist(sonos_playlist, tracks='1',
                                          new_pos='')
          To reverse the order of a playlist with 4 items::
            device.reorder_sonos_playlist(sonos_playlist, tracks='3,2,1,0',
                                          new_pos='0,1,2,3')
        Args:
            sonos_playlist
                (:py:class:`~.soco.data_structures.DidlPlaylistContainer`): The
                Sonos playlist object or the item_id (str) of the Sonos
                playlist.
            tracks: (list): list of track indices(int) to reorder. May also be
                a list of int like things. i.e. ``['0', '1',]`` OR it may be a
                str of comma separated int like things. ``"0,1"``. Tracks are
                **0**-based. Meaning the first track is track 0, just like
                indexing into a Python list.
            new_pos (list): list of new positions (int|None)
                corresponding to track_list. MUST be the same type as
                ``tracks``. **0**-based, see tracks above. ``None`` is the
                indicator to remove the track. If using a list of strings,
                then a remove is indicated by an empty string.
            update_id (int): operation id (default: 0) If set to 0, a lookup
                is done to find the correct value.
        Returns:
            dict: Which contains 3 elements: change, length and update_id.
                Change in size between original playlist and the resulting
                playlist, the length of resulting playlist, and the new
                update_id.
        Raises:
            SoCoUPnPException: If playlist does not exist or if your tracks
                and/or new_pos arguments are invalid.
        """
        # allow either a string 'SQ:10' or an object with item_id attribute.
        object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
        # Normalise tracks/new_pos into parallel lists of strings (or, for
        # the single-string/single-int forms, one-element lists).
        if isinstance(tracks, UnicodeType):
            track_list = [tracks, ]
            position_list = [new_pos, ]
        elif isinstance(tracks, int):
            track_list = [tracks, ]
            if new_pos is None:
                new_pos = ''
            position_list = [new_pos, ]
        else:
            track_list = [str(x) for x in tracks]
            position_list = [str(x) if x is not None else '' for x in new_pos]
        # track_list = ','.join(track_list)
        # position_list = ','.join(position_list)
        if update_id == 0:  # retrieve the update id for the object
            response, _ = self.music_library._music_lib_search(object_id, 0, 1)
            update_id = response['UpdateID']
        change = 0
        for track, position in zip(track_list, position_list):
            if track == position:  # there is no move, a no-op
                continue
            response = self.avTransport.ReorderTracksInSavedQueue([
                ("InstanceID", 0),
                ("ObjectID", object_id),
                ("UpdateID", update_id),
                ("TrackList", track),
                ("NewPositionList", position),
            ])
            # Each edit reports the queue-length delta and a fresh update_id
            # that must be used for the next edit in the series.
            change += int(response['QueueLengthChange'])
            update_id = int(response['NewUpdateID'])
        # NOTE(review): if every (track, position) pair was a no-op, the
        # 'response' used here is the search response (or unbound) and may
        # lack 'NewQueueLength' -- verify this edge case.
        length = int(response['NewQueueLength'])
        response = {'change': change,
                    'update_id': update_id,
                    'length': length}
        return response
    @only_on_master
    def clear_sonos_playlist(self, sonos_playlist, update_id=0):
        """Clear all tracks from a Sonos playlist.
        This is a convenience method for :py:meth:`reorder_sonos_playlist`.
        Example::
            device.clear_sonos_playlist(sonos_playlist)
        Args:
            sonos_playlist
                (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
                Sonos playlist object or the item_id (str) of the Sonos
                playlist.
            update_id (int): Optional update counter for the object. If left
                at the default of 0, it will be looked up.
        Returns:
            dict: See :py:meth:`reorder_sonos_playlist`
        Raises:
            ValueError: If sonos_playlist specified by string and is not found.
            SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
        """
        if not isinstance(sonos_playlist, DidlPlaylistContainer):
            sonos_playlist = self.get_sonos_playlist_by_attr('item_id',
                                                             sonos_playlist)
        # Ask the music library how many tracks the playlist holds, then
        # request removal of every index in a single reorder call.
        count = self.music_library.browse(ml_item=sonos_playlist).total_matches
        tracks = ','.join([str(x) for x in range(count)])
        if tracks:
            return self.reorder_sonos_playlist(sonos_playlist, tracks=tracks,
                                               new_pos='', update_id=update_id)
        else:
            # Nothing to remove -- report an unchanged, empty playlist.
            return {'change': 0, 'update_id': update_id, 'length': count}
@only_on_master
def move_in_sonos_playlist(self, sonos_playlist, track, new_pos,
update_id=0):
"""Move a track to a new position within a Sonos Playlist.
This is a convenience method for :py:meth:`reorder_sonos_playlist`.
Example::
device.move_in_sonos_playlist(sonos_playlist, track=0, new_pos=1)
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
Sonos playlist object or the item_id (str) of the Sonos
playlist.
track (int): **0**-based position of the track to move. The first
track is track 0, just like indexing into a Python list.
new_pos (int): **0**-based location to move the track.
update_id (int): Optional update counter for the object. If left
at the default of 0, it will be looked up.
Returns:
dict: See :py:meth:`reorder_sonos_playlist`
Raises:
SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
"""
return self.reorder_sonos_playlist(sonos_playlist, int(track),
int(new_pos), update_id)
@only_on_master
def remove_from_sonos_playlist(self, sonos_playlist, track, update_id=0):
"""Remove a track from a Sonos Playlist.
This is a convenience method for :py:meth:`reorder_sonos_playlist`.
Example::
device.remove_from_sonos_playlist(sonos_playlist, track=0)
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
Sonos playlist object or the item_id (str) of the Sonos
playlist.
track (int): *0**-based position of the track to move. The first
track is track 0, just like indexing into a Python list.
update_id (int): Optional update counter for the object. If left
at the default of 0, it will be looked up.
Returns:
dict: See :py:meth:`reorder_sonos_playlist`
Raises:
SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
"""
return self.reorder_sonos_playlist(sonos_playlist, int(track), None,
update_id)
@only_on_master
def get_sonos_playlist_by_attr(self, attr_name, match):
"""Return the first Sonos Playlist DidlPlaylistContainer that
matches the attribute specified.
Args:
attr_name (str): DidlPlaylistContainer attribute to compare. The
most useful being: 'title' and 'item_id'.
match (str): Value to match.
Returns:
(:class:`~.soco.data_structures.DidlPlaylistContainer`): The
first matching playlist object.
Raises:
(AttributeError): If indicated attribute name does not exist.
(ValueError): If a match can not be found.
Example::
device.get_sonos_playlist_by_attr('title', 'Foo')
device.get_sonos_playlist_by_attr('item_id', 'SQ:3')
"""
for sonos_playlist in self.get_sonos_playlists():
if getattr(sonos_playlist, attr_name) == match:
return sonos_playlist
raise ValueError('No match on "{0}" for value "{1}"'.format(attr_name,
match))
|
amelchio/pysonos
|
pysonos/core.py
|
SoCo.repeat
|
python
|
def repeat(self, repeat):
shuffle = self.shuffle
self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, repeat)]
|
Set the queue's repeat option
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/core.py#L444-L447
| null |
class SoCo(_SocoSingletonBase):
"""A simple class for controlling a Sonos speaker.
For any given set of arguments to __init__, only one instance of this class
may be created. Subsequent attempts to create an instance with the same
arguments will return the previously created instance. This means that all
SoCo instances created with the same ip address are in fact the *same* SoCo
instance, reflecting the real world position.
.. rubric:: Basic Methods
.. autosummary::
play_from_queue
play
play_uri
pause
stop
seek
next
previous
mute
volume
play_mode
cross_fade
ramp_to_volume
get_current_track_info
get_speaker_info
get_current_transport_info
.. rubric:: Queue Management
.. autosummary::
get_queue
queue_size
add_to_queue
add_uri_to_queue
add_multiple_to_queue
remove_from_queue
clear_queue
.. rubric:: Group Management
.. autosummary::
group
partymode
join
unjoin
all_groups
all_zones
visible_zones
.. rubric:: Player Identity and Settings
.. autosummary::
player_name
uid
household_id
is_visible
is_bridge
is_coordinator
is_soundbar
bass
treble
loudness
night_mode
dialog_mode
status_light
.. rubric:: Playlists and Favorites
.. autosummary::
get_sonos_playlists
create_sonos_playlist
create_sonos_playlist_from_queue
remove_sonos_playlist
add_item_to_sonos_playlist
reorder_sonos_playlist
clear_sonos_playlist
move_in_sonos_playlist
remove_from_sonos_playlist
get_sonos_playlist_by_attr
get_favorite_radio_shows
get_favorite_radio_stations
get_sonos_favorites
.. rubric:: Miscellaneous
.. autosummary::
switch_to_line_in
is_playing_radio
is_playing_line_in
is_playing_tv
switch_to_tv
set_sleep_timer
get_sleep_timer
.. warning::
Properties on this object are not generally cached and may obtain
information over the network, so may take longer than expected to set
or return a value. It may be a good idea for you to cache the value in
your own code.
.. note::
Since all methods/properties on this object will result in an UPnP
request, they might result in an exception without it being mentioned
in the Raises section.
In most cases, the exception will be a
:class:`soco.exceptions.SoCoUPnPException`
(if the player returns an UPnP error code), but in special cases
it might also be another :class:`soco.exceptions.SoCoException`
or even a `requests` exception.
"""
_class_group = 'SoCo'
    # pylint: disable=super-on-old-class
    def __init__(self, ip_address):
        """
        Args:
            ip_address (str): The IPv4 address of the player, e.g.
                ``'192.168.1.68'``. IPv6 addresses are not supported.
        Raises:
            ValueError: If ``ip_address`` is not a valid IPv4 address string.
        """
        # Note: Creation of a SoCo instance should be as cheap and quick as
        # possible. Do not make any network calls here
        super(SoCo, self).__init__()
        # Check if ip_address is a valid IPv4 representation.
        # Sonos does not (yet) support IPv6
        try:
            socket.inet_aton(ip_address)
        except socket.error:
            raise ValueError("Not a valid IP address string")
        #: The speaker's ip address
        self.ip_address = ip_address
        self.speaker_info = {}  # Stores information about the current speaker
        # The services which we use
        # pylint: disable=invalid-name
        self.avTransport = AVTransport(self)
        self.contentDirectory = ContentDirectory(self)
        self.deviceProperties = DeviceProperties(self)
        self.renderingControl = RenderingControl(self)
        self.zoneGroupTopology = ZoneGroupTopology(self)
        self.alarmClock = AlarmClock(self)
        self.systemProperties = SystemProperties(self)
        self.musicServices = MusicServices(self)
        self.music_library = MusicLibrary(self)
        # Some private attributes
        # Lazily-populated caches; filled in by _parse_zone_group_state and
        # the corresponding properties.
        self._all_zones = set()
        self._groups = set()
        self._is_bridge = None
        self._is_coordinator = False
        self._is_soundbar = None
        self._player_name = None
        self._uid = None
        self._household_id = None
        self._visible_zones = set()
        self._zgs_cache = Cache(default_timeout=5)
        self._zgs_result = None
        _LOG.debug("Created SoCo instance for ip: %s", ip_address)
def __str__(self):
return "<{0} object at ip {1}>".format(
self.__class__.__name__, self.ip_address)
def __repr__(self):
return '{0}("{1}")'.format(self.__class__.__name__, self.ip_address)
    @property
    def player_name(self):
        """str: The speaker's name."""
        # We could get the name like this:
        # result = self.deviceProperties.GetZoneAttributes()
        # return result["CurrentZoneName"]
        # but it is probably quicker to get it from the group topology
        # and take advantage of any caching
        # _parse_zone_group_state populates self._player_name as a side
        # effect.
        self._parse_zone_group_state()
        return self._player_name
@player_name.setter
def player_name(self, playername):
"""Set the speaker's name."""
self.deviceProperties.SetZoneAttributes([
('DesiredZoneName', playername),
('DesiredIcon', ''),
('DesiredConfiguration', '')
])
    @property
    def uid(self):
        """str: A unique identifier.
        Looks like: ``'RINCON_000XXXXXXXXXX1400'``
        The value is cached after the first (network) lookup.
        """
        # Since this does not change over time (?) check whether we already
        # know the answer. If so, there is no need to go further
        if self._uid is not None:
            return self._uid
        # if not, we have to get it from the zone topology, which
        # is probably quicker than any alternative, since the zgt is probably
        # cached. This will set self._uid for us for next time, so we won't
        # have to do this again
        self._parse_zone_group_state()
        return self._uid
        # An alternative way of getting the uid is as follows:
        # self.device_description_url = \
        #    'http://{0}:1400/xml/device_description.xml'.format(
        #     self.ip_address)
        # response = requests.get(self.device_description_url).text
        # tree = XML.fromstring(response.encode('utf-8'))
        # udn = tree.findtext('.//{urn:schemas-upnp-org:device-1-0}UDN')
        # # the udn has a "uuid:" prefix before the uid, so we need to strip it
        # self._uid = uid = udn[5:]
        # return uid
@property
def household_id(self):
"""str: A unique identifier for all players in a household.
Looks like: ``'Sonos_asahHKgjgJGjgjGjggjJgjJG34'``
"""
# Since this does not change over time (?) check whether we already
# know the answer. If so, return the cached version
if self._household_id is None:
self._household_id = self.deviceProperties.GetHouseholdID()[
'CurrentHouseholdID']
return self._household_id
    @property
    def is_visible(self):
        """bool: Is this zone visible?
        A zone might be invisible if, for example, it is a bridge, or the slave
        part of stereo pair.
        """
        # We could do this:
        # invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
        # but it is better to do it in the following way, which uses the
        # zone group topology, to capitalise on any caching.
        # Membership test relies on SoCo's per-ip singleton behaviour.
        return self in self.visible_zones
    @property
    def is_bridge(self):
        """bool: Is this zone a bridge?"""
        # Since this does not change over time (?) check whether we already
        # know the answer. If so, there is no need to go further
        if self._is_bridge is not None:
            return self._is_bridge
        # if not, we have to get it from the zone topology. This will set
        # self._is_bridge for us for next time, so we won't have to do this
        # again
        self._parse_zone_group_state()
        return self._is_bridge
    @property
    def is_coordinator(self):
        """bool: Is this zone a group coordinator?"""
        # We could do this:
        # invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
        # but it is better to do it in the following way, which uses the
        # zone group topology, to capitalise on any caching.
        # Refreshing the topology updates self._is_coordinator in place.
        self._parse_zone_group_state()
        return self._is_coordinator
@property
def is_soundbar(self):
"""bool: Is this zone a soundbar (i.e. has night mode etc.)?"""
if self._is_soundbar is None:
if not self.speaker_info:
self.get_speaker_info()
model_name = self.speaker_info['model_name'].lower()
self._is_soundbar = any(model_name.endswith(s) for s in SOUNDBARS)
return self._is_soundbar
    @property
    def play_mode(self):
        """str: The queue's play mode.
        Case-insensitive options are:
        *   ``'NORMAL'`` -- Turns off shuffle and repeat.
        *   ``'REPEAT_ALL'`` -- Turns on repeat and turns off shuffle.
        *   ``'SHUFFLE'`` -- Turns on shuffle *and* repeat. (It's
            strange, I know.)
        *   ``'SHUFFLE_NOREPEAT'`` -- Turns on shuffle and turns off
            repeat.
        """
        # The play mode is part of the player's transport settings.
        result = self.avTransport.GetTransportSettings([
            ('InstanceID', 0),
        ])
        return result['PlayMode']
@play_mode.setter
def play_mode(self, playmode):
"""Set the speaker's mode."""
playmode = playmode.upper()
if playmode not in PLAY_MODES.keys():
raise KeyError("'%s' is not a valid play mode" % playmode)
self.avTransport.SetPlayMode([
('InstanceID', 0),
('NewPlayMode', playmode)
])
@property
def shuffle(self):
"""bool: The queue's shuffle option.
True if enabled, False otherwise.
"""
return PLAY_MODES[self.play_mode][0]
@shuffle.setter
def shuffle(self, shuffle):
    """Set the queue's shuffle option."""
    # Shuffle and repeat are both encoded in the single play-mode
    # setting, so keep the current repeat value.
    self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, self.repeat)]
@property
def repeat(self):
    """bool: The queue's repeat option.

    True if enabled, False otherwise.

    Might also be ``'ONE'`` if repeating the same title is enabled
    (not supported by the official controller).
    """
    # PLAY_MODES maps a mode name to a (shuffle, repeat) pair.
    mode = self.play_mode
    return PLAY_MODES[mode][1]
@repeat.setter
def repeat(self, repeat):
    """Set the queue's repeat option."""
    # BUG FIX: the body of this setter was missing, which left
    # ``@repeat.setter`` stacked directly onto the following property
    # definition -- silently breaking both ``repeat`` assignment and
    # the ``cross_fade`` property. Mirror the shuffle setter: keep the
    # current shuffle value and combine it with the requested repeat
    # value into a single play mode.
    shuffle = self.shuffle
    self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, repeat)]

@property
@only_on_master  # Only for symmetry with the setter
def cross_fade(self):
    """bool: The speaker's cross fade state.

    True if enabled, False otherwise
    """
    response = self.avTransport.GetCrossfadeMode([
        ('InstanceID', 0),
    ])
    cross_fade_state = response['CrossfadeMode']
    return bool(int(cross_fade_state))
@cross_fade.setter
@only_on_master
def cross_fade(self, crossfade):
    """Set the speaker's cross fade state."""
    # The service expects the flag as the string '1' or '0'.
    self.avTransport.SetCrossfadeMode([
        ('InstanceID', 0),
        ('CrossfadeMode', '1' if crossfade else '0'),
    ])
def ramp_to_volume(self, volume, ramp_type='SLEEP_TIMER_RAMP_TYPE'):
    """Smoothly change the volume.

    Three ramp types are available:

    * ``'SLEEP_TIMER_RAMP_TYPE'`` (default): Linear ramp from the
      current volume up or down to the new volume, at 1.25 steps per
      second (e.g. 50 -> 30 takes 16 seconds).
    * ``'ALARM_RAMP_TYPE'``: Resets the volume to zero, waits about
      30 seconds, then ramps up at 2.5 steps per second (e.g. volume
      30 takes 12 seconds, excluding the wait).
    * ``'AUTOPLAY_RAMP_TYPE'``: Resets the volume to zero, then ramps
      up quickly at 50 steps per second (e.g. volume 30 takes 0.6
      seconds).

    The ramp rate is selected by Sonos based on the chosen ramp type
    and the resulting transition time is returned. This method is non
    blocking and has no network overhead once sent.

    Args:
        volume (int): The new volume.
        ramp_type (str, optional): The desired ramp type, as described
            above.

    Returns:
        int: The ramp time in seconds, rounded down. Note that this
        does not include the wait time.
    """
    response = self.renderingControl.RampToVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('RampType', ramp_type),
        ('DesiredVolume', volume),
        ('ResetVolumeAfter', False),
        ('ProgramURI', ''),
    ])
    return int(response['RampTime'])
@only_on_master
def play_from_queue(self, index, start=True):
    """Play a track from the queue by index.

    The index number is required as an argument, where the first index
    is 0.

    Args:
        index (int): 0-based index of the track to play
        start (bool): If the item that has been set should start playing
    """
    # The queue URI embeds this speaker's UID, so make sure the
    # speaker info has been fetched.
    if not self.speaker_info:
        self.get_speaker_info()
    # First, point the transport at this speaker's own queue ...
    queue_uri = 'x-rincon-queue:{0}#0'.format(self.uid)
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', queue_uri),
        ('CurrentURIMetaData', ''),
    ])
    # ... then seek to the requested track (Sonos counts from 1) ...
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'TRACK_NR'),
        ('Target', index + 1),
    ])
    # ... and finally start playback if asked to.
    if start:
        self.play()
@only_on_master
def play(self):
    """Play the currently selected track."""
    self.avTransport.Play([('InstanceID', 0), ('Speed', 1)])
@only_on_master
# pylint: disable=too-many-arguments
def play_uri(self, uri='', meta='', title='', start=True,
             force_radio=False):
    """Play a URI.

    Playing a URI will replace what was playing with the stream given
    by the URI. For some streams at least a title is required as
    metadata. This can be provided using the `meta` argument or the
    `title` argument. If the `title` argument is provided minimal
    metadata will be generated. If `meta` argument is provided the
    `title` argument is ignored.

    Args:
        uri (str): URI of the stream to be played.
        meta (str): The metadata to show in the player, DIDL format.
        title (str): The title to show in the player (if no meta).
        start (bool): If the URI that has been set should start playing.
        force_radio (bool): forces a uri to play as a radio stream.

    On a Sonos controller the display format (radio, smart radio or
    track format, with progressively more seek/next/previous controls)
    is chosen from the URI prefix. `x-sonosapi-stream:`,
    `x-sonosapi-radio:`, `x-rincon-mp3radio:` and `hls-radio:` default
    to radio or smart radio; `x-file-cifs:`, `aac:`, `http:`,
    `https:`, `x-sonos-spotify:`, `x-sonosapi-hls-static:` and
    `x-sonos-http:` default to track format.

    Some URIs that default to track format (typically `http:`,
    `https:` or `aac:`) may really be radio streams. Set
    `force_radio=True` to rewrite the scheme to `x-rincon-mp3radio:`
    so the stream is displayed and controlled as radio.

    .. note:: A change in Sonos® (as of at least version 6.4.2) means
       the devices no longer accept ordinary `http:`/`https:` URIs for
       radio stations, so a few streams may fail unless forced to
       radio format with `force_radio=True`.
    """
    if meta == '' and title != '':
        meta_template = (
            '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements'
            '/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" '
            'xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/" '
            'xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">'
            '<item id="R:0/0/0" parentID="R:0/0" restricted="true">'
            '<dc:title>{title}</dc:title><upnp:class>'
            'object.item.audioItem.audioBroadcast</upnp:class><desc '
            'id="cdudn" nameSpace="urn:schemas-rinconnetworks-com:'
            'metadata-1-0/">{service}</desc></item></DIDL-Lite>')
        # Radio stations need at least a title to play; the title must
        # be XML-escaped because it is embedded in DIDL metadata.
        meta = meta_template.format(
            title=escape(title), service='SA_RINCON65031_')
    if force_radio:
        # Rewrite the scheme so the stream is treated as radio.
        scheme, sep, rest = uri.partition(':')
        if sep and scheme:
            uri = 'x-rincon-mp3radio:' + rest
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', uri),
        ('CurrentURIMetaData', meta),
    ])
    # The stream is set; now start it if needed.
    if start:
        return self.play()
    return False
@only_on_master
def pause(self):
    """Pause the currently playing track."""
    self.avTransport.Pause([('InstanceID', 0), ('Speed', 1)])
@only_on_master
def stop(self):
    """Stop the currently playing track."""
    self.avTransport.Stop([('InstanceID', 0), ('Speed', 1)])
@only_on_master
def seek(self, timestamp):
    """Seek to a given timestamp in the current track, specified in the
    format of HH:MM:SS or H:MM:SS.

    Raises:
        ValueError: if the given timestamp is invalid.
    """
    # Validate before issuing the network call.
    if re.match(r'^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$', timestamp) is None:
        raise ValueError('invalid timestamp, use HH:MM:SS format')
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'REL_TIME'),
        ('Target', timestamp),
    ])
@only_on_master
def next(self):
    """Go to the next track.

    Keep in mind that next() can return errors for a variety of
    reasons. For example, if the Sonos is streaming Pandora and you
    call next() several times in quick succession an error code will
    likely be returned (since Pandora limits how many songs can be
    skipped).
    """
    self.avTransport.Next([('InstanceID', 0), ('Speed', 1)])
@only_on_master
def previous(self):
    """Go back to the previously played track.

    Keep in mind that previous() can return errors for a variety of
    reasons. For example, previous() will return an error code (error
    code 701) if the Sonos is streaming Pandora since you can't go
    back on tracks.
    """
    self.avTransport.Previous([('InstanceID', 0), ('Speed', 1)])
@property
def mute(self):
    """bool: The speaker's mute state.

    True if muted, False otherwise.
    """
    state = self.renderingControl.GetMute([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])['CurrentMute']
    # The service reports '1'/'0'.
    return bool(int(state))
@mute.setter
def mute(self, mute):
    """Mute (or unmute) the speaker."""
    self.renderingControl.SetMute([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredMute', '1' if mute else '0'),
    ])
@property
def volume(self):
    """int: The speaker's volume.

    An integer between 0 and 100.
    """
    response = self.renderingControl.GetVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(response['CurrentVolume'])
@volume.setter
def volume(self, volume):
    """Set the speaker's volume."""
    # Clamp to the valid 0..100 range rather than erroring.
    clamped = min(max(int(volume), 0), 100)
    self.renderingControl.SetVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredVolume', clamped),
    ])
@property
def bass(self):
    """int: The speaker's bass EQ.

    An integer between -10 and 10.
    """
    response = self.renderingControl.GetBass([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(response['CurrentBass'])
@bass.setter
def bass(self, bass):
    """Set the speaker's bass."""
    # Clamp to the valid -10..10 range rather than erroring.
    clamped = min(max(int(bass), -10), 10)
    self.renderingControl.SetBass([
        ('InstanceID', 0),
        ('DesiredBass', clamped),
    ])
@property
def treble(self):
    """int: The speaker's treble EQ.

    An integer between -10 and 10.
    """
    response = self.renderingControl.GetTreble([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(response['CurrentTreble'])
@treble.setter
def treble(self, treble):
    """Set the speaker's treble."""
    # Clamp to the valid -10..10 range rather than erroring.
    clamped = min(max(int(treble), -10), 10)
    self.renderingControl.SetTreble([
        ('InstanceID', 0),
        ('DesiredTreble', clamped),
    ])
@property
def loudness(self):
    """bool: The Sonos speaker's loudness compensation.

    True if on, False otherwise.

    Loudness is a complicated topic. You can find a nice summary about this
    feature here: http://forums.sonos.com/showthread.php?p=4698#post4698
    """
    response = self.renderingControl.GetLoudness([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return bool(int(response['CurrentLoudness']))
@loudness.setter
def loudness(self, loudness):
    """Switch on/off the speaker's loudness compensation."""
    self.renderingControl.SetLoudness([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredLoudness', '1' if loudness else '0'),
    ])
@property
def night_mode(self):
    """bool: The speaker's night mode.

    True if on, False if off, None if not supported.
    """
    # Only soundbars expose the NightMode EQ setting.
    if not self.is_soundbar:
        return None
    response = self.renderingControl.GetEQ([
        ('InstanceID', 0),
        ('EQType', 'NightMode'),
    ])
    return bool(int(response['CurrentValue']))
@night_mode.setter
def night_mode(self, night_mode):
    """Switch on/off the speaker's night mode.

    :param night_mode: Enable or disable night mode
    :type night_mode: bool
    :raises NotSupportedException: If the device does not support
        night mode.
    """
    if not self.is_soundbar:
        raise NotSupportedException(
            'This device does not support night mode')
    self.renderingControl.SetEQ([
        ('InstanceID', 0),
        ('EQType', 'NightMode'),
        ('DesiredValue', int(night_mode)),
    ])
@property
def dialog_mode(self):
    """bool: Get the Sonos speaker's dialog mode.

    True if on, False if off, None if not supported.
    """
    # Only soundbars expose the DialogLevel EQ setting.
    if not self.is_soundbar:
        return None
    response = self.renderingControl.GetEQ([
        ('InstanceID', 0),
        ('EQType', 'DialogLevel'),
    ])
    return bool(int(response['CurrentValue']))
@dialog_mode.setter
def dialog_mode(self, dialog_mode):
    """Switch on/off the speaker's dialog mode.

    :param dialog_mode: Enable or disable dialog mode
    :type dialog_mode: bool
    :raises NotSupportedException: If the device does not support
        dialog mode.
    """
    if not self.is_soundbar:
        raise NotSupportedException(
            'This device does not support dialog mode')
    self.renderingControl.SetEQ([
        ('InstanceID', 0),
        ('EQType', 'DialogLevel'),
        ('DesiredValue', int(dialog_mode)),
    ])
def _parse_zone_group_state(self):
    """Retrieve and parse the Zone Group State.

    The Zone Group State XML describes every group and every zone
    player on the network. Parsing it populates ``self._groups``,
    ``self._all_zones`` and ``self._visible_zones``, and sets the
    ``_is_coordinator``/``_is_bridge`` flags on each zone instance.
    """
    # The topology XML looks like this (simplified):
    #
    # <ZoneGroups>
    #   <ZoneGroup Coordinator="RINCON_xxx" ID="RINCON_xxx:0">
    #     <ZoneGroupMember
    #         UUID="RINCON_yyy"
    #         ZoneName="Living Room"
    #         Location="http://192.168.1.101:1400/xml/..."
    #         Invisible="1" IsZoneBridge="1" ... />
    #     ...
    #   </ZoneGroup>
    #   ...
    # </ZoneGroups>

    def parse_zone_group_member(member_element):
        """Parse a ZoneGroupMember or Satellite element from Zone Group
        State, create a SoCo instance for the member, set basic
        attributes and return it."""
        # SoCo instances are singletons per IP address, so this is
        # cheap if the zone has already been seen, and useful if it
        # hasn't: we can then update its attributes.
        member_attribs = member_element.attrib
        ip_addr = member_attribs['Location'].\
            split('//')[1].split(':')[0]
        zone = config.SOCO_CLASS(ip_addr)
        # Share our cache with the zone.
        zone._zgs_cache = self._zgs_cache
        # The uid doesn't change, but it is harmless to (re)set it, in
        # case the zone is as yet unseen.
        zone._uid = member_attribs['UUID']
        zone._player_name = member_attribs['ZoneName']
        # Record the zone in the full set, and in the visible set if
        # appropriate.
        if member_attribs.get('Invisible') != '1':
            self._visible_zones.add(zone)
        self._all_zones.add(zone)
        return zone

    # This is called quite frequently, so it is worth optimising.
    # A short-lived network cache avoids re-fetching, and comparing
    # against the previously parsed payload avoids re-parsing.
    zgs = self.zoneGroupTopology.GetZoneGroupState(
        cache=self._zgs_cache)['ZoneGroupState']
    if zgs == self._zgs_result:
        return
    self._zgs_result = zgs
    tree = XML.fromstring(zgs.encode('utf-8'))
    # Start from a clean slate.
    self._groups.clear()
    self._all_zones.clear()
    self._visible_zones.clear()
    # With some firmware versions, the response is wrapped in a
    # ZoneGroupState element. BUG FIX: test the find() result against
    # None explicitly -- ``tree.find('ZoneGroups') or tree`` relied on
    # Element truthiness, which is False for a childless element
    # (deprecated behaviour that raises a FutureWarning), so an empty
    # ZoneGroups element was wrongly ignored.
    zone_groups = tree.find('ZoneGroups')
    if zone_groups is not None:
        tree = zone_groups
    # Loop over each ZoneGroup element.
    for group_element in tree.findall('ZoneGroup'):
        coordinator_uid = group_element.attrib['Coordinator']
        group_uid = group_element.attrib['ID']
        group_coordinator = None
        members = set()
        for member_element in group_element.findall('ZoneGroupMember'):
            zone = parse_zone_group_member(member_element)
            # The member whose UUID matches the group's Coordinator
            # attribute is the group coordinator.
            zone._is_coordinator = (zone._uid == coordinator_uid)
            if zone._is_coordinator:
                group_coordinator = zone
            # is_bridge doesn't change, but it does no real harm to
            # set/reset it here, in case the zone has not been seen
            # before.
            zone._is_bridge = (
                member_element.attrib.get('IsZoneBridge') == '1')
            members.add(zone)
            # Satellite elements are processed like ordinary members,
            # but a satellite is assumed never to be a bridge or a
            # coordinator, so no flag handling is needed.
            for satellite_element in member_element.findall('Satellite'):
                members.add(parse_zone_group_member(satellite_element))
        # Record the fully populated group.
        self._groups.add(ZoneGroup(group_uid, group_coordinator, members))
@property
def all_groups(self):
    """set of :class:`soco.groups.ZoneGroup`: All available groups."""
    self._parse_zone_group_state()
    # Return a copy so callers cannot mutate our internal state.
    return self._groups.copy()
@property
def group(self):
    """:class:`soco.groups.ZoneGroup`: The Zone Group of which this device
    is a member.

    None if this zone is a slave in a stereo pair.
    """
    # Scan the (cached) group topology for the group containing us.
    return next(
        (group for group in self.all_groups if self in group), None)
@property
def all_zones(self):
    """set of :class:`soco.groups.ZoneGroup`: All available zones."""
    self._parse_zone_group_state()
    # Return a copy so callers cannot mutate our internal state.
    return self._all_zones.copy()
@property
def visible_zones(self):
    """set of :class:`soco.groups.ZoneGroup`: All visible zones."""
    self._parse_zone_group_state()
    # Return a copy so callers cannot mutate our internal state.
    return self._visible_zones.copy()
def partymode(self):
    """Put all the speakers in the network in the same group, a.k.a Party
    Mode.

    This blog shows the initial research responsible for this:
    http://blog.travelmarx.com/2010/06/exploring-sonos-via-upnp.html

    The trick seems to be (only tested on a two-speaker setup) to tell
    each speaker which to join. There's probably a bit more to it if
    multiple groups have been defined.
    """
    # IDIOM FIX: the original built a throwaway list comprehension for
    # its side effects (and had to pylint-suppress it); a plain loop is
    # the idiomatic form.
    for zone in self.visible_zones:
        if zone is not self:
            zone.join(self)
def join(self, master):
    """Join this speaker to another "master" speaker."""
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', 'x-rincon:{0}'.format(master.uid)),
        ('CurrentURIMetaData', ''),
    ])
    # The topology has changed, so drop the cache and re-read it.
    self._zgs_cache.clear()
    self._parse_zone_group_state()
def unjoin(self):
    """Remove this speaker from a group.

    Seems to work ok even if you remove what was previously the group
    master from it's own group. If the speaker was not in a group also
    returns ok.
    """
    self.avTransport.BecomeCoordinatorOfStandaloneGroup([
        ('InstanceID', 0),
    ])
    # The topology has changed, so drop the cache and re-read it.
    self._zgs_cache.clear()
    self._parse_zone_group_state()
def switch_to_line_in(self, source=None):
    """Switch the speaker's input to line-in.

    Args:
        source (SoCo): The speaker whose line-in should be played.
            Default is line-in from the speaker itself.
    """
    uid = source.uid if source else self.uid
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', 'x-rincon-stream:{0}'.format(uid)),
        ('CurrentURIMetaData', ''),
    ])
@property
def is_playing_radio(self):
    """bool: Is the speaker playing radio?"""
    info = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    # Radio streams use the x-rincon-mp3radio: URI scheme.
    return bool(re.match(r'^x-rincon-mp3radio:', info['TrackURI']))
@property
def is_playing_line_in(self):
    """bool: Is the speaker playing line-in?"""
    info = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    # Line-in uses the x-rincon-stream: URI scheme.
    return bool(re.match(r'^x-rincon-stream:', info['TrackURI']))
@property
def is_playing_tv(self):
    """bool: Is the playbar speaker input from TV?"""
    info = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    # TV input uses the x-sonos-htastream: URI scheme.
    return bool(re.match(r'^x-sonos-htastream:', info['TrackURI']))
def switch_to_tv(self):
    """Switch the playbar speaker's input to TV."""
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', 'x-sonos-htastream:{0}:spdif'.format(self.uid)),
        ('CurrentURIMetaData', ''),
    ])
@property
def status_light(self):
    """bool: The white Sonos status light between the mute button and the
    volume up button on the speaker.

    True if on, otherwise False.
    """
    state = self.deviceProperties.GetLEDState()['CurrentLEDState']
    return state == "On"
@status_light.setter
def status_light(self, led_on):
    """Switch on/off the speaker's status light."""
    self.deviceProperties.SetLEDState([
        ('DesiredLEDState', 'On' if led_on else 'Off'),
    ])
def get_current_track_info(self):
    """Get information about the currently playing track.

    Returns:
        dict: A dictionary containing information about the currently
        playing track: playlist_position, duration, title, artist,
        album, position and an album_art link.

    If we're unable to return data for a field, we'll return an empty
    string. This can happen for all kinds of reasons so be sure to
    check values. For example, a track may not have complete metadata
    and be missing an album name. In this case track['album'] will be
    an empty string.

    .. note:: Calling this method on a slave in a group will not
        return the track the group is playing, but the last track
        this speaker was playing.
    """
    response = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    track = {
        'title': '',
        'artist': '',
        'album': '',
        'album_art': '',
        'playlist_position': response['Track'],
        'duration': response['TrackDuration'],
        'uri': response['TrackURI'],
        'position': response['RelTime'],
        # Keep the raw metadata so a client can restart this URI later.
        'metadata': response['TrackMetaData'],
    }
    metadata = track['metadata']
    # Duration seems to be '0:00:00' when listening to radio.
    if metadata != '' and track['duration'] == '0:00:00':
        dom = XML.fromstring(really_utf8(metadata))
        # Radio streams put "<artist> - <title>" in streamContent.
        trackinfo = dom.findtext('.//{urn:schemas-rinconnetworks-com:'
                                 'metadata-1-0/}streamContent') or ''
        artist, sep, title = trackinfo.partition(' - ')
        if sep:
            track['artist'] = artist
            track['title'] = title
        else:
            # Might find some kind of title anyway in metadata.
            track['title'] = dom.findtext('.//{http://purl.org/dc/'
                                          'elements/1.1/}title')
            if not track['title']:
                track['title'] = trackinfo
    # If the speaker is playing from the line-in source, querying for
    # track metadata will return "NOT_IMPLEMENTED".
    elif metadata not in ('', 'NOT_IMPLEMENTED', None):
        # Track metadata is returned in DIDL-Lite format.
        dom = XML.fromstring(really_utf8(metadata))
        track['title'] = dom.findtext(
            './/{http://purl.org/dc/elements/1.1/}title') or ''
        track['artist'] = dom.findtext(
            './/{http://purl.org/dc/elements/1.1/}creator') or ''
        track['album'] = dom.findtext(
            './/{urn:schemas-upnp-org:metadata-1-0/upnp/}album') or ''
        art_url = dom.findtext(
            './/{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')
        if art_url is not None:
            track['album_art'] = \
                self.music_library.build_album_art_full_uri(art_url)
    return track
def get_speaker_info(self, refresh=False, timeout=None):
    """Get information about the Sonos speaker.

    Arguments:
        refresh(bool): Refresh the speaker info cache.
        timeout: How long to wait for the server to send
            data before giving up, as a float, or a
            `(connect timeout, read timeout)` tuple
            e.g. (3, 5). Default is no timeout.

    Returns:
        dict: Information about the Sonos speaker, such as the UID,
        MAC Address, and Zone Name.
    """
    # Serve the cached copy unless a refresh is explicitly requested.
    if self.speaker_info and refresh is False:
        return self.speaker_info
    response = requests.get('http://' + self.ip_address +
                            ':1400/xml/device_description.xml',
                            timeout=timeout)
    dom = XML.fromstring(response.content)
    device = dom.find('{urn:schemas-upnp-org:device-1-0}device')
    if device is None:
        return None
    ns = '{urn:schemas-upnp-org:device-1-0}'
    info = self.speaker_info
    info['zone_name'] = device.findtext(ns + 'roomName')
    # There is no zone icon in device_description.xml, so record the
    # player icon instead.
    info['player_icon'] = device.findtext(
        ns + 'iconList/' + ns + 'icon/' + ns + 'url')
    info['uid'] = self.uid
    info['serial_number'] = device.findtext(ns + 'serialNum')
    info['software_version'] = device.findtext(ns + 'softwareVersion')
    info['hardware_version'] = device.findtext(ns + 'hardwareVersion')
    info['model_number'] = device.findtext(ns + 'modelNumber')
    info['model_name'] = device.findtext(ns + 'modelName')
    info['display_version'] = device.findtext(ns + 'displayVersion')
    # No MAC address in the document -- extract it from the serial
    # number, which is of the form "<MAC>:...".
    info['mac_address'] = info['serial_number'].split(':')[0]
    return info
def get_current_transport_info(self):
    """Get the current playback state.

    Returns:
        dict: The following information about the
        speaker's playing state:

        * current_transport_state (``PLAYING``, ``TRANSITIONING``,
          ``PAUSED_PLAYBACK``, ``STOPPED``)
        * current_transport_status (OK, ?)
        * current_speed(1, ?)

    This allows us to know if speaker is playing or not. Don't know
    other states of CurrentTransportStatus and CurrentSpeed.
    """
    response = self.avTransport.GetTransportInfo([
        ('InstanceID', 0),
    ])
    return {
        'current_transport_state': response['CurrentTransportState'],
        'current_transport_status': response['CurrentTransportStatus'],
        'current_transport_speed': response['CurrentSpeed'],
    }
def get_queue(self, start=0, max_items=100, full_album_art_uri=False):
    """Get information about the queue.

    :param start: Starting number of returned matches
    :param max_items: Maximum number of returned matches
    :param full_album_art_uri: If the album art URI should include the
        IP address
    :returns: A :py:class:`~.soco.data_structures.Queue` object

    This method is heavly based on Sam Soffes (aka soffes) ruby
    implementation
    """
    response = self.contentDirectory.Browse([
        ('ObjectID', 'Q:0'),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', ''),
    ])
    result = response['Result']
    metadata = {camel_to_underscore(tag): int(response[tag])
                for tag in ('NumberReturned', 'TotalMatches', 'UpdateID')}
    # Even an empty queue should yield a Result; guard just in case.
    if not result:
        return Queue([], **metadata)
    queue = []
    for item in from_didl_string(result):
        # Optionally qualify the album art URI with the speaker's IP.
        if full_album_art_uri:
            self.music_library._update_album_art_to_full_uri(item)
        queue.append(item)
    return Queue(queue, **metadata)
@property
def queue_size(self):
    """int: Size of the queue."""
    response = self.contentDirectory.Browse([
        ('ObjectID', 'Q:0'),
        ('BrowseFlag', 'BrowseMetadata'),
        ('Filter', '*'),
        ('StartingIndex', 0),
        ('RequestedCount', 1),
        ('SortCriteria', ''),
    ])
    dom = XML.fromstring(really_utf8(response['Result']))
    # The queue container carries the size in its childCount attribute.
    container = dom.find(
        '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container')
    if container is None:
        return None
    child_count = container.get('childCount')
    if child_count is None:
        return None
    return int(child_count)
def get_sonos_playlists(self, *args, **kwargs):
    """Convenience method for
    `get_music_library_information('sonos_playlists')`.

    Refer to the docstring for that method
    """
    return self.music_library.get_music_library_information(
        'sonos_playlists', *args, **kwargs)
@only_on_master
def add_uri_to_queue(self, uri, position=0, as_next=False):
    """Add the URI to the queue.

    For arguments and return value see `add_to_queue`.
    """
    # FIXME: The res.protocol_info should probably represent the mime
    # type etc of the uri. But this seems OK.
    resources = [
        DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
    item = DidlObject(resources=resources, title='',
                      parent_id='', item_id='')
    return self.add_to_queue(item, position, as_next)
@only_on_master
def add_to_queue(self, queueable_item, position=0, as_next=False):
    """Add a queueable item to the queue.

    Args:
        queueable_item (DidlObject or MusicServiceItem): The item to be
            added to the queue
        position (int): The index (1-based) at which the URI should be
            added. Default is 0 (add URI at the end of the queue).
        as_next (bool): Whether this URI should be played as the next
            track in shuffle mode. This only works if
            `play_mode=SHUFFLE`.

    Returns:
        int: The index of the new item in the queue.
    """
    response = self.avTransport.AddURIToQueue([
        ('InstanceID', 0),
        ('EnqueuedURI', queueable_item.resources[0].uri),
        ('EnqueuedURIMetaData', to_didl_string(queueable_item)),
        ('DesiredFirstTrackNumberEnqueued', position),
        ('EnqueueAsNext', int(as_next)),
    ])
    return int(response['FirstTrackNumberEnqueued'])
def add_multiple_to_queue(self, items, container=None):
    """Add a sequence of items to the queue.

    Args:
        items (list): A sequence of items to the be added to the queue
        container (DidlObject, optional): A container object which
            includes the items.
    """
    if container is None:
        # Sonos accepts empty container details.
        container_uri = ''
        container_metadata = ''
    else:
        container_uri = container.resources[0].uri
        container_metadata = to_didl_string(container)
    # Each request can carry at most 16 items, so send in chunks.
    chunk_size = 16
    item_list = list(items)
    for index in range(0, len(item_list), chunk_size):
        chunk = item_list[index:index + chunk_size]
        uris = ' '.join(item.resources[0].uri for item in chunk)
        uri_metadata = ' '.join(to_didl_string(item) for item in chunk)
        self.avTransport.AddMultipleURIsToQueue([
            ('InstanceID', 0),
            ('UpdateID', 0),
            ('NumberOfURIs', len(chunk)),
            ('EnqueuedURIs', uris),
            ('EnqueuedURIsMetaData', uri_metadata),
            ('ContainerURI', container_uri),
            ('ContainerMetaData', container_metadata),
            ('DesiredFirstTrackNumberEnqueued', 0),
            ('EnqueueAsNext', 0),
        ])
@only_on_master
def remove_from_queue(self, index):
    """Remove a track from the queue by index. The index number is
    required as an argument, where the first index is 0.

    Args:
        index (int): The (0-based) index of the track to remove
    """
    # Queue items are addressed as 'Q:0/<1-based position>'.
    # TODO: what do the ObjectID/UpdateID parameters actually do?
    self.avTransport.RemoveTrackFromQueue([
        ('InstanceID', 0),
        ('ObjectID', 'Q:0/' + str(index + 1)),
        ('UpdateID', '0'),
    ])
@only_on_master
def clear_queue(self):
    """Remove all tracks from the queue."""
    self.avTransport.RemoveAllTracksFromQueue([('InstanceID', 0)])
@deprecated('0.13', "soco.music_library.get_favorite_radio_shows", '0.15')
def get_favorite_radio_shows(self, start=0, max_items=100):
    """Get favorite radio shows from Sonos' Radio app.

    Returns:
        dict: A dictionary containing the total number of favorites,
        the number of favorites returned, and the actual list of
        favorite radio shows, represented as a dictionary with `title`
        and `uri` keys.

    Depending on what you're building, you'll want to check to see if
    the total number of favorites is greater than the amount you
    requested (`max_items`); if it is, use `start` to page through and
    get the entire list of favorites.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures',
        stacklevel=2)
    return self.__get_favorites(RADIO_SHOWS, start, max_items)
@deprecated('0.13', "soco.music_library.get_favorite_radio_stations",
            '0.15')
def get_favorite_radio_stations(self, start=0, max_items=100):
    """Get favorite radio stations from Sonos' Radio app.

    See :meth:`get_favorite_radio_shows` for return type and remarks.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures',
        stacklevel=2)
    return self.__get_favorites(RADIO_STATIONS, start, max_items)
@deprecated('0.13', "soco.music_library.get_sonos_favorites", '0.15')
def get_sonos_favorites(self, start=0, max_items=100):
    """Get Sonos favorites.

    See :meth:`get_favorite_radio_shows` for return type and remarks.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures',
        stacklevel=2)
    return self.__get_favorites(SONOS_FAVORITES, start, max_items)
def __get_favorites(self, favorite_type, start=0, max_items=100):
    """Helper method for the ``get_favorite_radio_*`` /
    ``get_sonos_favorites`` methods.

    Args:
        favorite_type (str): One of `RADIO_STATIONS`, `RADIO_SHOWS` or
            `SONOS_FAVORITES`. Any other value falls back to
            `SONOS_FAVORITES`.
        start (int): Which number to start the retrieval from. Used for
            paging.
        max_items (int): The total number of results to return.

    Returns:
        dict: With keys 'total', 'returned' and 'favorites'. Each
        favorite is a dict with 'title' and 'uri' keys (plus 'meta'
        for Sonos favorites).
    """
    if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
        favorite_type = SONOS_FAVORITES

    # Sonos favorites live under 'FV:2'; radio shows/stations under
    # 'R:0/<type>'. Fix: use equality, not `is`, to compare strings --
    # identity comparison only worked by interning accident.
    if favorite_type == SONOS_FAVORITES:
        object_id = 'FV:2'
    else:
        object_id = 'R:0/{0}'.format(favorite_type)

    response = self.contentDirectory.Browse([
        ('ObjectID', object_id),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])

    favorites = []
    results_xml = response['Result']
    if results_xml != '':
        # Favorites are returned in DIDL-Lite format. Radio shows are
        # <container> elements; stations and Sonos favorites are <item>.
        metadata = XML.fromstring(really_utf8(results_xml))
        if favorite_type == RADIO_SHOWS:
            tag = '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container'
        else:
            tag = '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'
        for item in metadata.findall(tag):
            favorite = {
                'title': item.findtext(
                    '{http://purl.org/dc/elements/1.1/}title'),
                'uri': item.findtext(
                    '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res'),
            }
            if favorite_type == SONOS_FAVORITES:
                favorite['meta'] = item.findtext(
                    '{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD')
            favorites.append(favorite)

    return {
        'total': response['TotalMatches'],
        'returned': len(favorites),
        'favorites': favorites,
    }
def create_sonos_playlist(self, title):
    """Create a new empty Sonos playlist.

    Args:
        title: Name of the playlist

    :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
    """
    response = self.avTransport.CreateSavedQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('EnqueuedURI', ''),
        ('EnqueuedURIMetaData', ''),
    ])
    # The assigned id looks like 'SQ:<n>'; the number after the first
    # colon names the saved queue inside the device's playlist file.
    playlist_id = response['AssignedObjectID']
    number = playlist_id.split(':', 2)[1]
    resource = DidlResource(
        uri="file:///jffs/settings/savedqueues.rsq#{0}".format(number),
        protocol_info="x-rincon-playlist:*:*:*")
    return DidlPlaylistContainer(
        resources=[resource], title=title, parent_id='SQ:',
        item_id=playlist_id)
@only_on_master
# pylint: disable=invalid-name
def create_sonos_playlist_from_queue(self, title):
    """Create a new Sonos playlist from the current queue.

    Args:
        title: Name of the playlist

    :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
    """
    # Note: probably same as Queue service method SaveAsSonosPlaylist
    # but this has not been tested. This method is what the
    # controller uses.
    response = self.avTransport.SaveQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('ObjectID', '')
    ])
    # 'SQ:<n>' -- the number after the first colon names the saved
    # queue inside the device's playlist file.
    playlist_id = response['AssignedObjectID']
    number = playlist_id.split(':', 2)[1]
    resource = DidlResource(
        uri="file:///jffs/settings/savedqueues.rsq#{0}".format(number),
        protocol_info="x-rincon-playlist:*:*:*")
    return DidlPlaylistContainer(
        resources=[resource], title=title, parent_id='SQ:',
        item_id=playlist_id)
@only_on_master
def remove_sonos_playlist(self, sonos_playlist):
    """Remove a Sonos playlist.

    Args:
        sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove
            or the item_id (str).

    Returns:
        bool: True if succesful, False otherwise

    Raises:
        SoCoUPnPException: If sonos_playlist does not point to a valid
            object.
    """
    # Accept either the playlist object or a bare item_id string.
    if hasattr(sonos_playlist, 'item_id'):
        object_id = sonos_playlist.item_id
    else:
        object_id = sonos_playlist
    return self.contentDirectory.DestroyObject([('ObjectID', object_id)])
def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
    """Add a queueable item to a Sonos playlist.

    Args:
        queueable_item (DidlObject): the item to add to the Sonos'
            playlist
        sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to
            which the item should be added
    """
    # The call below needs the playlist's current update id.
    response, _ = self.music_library._music_lib_search(
        sonos_playlist.item_id, 0, 1)
    self.avTransport.AddURIToSavedQueue([
        ('InstanceID', 0),
        ('UpdateID', response['UpdateID']),
        ('ObjectID', sonos_playlist.item_id),
        ('EnqueuedURI', queueable_item.resources[0].uri),
        ('EnqueuedURIMetaData', to_didl_string(queueable_item)),
        # 2 ** 32 - 1 = 4294967295; this field always carries this
        # value. Playlist positions appear to be a 32-bit uint, so
        # asking for the largest possible index amounts to appending
        # at the end.
        ('AddAtIndex', 4294967295)
    ])
@only_on_master
def set_sleep_timer(self, sleep_time_seconds):
    """Set the sleep timer.

    Args:
        sleep_time_seconds (int or NoneType): How long to wait before
            turning off speaker in seconds, None to cancel a sleep
            timer. Maximum value of 86399

    Raises:
        SoCoException: Upon errors interacting with Sonos controller
        ValueError: Argument/Syntax errors
    """
    # Note: A value of None for sleep_time_seconds is valid, and needs
    # to be preserved distinctly separate from 0. 0 means go to sleep
    # now, which will immediately start the sound tapering, and could
    # be a useful feature, while None means cancel the current timer.
    #
    # Single source for the error text (it was duplicated before, and
    # the original literal also baked source indentation into the
    # message via a string line-continuation).
    error_message = ('invalid sleep_time_seconds, must be integer value '
                     'between 0 and 86399 inclusive or None')
    try:
        if sleep_time_seconds is None:
            sleep_time = ''
        else:
            # Render as 'H:MM:SS', which ConfigureSleepTimer accepts.
            sleep_time = format(
                datetime.timedelta(seconds=int(sleep_time_seconds))
            )
        self.avTransport.ConfigureSleepTimer([
            ('InstanceID', 0),
            ('NewSleepTimerDuration', sleep_time),
        ])
    except SoCoUPnPException as err:
        # The device reports out-of-range durations as UPnP error 402;
        # surface that as a ValueError like other argument errors.
        if 'Error 402 received' in str(err):
            raise ValueError(error_message)
        raise
    except ValueError:
        # int() failed on a non-numeric argument.
        raise ValueError(error_message)
@only_on_master
def get_sleep_timer(self):
    """Retrieve the remaining sleep time, if any.

    Returns:
        int or NoneType: Number of seconds left in timer. If there is
        no sleep timer currently set it will return None.
    """
    resp = self.avTransport.GetRemainingSleepTimerDuration([
        ('InstanceID', 0),
    ])
    remaining = resp['RemainingSleepTimerDuration']
    if not remaining:
        # An empty value means no timer is active.
        return None
    # The duration comes back as a 'H:MM:SS' style string.
    parts = remaining.split(':')
    return int(parts[0]) * 3600 + int(parts[1]) * 60 + int(parts[2])
@only_on_master
def reorder_sonos_playlist(self, sonos_playlist, tracks, new_pos,
                           update_id=0):
    """Reorder and/or remove tracks in a Sonos playlist.

    Each (track, new position) pair is a discrete modification: a
    position of ``None`` (or ``''`` when using strings) deletes the
    track, otherwise the track is moved to that position. Later pairs
    must anticipate the playlist state produced by the earlier ones.
    If ``tracks`` is a comma separated string, use a similar string
    for ``new_pos`` and order operations from the end of the list to
    the beginning.

    See the helper methods :py:meth:`clear_sonos_playlist`,
    :py:meth:`move_in_sonos_playlist` and
    :py:meth:`remove_from_sonos_playlist` for simplified usage.

    Examples:
        Swap the first two tracks::

            device.reorder_sonos_playlist('SQ:0', tracks=[0, ],
                                          new_pos=[1, ])

        Delete the second track::

            device.reorder_sonos_playlist(sonos_playlist,
                                          tracks=[1, ],
                                          new_pos=[None, ])

        Reverse the order of a playlist with 4 items::

            device.reorder_sonos_playlist(sonos_playlist,
                                          tracks='3,2,1,0',
                                          new_pos='0,1,2,3')

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            The Sonos playlist object or the item_id (str) of the
            Sonos playlist.
        tracks: 0-based track indices to operate on: a list of ints
            (or int-like strings), or a comma separated string such as
            ``"0,1"``.
        new_pos: New 0-based positions corresponding to ``tracks``,
            given in the same container type as ``tracks``. ``None``
            (or an empty string) removes the track.
        update_id (int): Operation id (default: 0). If 0, the current
            value is looked up first.

    Returns:
        dict: With keys ``change`` (size difference between original
        and resulting playlist), ``length`` (length of the resulting
        playlist) and ``update_id`` (the new update id).

    Raises:
        SoCoUPnPException: If the playlist does not exist or if the
            tracks and/or new_pos arguments are invalid.
    """
    # allow either a string 'SQ:10' or an object with item_id attribute.
    object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
    # Normalise tracks/new_pos into two parallel lists, one entry per
    # discrete operation.
    if isinstance(tracks, UnicodeType):
        track_list = [tracks, ]
        position_list = [new_pos, ]
    elif isinstance(tracks, int):
        track_list = [tracks, ]
        if new_pos is None:
            new_pos = ''
        position_list = [new_pos, ]
    else:
        track_list = [str(x) for x in tracks]
        position_list = [str(x) if x is not None else '' for x in new_pos]
    # track_list = ','.join(track_list)
    # position_list = ','.join(position_list)
    if update_id == 0:  # retrieve the update id for the object
        response, _ = self.music_library._music_lib_search(object_id, 0, 1)
        update_id = response['UpdateID']
    change = 0
    # Each non-trivial pair is its own UPnP request; the update id must
    # be carried forward from one response to the next request.
    for track, position in zip(track_list, position_list):
        if track == position:  # there is no move, a no-op
            continue
        response = self.avTransport.ReorderTracksInSavedQueue([
            ("InstanceID", 0),
            ("ObjectID", object_id),
            ("UpdateID", update_id),
            ("TrackList", track),
            ("NewPositionList", position),
        ])
        change += int(response['QueueLengthChange'])
        update_id = int(response['NewUpdateID'])
    # NOTE(review): if every pair is a no-op (or the lists are empty),
    # 'response' here is either unbound or the _music_lib_search result,
    # which has no 'NewQueueLength' key -- confirm intended behaviour.
    length = int(response['NewQueueLength'])
    response = {'change': change,
                'update_id': update_id,
                'length': length}
    return response
@only_on_master
def clear_sonos_playlist(self, sonos_playlist, update_id=0):
    """Remove all tracks from a Sonos playlist.

    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.clear_sonos_playlist(sonos_playlist)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        update_id (int): Optional update counter for the object. If
            left at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        ValueError: If sonos_playlist specified by string and is not
            found.
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # Resolve an item_id string to the playlist object if necessary.
    if not isinstance(sonos_playlist, DidlPlaylistContainer):
        sonos_playlist = self.get_sonos_playlist_by_attr('item_id',
                                                         sonos_playlist)
    count = self.music_library.browse(ml_item=sonos_playlist).total_matches
    if count == 0:
        # Nothing to remove.
        return {'change': 0, 'update_id': update_id, 'length': count}
    # Delete every track (position '' means remove).
    track_csv = ','.join(str(i) for i in range(count))
    return self.reorder_sonos_playlist(sonos_playlist, tracks=track_csv,
                                       new_pos='', update_id=update_id)
@only_on_master
def move_in_sonos_playlist(self, sonos_playlist, track, new_pos,
                           update_id=0):
    """Move a track to a new position within a Sonos Playlist.

    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.move_in_sonos_playlist(sonos_playlist, track=0, new_pos=1)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to move. The
            first track is track 0, just like indexing into a Python
            list.
        new_pos (int): **0**-based location to move the track.
        update_id (int): Optional update counter for the object. If
            left at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    return self.reorder_sonos_playlist(
        sonos_playlist, int(track), int(new_pos), update_id)
@only_on_master
def remove_from_sonos_playlist(self, sonos_playlist, track, update_id=0):
    """Remove a track from a Sonos Playlist.

    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.remove_from_sonos_playlist(sonos_playlist, track=0)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to remove. The
            first track is track 0, just like indexing into a Python
            list.
        update_id (int): Optional update counter for the object. If
            left at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # A new position of None signals deletion.
    return self.reorder_sonos_playlist(
        sonos_playlist, int(track), None, update_id)
@only_on_master
def get_sonos_playlist_by_attr(self, attr_name, match):
    """Return the first Sonos Playlist DidlPlaylistContainer that
    matches the attribute specified.

    Args:
        attr_name (str): DidlPlaylistContainer attribute to compare.
            The most useful being: 'title' and 'item_id'.
        match (str): Value to match.

    Returns:
        (:class:`~.soco.data_structures.DidlPlaylistContainer`): The
            first matching playlist object.

    Raises:
        (AttributeError): If indicated attribute name does not exist.
        (ValueError): If a match can not be found.

    Example::

        device.get_sonos_playlist_by_attr('title', 'Foo')
        device.get_sonos_playlist_by_attr('item_id', 'SQ:3')
    """
    # getattr without a default propagates AttributeError for a bad
    # attribute name, as documented above.
    found = next(
        (playlist for playlist in self.get_sonos_playlists()
         if getattr(playlist, attr_name) == match),
        None)
    if found is None:
        raise ValueError('No match on "{0}" for value "{1}"'.format(
            attr_name, match))
    return found
|
amelchio/pysonos
|
pysonos/core.py
|
SoCo.cross_fade
|
python
|
def cross_fade(self):
response = self.avTransport.GetCrossfadeMode([
('InstanceID', 0),
])
cross_fade_state = response['CrossfadeMode']
return bool(int(cross_fade_state))
|
bool: The speaker's cross fade state.
True if enabled, False otherwise
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/core.py#L451-L461
| null |
class SoCo(_SocoSingletonBase):
"""A simple class for controlling a Sonos speaker.
For any given set of arguments to __init__, only one instance of this class
may be created. Subsequent attempts to create an instance with the same
arguments will return the previously created instance. This means that all
SoCo instances created with the same ip address are in fact the *same* SoCo
instance, reflecting the real world position.
.. rubric:: Basic Methods
.. autosummary::
play_from_queue
play
play_uri
pause
stop
seek
next
previous
mute
volume
play_mode
cross_fade
ramp_to_volume
get_current_track_info
get_speaker_info
get_current_transport_info
.. rubric:: Queue Management
.. autosummary::
get_queue
queue_size
add_to_queue
add_uri_to_queue
add_multiple_to_queue
remove_from_queue
clear_queue
.. rubric:: Group Management
.. autosummary::
group
partymode
join
unjoin
all_groups
all_zones
visible_zones
.. rubric:: Player Identity and Settings
.. autosummary::
player_name
uid
household_id
is_visible
is_bridge
is_coordinator
is_soundbar
bass
treble
loudness
night_mode
dialog_mode
status_light
.. rubric:: Playlists and Favorites
.. autosummary::
get_sonos_playlists
create_sonos_playlist
create_sonos_playlist_from_queue
remove_sonos_playlist
add_item_to_sonos_playlist
reorder_sonos_playlist
clear_sonos_playlist
move_in_sonos_playlist
remove_from_sonos_playlist
get_sonos_playlist_by_attr
get_favorite_radio_shows
get_favorite_radio_stations
get_sonos_favorites
.. rubric:: Miscellaneous
.. autosummary::
switch_to_line_in
is_playing_radio
is_playing_line_in
is_playing_tv
switch_to_tv
set_sleep_timer
get_sleep_timer
.. warning::
Properties on this object are not generally cached and may obtain
information over the network, so may take longer than expected to set
or return a value. It may be a good idea for you to cache the value in
your own code.
.. note::
Since all methods/properties on this object will result in an UPnP
request, they might result in an exception without it being mentioned
in the Raises section.
In most cases, the exception will be a
:class:`soco.exceptions.SoCoUPnPException`
(if the player returns an UPnP error code), but in special cases
it might also be another :class:`soco.exceptions.SoCoException`
or even a `requests` exception.
"""
# Event subscription grouping key used by the service layer.
_class_group = 'SoCo'

# pylint: disable=super-on-old-class
def __init__(self, ip_address):
    """Initialise the instance for the speaker at *ip_address*.

    Args:
        ip_address (str): The IPv4 address of the Sonos device, e.g.
            ``'192.168.1.101'``.

    Raises:
        ValueError: If *ip_address* is not a valid IPv4 address string.
    """
    # Note: Creation of a SoCo instance should be as cheap and quick as
    # possible. Do not make any network calls here
    super(SoCo, self).__init__()
    # Check if ip_address is a valid IPv4 representation.
    # Sonos does not (yet) support IPv6
    try:
        socket.inet_aton(ip_address)
    except socket.error:
        raise ValueError("Not a valid IP address string")
    #: The speaker's ip address
    self.ip_address = ip_address
    self.speaker_info = {}  # Stores information about the current speaker
    # The services which we use
    # pylint: disable=invalid-name
    self.avTransport = AVTransport(self)
    self.contentDirectory = ContentDirectory(self)
    self.deviceProperties = DeviceProperties(self)
    self.renderingControl = RenderingControl(self)
    self.zoneGroupTopology = ZoneGroupTopology(self)
    self.alarmClock = AlarmClock(self)
    self.systemProperties = SystemProperties(self)
    self.musicServices = MusicServices(self)
    self.music_library = MusicLibrary(self)
    # Some private attributes. Most are lazily populated by
    # _parse_zone_group_state() or get_speaker_info(); None / empty
    # means "not fetched yet".
    self._all_zones = set()
    self._groups = set()
    self._is_bridge = None
    self._is_coordinator = False
    self._is_soundbar = None
    self._player_name = None
    self._uid = None
    self._household_id = None
    self._visible_zones = set()
    # Short-lived cache for zone group state lookups.
    self._zgs_cache = Cache(default_timeout=5)
    self._zgs_result = None
    _LOG.debug("Created SoCo instance for ip: %s", ip_address)
def __str__(self):
    """Return a human-readable description of this speaker."""
    return "<{0} object at ip {1}>".format(
        type(self).__name__, self.ip_address)

def __repr__(self):
    """Return an eval-able representation of this speaker."""
    return '{0}("{1}")'.format(type(self).__name__, self.ip_address)
@property
def player_name(self):
    """str: The speaker's name."""
    # Reading the name out of the zone group topology benefits from
    # its caching; DeviceProperties.GetZoneAttributes would also work
    # (key 'CurrentZoneName') but is probably slower.
    self._parse_zone_group_state()
    return self._player_name

@player_name.setter
def player_name(self, playername):
    """Set the speaker's name."""
    self.deviceProperties.SetZoneAttributes([
        ('DesiredZoneName', playername),
        ('DesiredIcon', ''),
        ('DesiredConfiguration', '')
    ])
@property
def uid(self):
    """str: A unique identifier.

    Looks like: ``'RINCON_000XXXXXXXXXX1400'``
    """
    # The uid is assumed not to change, so serve the cached value when
    # we have one. Otherwise parse the zone group topology, which sets
    # self._uid as a side effect (and is itself usually cached).
    # An alternative would be to fetch /xml/device_description.xml and
    # strip the 'uuid:' prefix from its UDN element, but the topology
    # route is quicker.
    if self._uid is None:
        self._parse_zone_group_state()
    return self._uid
@property
def household_id(self):
    """str: A unique identifier for all players in a household.

    Looks like: ``'Sonos_asahHKgjgJGjgjGjggjJgjJG34'``
    """
    # Assumed stable over time, so fetch once and cache.
    if self._household_id is None:
        result = self.deviceProperties.GetHouseholdID()
        self._household_id = result['CurrentHouseholdID']
    return self._household_id
@property
def is_visible(self):
    """bool: Is this zone visible?

    A zone might be invisible if, for example, it is a bridge, or the
    slave part of a stereo pair.
    """
    # Derived from the (cached) zone group topology rather than a
    # DeviceProperties.GetInvisible call.
    return self in self.visible_zones
@property
def is_bridge(self):
    """bool: Is this zone a bridge?"""
    # Assumed stable, so only hit the topology when unknown; parsing it
    # sets self._is_bridge as a side effect, caching the answer.
    if self._is_bridge is None:
        self._parse_zone_group_state()
    return self._is_bridge
@property
def is_coordinator(self):
    """bool: Is this zone a group coordinator?"""
    # Always refresh from the zone group topology (which has its own
    # caching); the parse updates self._is_coordinator as a side
    # effect. A DeviceProperties call would work too but not benefit
    # from the cache.
    self._parse_zone_group_state()
    return self._is_coordinator
@property
def is_soundbar(self):
    """bool: Is this zone a soundbar (i.e. has night mode etc.)?"""
    if self._is_soundbar is None:
        # Speaker info may not have been fetched yet.
        if not self.speaker_info:
            self.get_speaker_info()
        model = self.speaker_info['model_name'].lower()
        # Soundbars are identified by their model-name suffix.
        self._is_soundbar = any(model.endswith(s) for s in SOUNDBARS)
    return self._is_soundbar
@property
def play_mode(self):
    """str: The queue's play mode.

    Case-insensitive options are:

    * ``'NORMAL'`` -- Turns off shuffle and repeat.
    * ``'REPEAT_ALL'`` -- Turns on repeat and turns off shuffle.
    * ``'SHUFFLE'`` -- Turns on shuffle *and* repeat. (It's
      strange, I know.)
    * ``'SHUFFLE_NOREPEAT'`` -- Turns on shuffle and turns off
      repeat.
    """
    result = self.avTransport.GetTransportSettings([
        ('InstanceID', 0),
    ])
    return result['PlayMode']

@play_mode.setter
def play_mode(self, playmode):
    """Set the speaker's mode.

    Raises:
        KeyError: If *playmode* is not one of the modes listed above.
    """
    playmode = playmode.upper()
    # Membership test directly on the dict; the previous `.keys()`
    # call was redundant.
    if playmode not in PLAY_MODES:
        raise KeyError("'%s' is not a valid play mode" % playmode)
    self.avTransport.SetPlayMode([
        ('InstanceID', 0),
        ('NewPlayMode', playmode)
    ])
@property
def shuffle(self):
    """bool: The queue's shuffle option.

    True if enabled, False otherwise.
    """
    # PLAY_MODES maps mode name -> (shuffle, repeat).
    shuffle_flag, _ = PLAY_MODES[self.play_mode]
    return shuffle_flag

@shuffle.setter
def shuffle(self, shuffle):
    """Set the queue's shuffle option."""
    # Keep the current repeat setting, changing only shuffle.
    self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, self.repeat)]
@property
def repeat(self):
    """bool: The queue's repeat option.

    True if enabled, False otherwise.

    Might also be ``'ONE'`` if repeating the same title is enabled
    (not supported by the official controller).
    """
    # PLAY_MODES maps mode name -> (shuffle, repeat).
    _, repeat_flag = PLAY_MODES[self.play_mode]
    return repeat_flag

@repeat.setter
def repeat(self, repeat):
    """Set the queue's repeat option."""
    # Keep the current shuffle setting, changing only repeat.
    self.play_mode = PLAY_MODE_BY_MEANING[(self.shuffle, repeat)]
@property
@only_on_master  # Only for symmetry with the setter
def cross_fade(self):
    """bool: The speaker's cross fade state.

    True if enabled, False otherwise
    """
    # Restored getter: the body was missing here, leaving the two
    # decorators dangling and making the `@cross_fade.setter` line
    # below a NameError.
    response = self.avTransport.GetCrossfadeMode([
        ('InstanceID', 0),
    ])
    cross_fade_state = response['CrossfadeMode']
    return bool(int(cross_fade_state))

@cross_fade.setter
@only_on_master
def cross_fade(self, crossfade):
    """Set the speaker's cross fade state."""
    crossfade_value = '1' if crossfade else '0'
    self.avTransport.SetCrossfadeMode([
        ('InstanceID', 0),
        ('CrossfadeMode', crossfade_value)
    ])
def ramp_to_volume(self, volume, ramp_type='SLEEP_TIMER_RAMP_TYPE'):
    """Smoothly change the volume.

    There are three ramp types available:

    * ``'SLEEP_TIMER_RAMP_TYPE'`` (default): linear ramp from the
      current volume up or down to the new volume, at 1.25 steps per
      second (e.g. 50 -> 30 takes 16 seconds).
    * ``'ALARM_RAMP_TYPE'``: resets the volume to zero, waits roughly
      30 seconds, then ramps up at 2.5 steps per second (e.g. ramping
      to 30 takes 12 seconds, excluding the wait).
    * ``'AUTOPLAY_RAMP_TYPE'``: resets the volume to zero then ramps
      up quickly at 50 steps per second (e.g. 30 takes 0.6 seconds).

    Sonos picks the ramp rate from the chosen type and reports the
    resulting transition time. This method is non blocking and has no
    network overhead once sent.

    Args:
        volume (int): The new volume.
        ramp_type (str, optional): The desired ramp type, as described
            above.

    Returns:
        int: The ramp time in seconds, rounded down. Note that this
        does not include the wait time.
    """
    result = self.renderingControl.RampToVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('RampType', ramp_type),
        ('DesiredVolume', volume),
        ('ResetVolumeAfter', False),
        ('ProgramURI', '')
    ])
    return int(result['RampTime'])
@only_on_master
def play_from_queue(self, index, start=True):
    """Play a track from the queue by index.

    Args:
        index (int): 0-based index of the track to play
        start (bool): If the item that has been set should start
            playing
    """
    # Speaker info is needed below; the queue URI embeds the uid.
    if not self.speaker_info:
        self.get_speaker_info()
    # Point the transport at this player's own queue...
    queue_uri = 'x-rincon-queue:{0}#0'.format(self.uid)
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', queue_uri),
        ('CurrentURIMetaData', '')
    ])
    # ...seek to the requested track (the device counts from 1)...
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'TRACK_NR'),
        ('Target', index + 1)
    ])
    # ...and optionally start playback.
    if start:
        self.play()
@only_on_master
def play(self):
    """Play the currently selected track."""
    self.avTransport.Play([('InstanceID', 0), ('Speed', 1)])
@only_on_master
# pylint: disable=too-many-arguments
def play_uri(self, uri='', meta='', title='', start=True,
             force_radio=False):
    """Play a URI.

    Playing a URI will replace what was playing with the stream given
    by the URI. For some streams at least a title is required as
    metadata. This can be provided using the `meta` argument or the
    `title` argument. If the `title` argument is provided minimal
    metadata will be generated. If the `meta` argument is provided the
    `title` argument is ignored.

    Args:
        uri (str): URI of the stream to be played.
        meta (str): The metadata to show in the player, DIDL format.
        title (str): The title to show in the player (if no meta).
        start (bool): If the URI that has been set should start
            playing.
        force_radio (bool): forces a uri to play as a radio stream.

    On a Sonos controller music is shown with one of the following
    display formats and controls:

    * Radio format: Shows the name of the radio station and other
      available data. No seek, next, previous, or voting capability.
      Examples: TuneIn, radioPup
    * Smart Radio: Shows track name, artist, and album. Limited seek,
      next and sometimes voting capability depending on the Music
      Service. Examples: Amazon Prime Stations, Pandora Radio
      Stations.
    * Track format: Shows track name, artist, and album the same as
      when playing from a queue. Full seek, next and previous
      capabilities. Examples: Spotify, Napster, Rhapsody.

    How it is displayed is determined by the URI prefix:
    `x-sonosapi-stream:`, `x-sonosapi-radio:`, `x-rincon-mp3radio:`,
    `hls-radio:` default to radio or smart radio format depending on
    the stream. Others default to track format: `x-file-cifs:`,
    `aac:`, `http:`, `https:`, `x-sonos-spotify:` (used by Spotify),
    `x-sonosapi-hls-static:` (Amazon Prime), `x-sonos-http:`
    (Google Play & Napster).

    Some URIs that default to track format could be radio streams,
    typically `http:`, `https:` or `aac:`. To force display and
    controls to Radio format set `force_radio=True`

    .. note:: Other URI prefixes exist but are less common.
       If you have information on these please add to this doc string.

    .. note:: A change in Sonos® (as of at least version 6.4.2) means
       that the devices no longer accept ordinary `http:` and `https:`
       URIs for radio stations. This method has the option to replace
       these prefixes with the one that Sonos® expects:
       `x-rincon-mp3radio:` by using the ``force_radio=True``
       parameter. A few streams may fail if not forced to Radio
       format.
    """
    # Build minimal DIDL-Lite metadata from the title when no explicit
    # metadata was supplied.
    if meta == '' and title != '':
        meta_template = '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements'\
            '/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" '\
            'xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/" '\
            'xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">'\
            '<item id="R:0/0/0" parentID="R:0/0" restricted="true">'\
            '<dc:title>{title}</dc:title><upnp:class>'\
            'object.item.audioItem.audioBroadcast</upnp:class><desc '\
            'id="cdudn" nameSpace="urn:schemas-rinconnetworks-com:'\
            'metadata-1-0/">{service}</desc></item></DIDL-Lite>'
        tunein_service = 'SA_RINCON65031_'
        # Radio stations need to have at least a title to play
        meta = meta_template.format(
            title=escape(title),
            service=tunein_service)
    # change uri prefix to force radio style display and commands
    if force_radio:
        colon = uri.find(':')
        if colon > 0:
            uri = 'x-rincon-mp3radio{0}'.format(uri[colon:])
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', uri),
        ('CurrentURIMetaData', meta)
    ])
    # The track is enqueued, now play it if needed
    if start:
        return self.play()
    return False
@only_on_master
def pause(self):
    """Pause the currently playing track."""
    self.avTransport.Pause([('InstanceID', 0), ('Speed', 1)])
@only_on_master
def stop(self):
    """Stop the currently playing track."""
    self.avTransport.Stop([('InstanceID', 0), ('Speed', 1)])
@only_on_master
def seek(self, timestamp):
    """Seek to a given timestamp in the current track, specified in the
    format of HH:MM:SS or H:MM:SS.

    Args:
        timestamp (str): The position to seek to.

    Raises:
        ValueError: if the given timestamp is invalid.
    """
    # Reject minute/second fields above 59; the previous pattern let
    # values such as '0:99:99' through to the device.
    if not re.match(r'^[0-9][0-9]?:[0-5][0-9]:[0-5][0-9]$', timestamp):
        raise ValueError('invalid timestamp, use HH:MM:SS format')
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'REL_TIME'),
        ('Target', timestamp)
    ])
@only_on_master
def next(self):
    """Go to the next track.

    Keep in mind that next() can return errors for a variety of
    reasons. For example, if the Sonos is streaming Pandora and you
    call next() several times in quick succession an error code will
    likely be returned (since Pandora has limits on how many songs can
    be skipped).
    """
    self.avTransport.Next([('InstanceID', 0), ('Speed', 1)])
@only_on_master
def previous(self):
    """Go back to the previously played track.

    Keep in mind that previous() can return errors for a variety of
    reasons. For example, previous() will return an error code (error
    code 701) if the Sonos is streaming Pandora since you can't go
    back on tracks.
    """
    self.avTransport.Previous([('InstanceID', 0), ('Speed', 1)])
@property
def mute(self):
    """bool: The speaker's mute state.

    True if muted, False otherwise.
    """
    state = self.renderingControl.GetMute([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])['CurrentMute']
    # The service reports '0' / '1'.
    return bool(int(state))

@mute.setter
def mute(self, mute):
    """Mute (or unmute) the speaker."""
    self.renderingControl.SetMute([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredMute', '1' if mute else '0')
    ])
@property
def volume(self):
"""int: The speaker's volume.
An integer between 0 and 100.
"""
response = self.renderingControl.GetVolume([
('InstanceID', 0),
('Channel', 'Master'),
])
volume = response['CurrentVolume']
return int(volume)
@volume.setter
def volume(self, volume):
"""Set the speaker's volume."""
volume = int(volume)
volume = max(0, min(volume, 100)) # Coerce in range
self.renderingControl.SetVolume([
('InstanceID', 0),
('Channel', 'Master'),
('DesiredVolume', volume)
])
@property
def bass(self):
"""int: The speaker's bass EQ.
An integer between -10 and 10.
"""
response = self.renderingControl.GetBass([
('InstanceID', 0),
('Channel', 'Master'),
])
bass = response['CurrentBass']
return int(bass)
@bass.setter
def bass(self, bass):
"""Set the speaker's bass."""
bass = int(bass)
bass = max(-10, min(bass, 10)) # Coerce in range
self.renderingControl.SetBass([
('InstanceID', 0),
('DesiredBass', bass)
])
@property
def treble(self):
"""int: The speaker's treble EQ.
An integer between -10 and 10.
"""
response = self.renderingControl.GetTreble([
('InstanceID', 0),
('Channel', 'Master'),
])
treble = response['CurrentTreble']
return int(treble)
@treble.setter
def treble(self, treble):
"""Set the speaker's treble."""
treble = int(treble)
treble = max(-10, min(treble, 10)) # Coerce in range
self.renderingControl.SetTreble([
('InstanceID', 0),
('DesiredTreble', treble)
])
@property
def loudness(self):
"""bool: The Sonos speaker's loudness compensation.
True if on, False otherwise.
Loudness is a complicated topic. You can find a nice summary about this
feature here: http://forums.sonos.com/showthread.php?p=4698#post4698
"""
response = self.renderingControl.GetLoudness([
('InstanceID', 0),
('Channel', 'Master'),
])
loudness = response["CurrentLoudness"]
return bool(int(loudness))
@loudness.setter
def loudness(self, loudness):
"""Switch on/off the speaker's loudness compensation."""
loudness_value = '1' if loudness else '0'
self.renderingControl.SetLoudness([
('InstanceID', 0),
('Channel', 'Master'),
('DesiredLoudness', loudness_value)
])
@property
def night_mode(self):
"""bool: The speaker's night mode.
True if on, False if off, None if not supported.
"""
if not self.is_soundbar:
return None
response = self.renderingControl.GetEQ([
('InstanceID', 0),
('EQType', 'NightMode')
])
return bool(int(response['CurrentValue']))
@night_mode.setter
def night_mode(self, night_mode):
"""Switch on/off the speaker's night mode.
:param night_mode: Enable or disable night mode
:type night_mode: bool
:raises NotSupportedException: If the device does not support
night mode.
"""
if not self.is_soundbar:
message = 'This device does not support night mode'
raise NotSupportedException(message)
self.renderingControl.SetEQ([
('InstanceID', 0),
('EQType', 'NightMode'),
('DesiredValue', int(night_mode))
])
@property
def dialog_mode(self):
"""bool: Get the Sonos speaker's dialog mode.
True if on, False if off, None if not supported.
"""
if not self.is_soundbar:
return None
response = self.renderingControl.GetEQ([
('InstanceID', 0),
('EQType', 'DialogLevel')
])
return bool(int(response['CurrentValue']))
@dialog_mode.setter
def dialog_mode(self, dialog_mode):
"""Switch on/off the speaker's dialog mode.
:param dialog_mode: Enable or disable dialog mode
:type dialog_mode: bool
:raises NotSupportedException: If the device does not support
dialog mode.
"""
if not self.is_soundbar:
message = 'This device does not support dialog mode'
raise NotSupportedException(message)
self.renderingControl.SetEQ([
('InstanceID', 0),
('EQType', 'DialogLevel'),
('DesiredValue', int(dialog_mode))
])
def _parse_zone_group_state(self):
"""The Zone Group State contains a lot of useful information.
Retrieve and parse it, and populate the relevant properties.
"""
# zoneGroupTopology.GetZoneGroupState()['ZoneGroupState'] returns XML like
# this:
#
# <ZoneGroups>
# <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXXX1400:0">
# <ZoneGroupMember
# BootSeq="33"
# Configuration="1"
# Icon="x-rincon-roomicon:zoneextender"
# Invisible="1"
# IsZoneBridge="1"
# Location="http://192.168.1.100:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000ZZZ1400"
# ZoneName="BRIDGE"/>
# </ZoneGroup>
# <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXX1400:46">
# <ZoneGroupMember
# BootSeq="44"
# Configuration="1"
# Icon="x-rincon-roomicon:living"
# Location="http://192.168.1.101:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000XXX1400"
# ZoneName="Living Room"/>
# <ZoneGroupMember
# BootSeq="52"
# Configuration="1"
# Icon="x-rincon-roomicon:kitchen"
# Location="http://192.168.1.102:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000YYY1400"
# ZoneName="Kitchen"/>
# </ZoneGroup>
# </ZoneGroups>
#
def parse_zone_group_member(member_element):
"""Parse a ZoneGroupMember or Satellite element from Zone Group
State, create a SoCo instance for the member, set basic attributes
and return it."""
# Create a SoCo instance for each member. Because SoCo
# instances are singletons, this is cheap if they have already
# been created, and useful if they haven't. We can then
# update various properties for that instance.
member_attribs = member_element.attrib
ip_addr = member_attribs['Location'].\
split('//')[1].split(':')[0]
zone = config.SOCO_CLASS(ip_addr)
# share our cache
zone._zgs_cache = self._zgs_cache
# uid doesn't change, but it's not harmful to (re)set it, in case
# the zone is as yet unseen.
zone._uid = member_attribs['UUID']
zone._player_name = member_attribs['ZoneName']
# add the zone to the set of all members, and to the set
# of visible members if appropriate
is_visible = (member_attribs.get('Invisible') != '1')
if is_visible:
self._visible_zones.add(zone)
self._all_zones.add(zone)
return zone
# This is called quite frequently, so it is worth optimising it.
# Maintain a private cache. If the zgt has not changed, there is no
# need to repeat all the XML parsing. In addition, switch on network
# caching for a short interval (5 secs).
zgs = self.zoneGroupTopology.GetZoneGroupState(
cache=self._zgs_cache)['ZoneGroupState']
if zgs == self._zgs_result:
return
self._zgs_result = zgs
tree = XML.fromstring(zgs.encode('utf-8'))
# Empty the set of all zone_groups
self._groups.clear()
# and the set of all members
self._all_zones.clear()
self._visible_zones.clear()
# With some versions, the response is wrapped in ZoneGroupState
tree = tree.find('ZoneGroups') or tree
# Loop over each ZoneGroup Element
for group_element in tree.findall('ZoneGroup'):
coordinator_uid = group_element.attrib['Coordinator']
group_uid = group_element.attrib['ID']
group_coordinator = None
members = set()
for member_element in group_element.findall('ZoneGroupMember'):
zone = parse_zone_group_member(member_element)
# Perform extra processing relevant to direct zone group
# members
#
# If this element has the same UUID as the coordinator, it is
# the coordinator
if zone._uid == coordinator_uid:
group_coordinator = zone
zone._is_coordinator = True
else:
zone._is_coordinator = False
# is_bridge doesn't change, but it does no real harm to
# set/reset it here, just in case the zone has not been seen
# before
zone._is_bridge = (
member_element.attrib.get('IsZoneBridge') == '1')
# add the zone to the members for this group
members.add(zone)
# Loop over Satellite elements if present, and process as for
# ZoneGroup elements
for satellite_element in member_element.findall('Satellite'):
zone = parse_zone_group_member(satellite_element)
# Assume a satellite can't be a bridge or coordinator, so
# no need to check.
#
# Add the zone to the members for this group.
members.add(zone)
# Now create a ZoneGroup with this info and add it to the list
# of groups
self._groups.add(ZoneGroup(group_uid, group_coordinator, members))
@property
def all_groups(self):
"""set of :class:`soco.groups.ZoneGroup`: All available groups."""
self._parse_zone_group_state()
return self._groups.copy()
@property
def group(self):
""":class:`soco.groups.ZoneGroup`: The Zone Group of which this device
is a member.
None if this zone is a slave in a stereo pair.
"""
for group in self.all_groups:
if self in group:
return group
return None
# To get the group directly from the network, try the code below
# though it is probably slower than that above
# current_group_id = self.zoneGroupTopology.GetZoneGroupAttributes()[
# 'CurrentZoneGroupID']
# if current_group_id:
# for group in self.all_groups:
# if group.uid == current_group_id:
# return group
# else:
# return None
@property
def all_zones(self):
"""set of :class:`soco.groups.ZoneGroup`: All available zones."""
self._parse_zone_group_state()
return self._all_zones.copy()
@property
def visible_zones(self):
"""set of :class:`soco.groups.ZoneGroup`: All visible zones."""
self._parse_zone_group_state()
return self._visible_zones.copy()
def partymode(self):
"""Put all the speakers in the network in the same group, a.k.a Party
Mode.
This blog shows the initial research responsible for this:
http://blog.travelmarx.com/2010/06/exploring-sonos-via-upnp.html
The trick seems to be (only tested on a two-speaker setup) to tell each
speaker which to join. There's probably a bit more to it if multiple
groups have been defined.
"""
# Tell every other visible zone to join this one
# pylint: disable = expression-not-assigned
[zone.join(self) for zone in self.visible_zones if zone is not self]
def join(self, master):
"""Join this speaker to another "master" speaker."""
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon:{0}'.format(master.uid)),
('CurrentURIMetaData', '')
])
self._zgs_cache.clear()
self._parse_zone_group_state()
def unjoin(self):
"""Remove this speaker from a group.
Seems to work ok even if you remove what was previously the group
master from it's own group. If the speaker was not in a group also
returns ok.
"""
self.avTransport.BecomeCoordinatorOfStandaloneGroup([
('InstanceID', 0)
])
self._zgs_cache.clear()
self._parse_zone_group_state()
def switch_to_line_in(self, source=None):
""" Switch the speaker's input to line-in.
Args:
source (SoCo): The speaker whose line-in should be played.
Default is line-in from the speaker itself.
"""
if source:
uid = source.uid
else:
uid = self.uid
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon-stream:{0}'.format(uid)),
('CurrentURIMetaData', '')
])
@property
def is_playing_radio(self):
"""bool: Is the speaker playing radio?"""
response = self.avTransport.GetPositionInfo([
('InstanceID', 0),
('Channel', 'Master')
])
track_uri = response['TrackURI']
return re.match(r'^x-rincon-mp3radio:', track_uri) is not None
@property
def is_playing_line_in(self):
"""bool: Is the speaker playing line-in?"""
response = self.avTransport.GetPositionInfo([
('InstanceID', 0),
('Channel', 'Master')
])
track_uri = response['TrackURI']
return re.match(r'^x-rincon-stream:', track_uri) is not None
@property
def is_playing_tv(self):
"""bool: Is the playbar speaker input from TV?"""
response = self.avTransport.GetPositionInfo([
('InstanceID', 0),
('Channel', 'Master')
])
track_uri = response['TrackURI']
return re.match(r'^x-sonos-htastream:', track_uri) is not None
def switch_to_tv(self):
"""Switch the playbar speaker's input to TV."""
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-sonos-htastream:{0}:spdif'.format(self.uid)),
('CurrentURIMetaData', '')
])
@property
def status_light(self):
"""bool: The white Sonos status light between the mute button and the
volume up button on the speaker.
True if on, otherwise False.
"""
result = self.deviceProperties.GetLEDState()
LEDState = result["CurrentLEDState"] # pylint: disable=invalid-name
return LEDState == "On"
@status_light.setter
def status_light(self, led_on):
"""Switch on/off the speaker's status light."""
led_state = 'On' if led_on else 'Off'
self.deviceProperties.SetLEDState([
('DesiredLEDState', led_state),
])
    def get_current_track_info(self):
        """Get information about the currently playing track.

        Returns:
            dict: A dictionary containing information about the currently
            playing track: playlist_position, duration, title, artist, album,
            position and an album_art link.

        If we're unable to return data for a field, we'll return an empty
        string. This can happen for all kinds of reasons so be sure to check
        values. For example, a track may not have complete metadata and be
        missing an album name. In this case track['album'] will be an empty
        string.

        .. note:: Calling this method on a slave in a group will not
            return the track the group is playing, but the last track
            this speaker was playing.
        """
        response = self.avTransport.GetPositionInfo([
            ('InstanceID', 0),
            ('Channel', 'Master')
        ])
        # Pre-populate with empty strings so callers can rely on these keys
        # being present even when metadata is missing.
        track = {'title': '', 'artist': '', 'album': '', 'album_art': '',
                 'position': ''}
        track['playlist_position'] = response['Track']
        track['duration'] = response['TrackDuration']
        track['uri'] = response['TrackURI']
        track['position'] = response['RelTime']
        metadata = response['TrackMetaData']
        # Store the entire Metadata entry in the track, this can then be
        # used if needed by the client to restart a given URI
        track['metadata'] = metadata
        # Duration seems to be '0:00:00' when listening to radio
        if metadata != '' and track['duration'] == '0:00:00':
            metadata = XML.fromstring(really_utf8(metadata))
            # Try parse trackinfo. Radio streams often embed the programme
            # info as a single "artist - title" string in streamContent.
            trackinfo = metadata.findtext('.//{urn:schemas-rinconnetworks-com:'
                                          'metadata-1-0/}streamContent') or ''
            index = trackinfo.find(' - ')
            if index > -1:
                track['artist'] = trackinfo[:index]
                track['title'] = trackinfo[index + 3:]
            else:
                # Might find some kind of title anyway in metadata
                track['title'] = metadata.findtext('.//{http://purl.org/dc/'
                                                   'elements/1.1/}title')
                if not track['title']:
                    track['title'] = trackinfo
        # If the speaker is playing from the line-in source, querying for track
        # metadata will return "NOT_IMPLEMENTED".
        elif metadata not in ('', 'NOT_IMPLEMENTED', None):
            # Track metadata is returned in DIDL-Lite format
            metadata = XML.fromstring(really_utf8(metadata))
            md_title = metadata.findtext(
                './/{http://purl.org/dc/elements/1.1/}title')
            md_artist = metadata.findtext(
                './/{http://purl.org/dc/elements/1.1/}creator')
            md_album = metadata.findtext(
                './/{urn:schemas-upnp-org:metadata-1-0/upnp/}album')
            # findtext returns None when an element is absent, so only
            # overwrite the empty-string defaults with truthy values.
            track['title'] = ""
            if md_title:
                track['title'] = md_title
            track['artist'] = ""
            if md_artist:
                track['artist'] = md_artist
            track['album'] = ""
            if md_album:
                track['album'] = md_album
            album_art_url = metadata.findtext(
                './/{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')
            if album_art_url is not None:
                # Convert the device-relative art URL into an absolute URI
                track['album_art'] = \
                    self.music_library.build_album_art_full_uri(album_art_url)
        return track
    def get_speaker_info(self, refresh=False, timeout=None):
        """Get information about the Sonos speaker.

        Arguments:
            refresh(bool): Refresh the speaker info cache.
            timeout: How long to wait for the server to send
                data before giving up, as a float, or a
                `(connect timeout, read timeout)` tuple
                e.g. (3, 5). Default is no timeout.

        Returns:
            dict: Information about the Sonos speaker, such as the UID,
            MAC Address, and Zone Name.
        """
        # Serve from the cache unless the caller explicitly asks otherwise
        if self.speaker_info and refresh is False:
            return self.speaker_info
        else:
            # The device description document is served over plain HTTP on
            # port 1400
            response = requests.get('http://' + self.ip_address +
                                    ':1400/xml/device_description.xml',
                                    timeout=timeout)
            dom = XML.fromstring(response.content)
        device = dom.find('{urn:schemas-upnp-org:device-1-0}device')
        if device is not None:
            self.speaker_info['zone_name'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}roomName')
            # no zone icon in device_description.xml -> player icon
            self.speaker_info['player_icon'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}iconList/'
                '{urn:schemas-upnp-org:device-1-0}icon/'
                '{urn:schemas-upnp-org:device-1-0}url'
            )
            self.speaker_info['uid'] = self.uid
            self.speaker_info['serial_number'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}serialNum')
            self.speaker_info['software_version'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}softwareVersion')
            self.speaker_info['hardware_version'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}hardwareVersion')
            self.speaker_info['model_number'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}modelNumber')
            self.speaker_info['model_name'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}modelName')
            self.speaker_info['display_version'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}displayVersion')
            # no mac address - extract from serial number
            mac = self.speaker_info['serial_number'].split(':')[0]
            self.speaker_info['mac_address'] = mac
            return self.speaker_info
        # NOTE(review): if the description XML has no <device> element the
        # cache is left untouched and None is returned.
        return None
def get_current_transport_info(self):
"""Get the current playback state.
Returns:
dict: The following information about the
speaker's playing state:
* current_transport_state (``PLAYING``, ``TRANSITIONING``,
``PAUSED_PLAYBACK``, ``STOPPED``)
* current_transport_status (OK, ?)
* current_speed(1, ?)
This allows us to know if speaker is playing or not. Don't know other
states of CurrentTransportStatus and CurrentSpeed.
"""
response = self.avTransport.GetTransportInfo([
('InstanceID', 0),
])
playstate = {
'current_transport_status': '',
'current_transport_state': '',
'current_transport_speed': ''
}
playstate['current_transport_state'] = \
response['CurrentTransportState']
playstate['current_transport_status'] = \
response['CurrentTransportStatus']
playstate['current_transport_speed'] = response['CurrentSpeed']
return playstate
    def get_queue(self, start=0, max_items=100, full_album_art_uri=False):
        """Get information about the queue.

        :param start: Starting number of returned matches
        :param max_items: Maximum number of returned matches
        :param full_album_art_uri: If the album art URI should include the
            IP address
        :returns: A :py:class:`~.soco.data_structures.Queue` object

        This method is heavly based on Sam Soffes (aka soffes) ruby
        implementation
        """
        queue = []
        # 'Q:0' is the well-known ObjectID of the play queue container
        response = self.contentDirectory.Browse([
            ('ObjectID', 'Q:0'),
            ('BrowseFlag', 'BrowseDirectChildren'),
            ('Filter', '*'),
            ('StartingIndex', start),
            ('RequestedCount', max_items),
            ('SortCriteria', '')
        ])
        result = response['Result']

        # Collect paging metadata (number_returned, total_matches, update_id)
        # to attach to the returned Queue object
        metadata = {}
        for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
            metadata[camel_to_underscore(tag)] = int(response[tag])

        # I'm not sure this necessary (any more). Even with an empty queue,
        # there is still a result object. This shoud be investigated.
        if not result:
            # pylint: disable=star-args
            return Queue(queue, **metadata)

        # The result is a DIDL-Lite document; parse it into data structures
        items = from_didl_string(result)
        for item in items:
            # Check if the album art URI should be fully qualified
            if full_album_art_uri:
                self.music_library._update_album_art_to_full_uri(item)
            queue.append(item)

        # pylint: disable=star-args
        return Queue(queue, **metadata)
@property
def queue_size(self):
"""int: Size of the queue."""
response = self.contentDirectory.Browse([
('ObjectID', 'Q:0'),
('BrowseFlag', 'BrowseMetadata'),
('Filter', '*'),
('StartingIndex', 0),
('RequestedCount', 1),
('SortCriteria', '')
])
dom = XML.fromstring(really_utf8(response['Result']))
queue_size = None
container = dom.find(
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container')
if container is not None:
child_count = container.get('childCount')
if child_count is not None:
queue_size = int(child_count)
return queue_size
def get_sonos_playlists(self, *args, **kwargs):
"""Convenience method for
`get_music_library_information('sonos_playlists')`.
Refer to the docstring for that method
"""
args = tuple(['sonos_playlists'] + list(args))
return self.music_library.get_music_library_information(*args,
**kwargs)
@only_on_master
def add_uri_to_queue(self, uri, position=0, as_next=False):
"""Add the URI to the queue.
For arguments and return value see `add_to_queue`.
"""
# FIXME: The res.protocol_info should probably represent the mime type
# etc of the uri. But this seems OK.
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
item = DidlObject(resources=res, title='', parent_id='', item_id='')
return self.add_to_queue(item, position, as_next)
@only_on_master
def add_to_queue(self, queueable_item, position=0, as_next=False):
"""Add a queueable item to the queue.
Args:
queueable_item (DidlObject or MusicServiceItem): The item to be
added to the queue
position (int): The index (1-based) at which the URI should be
added. Default is 0 (add URI at the end of the queue).
as_next (bool): Whether this URI should be played as the next
track in shuffle mode. This only works if `play_mode=SHUFFLE`.
Returns:
int: The index of the new item in the queue.
"""
metadata = to_didl_string(queueable_item)
response = self.avTransport.AddURIToQueue([
('InstanceID', 0),
('EnqueuedURI', queueable_item.resources[0].uri),
('EnqueuedURIMetaData', metadata),
('DesiredFirstTrackNumberEnqueued', position),
('EnqueueAsNext', int(as_next))
])
qnumber = response['FirstTrackNumberEnqueued']
return int(qnumber)
    def add_multiple_to_queue(self, items, container=None):
        """Add a sequence of items to the queue.

        Args:
            items (list): A sequence of items to the be added to the queue
            container (DidlObject, optional): A container object which
                includes the items.
        """
        if container is not None:
            container_uri = container.resources[0].uri
            container_metadata = to_didl_string(container)
        else:
            container_uri = ''  # Sonos seems to accept this as well
            container_metadata = ''  # pylint: disable=redefined-variable-type
        chunk_size = 16  # With each request, we can only add 16 items
        item_list = list(items)  # List for slicing
        for index in range(0, len(item_list), chunk_size):
            chunk = item_list[index:index + chunk_size]
            # URIs and their metadata are sent as space-separated lists,
            # one entry per item in the chunk
            uris = ' '.join([item.resources[0].uri for item in chunk])
            uri_metadata = ' '.join([to_didl_string(item) for item in chunk])
            self.avTransport.AddMultipleURIsToQueue([
                ('InstanceID', 0),
                ('UpdateID', 0),
                ('NumberOfURIs', len(chunk)),
                ('EnqueuedURIs', uris),
                ('EnqueuedURIsMetaData', uri_metadata),
                ('ContainerURI', container_uri),
                ('ContainerMetaData', container_metadata),
                # 0 appends each chunk at the end of the queue
                ('DesiredFirstTrackNumberEnqueued', 0),
                ('EnqueueAsNext', 0)
            ])
@only_on_master
def remove_from_queue(self, index):
"""Remove a track from the queue by index. The index number is
required as an argument, where the first index is 0.
Args:
index (int): The (0-based) index of the track to remove
"""
# TODO: what do these parameters actually do?
updid = '0'
objid = 'Q:0/' + str(index + 1)
self.avTransport.RemoveTrackFromQueue([
('InstanceID', 0),
('ObjectID', objid),
('UpdateID', updid),
])
@only_on_master
def clear_queue(self):
"""Remove all tracks from the queue."""
self.avTransport.RemoveAllTracksFromQueue([
('InstanceID', 0),
])
    @deprecated('0.13', "soco.music_library.get_favorite_radio_shows", '0.15')
    def get_favorite_radio_shows(self, start=0, max_items=100):
        """Get favorite radio shows from Sonos' Radio app.

        Returns:
            dict: A dictionary containing the total number of favorites, the
            number of favorites returned, and the actual list of favorite
            radio shows, represented as a dictionary with `title` and `uri`
            keys.

        Depending on what you're building, you'll want to check to see if the
        total number of favorites is greater than the amount you
        requested (`max_items`), if it is, use `start` to page through and
        get the entire list of favorites.
        """
        message = 'The output type of this method will probably change in '\
                  'the future to use SoCo data structures'
        warnings.warn(message, stacklevel=2)
        # Delegates to the name-mangled private helper __get_favorites
        return self.__get_favorites(RADIO_SHOWS, start, max_items)
    @deprecated('0.13', "soco.music_library.get_favorite_radio_stations",
                '0.15')
    def get_favorite_radio_stations(self, start=0, max_items=100):
        """Get favorite radio stations from Sonos' Radio app.

        See :meth:`get_favorite_radio_shows` for return type and remarks.
        """
        message = 'The output type of this method will probably change in '\
                  'the future to use SoCo data structures'
        warnings.warn(message, stacklevel=2)
        # Delegates to the name-mangled private helper __get_favorites
        return self.__get_favorites(RADIO_STATIONS, start, max_items)
    @deprecated('0.13', "soco.music_library.get_sonos_favorites", '0.15')
    def get_sonos_favorites(self, start=0, max_items=100):
        """Get Sonos favorites.

        See :meth:`get_favorite_radio_shows` for return type and remarks.
        """
        message = 'The output type of this method will probably change in '\
                  'the future to use SoCo data structures'
        warnings.warn(message, stacklevel=2)
        # Delegates to the name-mangled private helper __get_favorites
        return self.__get_favorites(SONOS_FAVORITES, start, max_items)
def __get_favorites(self, favorite_type, start=0, max_items=100):
""" Helper method for `get_favorite_radio_*` methods.
Args:
favorite_type (str): Specify either `RADIO_STATIONS` or
`RADIO_SHOWS`.
start (int): Which number to start the retrieval from. Used for
paging.
max_items (int): The total number of results to return.
"""
if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
favorite_type = SONOS_FAVORITES
response = self.contentDirectory.Browse([
('ObjectID',
'FV:2' if favorite_type is SONOS_FAVORITES
else 'R:0/{0}'.format(favorite_type)),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', start),
('RequestedCount', max_items),
('SortCriteria', '')
])
result = {}
favorites = []
results_xml = response['Result']
if results_xml != '':
# Favorites are returned in DIDL-Lite format
metadata = XML.fromstring(really_utf8(results_xml))
for item in metadata.findall(
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container'
if favorite_type == RADIO_SHOWS else
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'):
favorite = {}
favorite['title'] = item.findtext(
'{http://purl.org/dc/elements/1.1/}title')
favorite['uri'] = item.findtext(
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res')
if favorite_type == SONOS_FAVORITES:
favorite['meta'] = item.findtext(
'{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD')
favorites.append(favorite)
result['total'] = response['TotalMatches']
result['returned'] = len(favorites)
result['favorites'] = favorites
return result
def create_sonos_playlist(self, title):
"""Create a new empty Sonos playlist.
Args:
title: Name of the playlist
:rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
"""
response = self.avTransport.CreateSavedQueue([
('InstanceID', 0),
('Title', title),
('EnqueuedURI', ''),
('EnqueuedURIMetaData', ''),
])
item_id = response['AssignedObjectID']
obj_id = item_id.split(':', 2)[1]
uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(obj_id)
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
return DidlPlaylistContainer(
resources=res, title=title, parent_id='SQ:', item_id=item_id)
@only_on_master
# pylint: disable=invalid-name
def create_sonos_playlist_from_queue(self, title):
"""Create a new Sonos playlist from the current queue.
Args:
title: Name of the playlist
:rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
"""
# Note: probably same as Queue service method SaveAsSonosPlaylist
# but this has not been tested. This method is what the
# controller uses.
response = self.avTransport.SaveQueue([
('InstanceID', 0),
('Title', title),
('ObjectID', '')
])
item_id = response['AssignedObjectID']
obj_id = item_id.split(':', 2)[1]
uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(obj_id)
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
return DidlPlaylistContainer(
resources=res, title=title, parent_id='SQ:', item_id=item_id)
    @only_on_master
    def remove_sonos_playlist(self, sonos_playlist):
        """Remove a Sonos playlist.

        Args:
            sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove
                or the item_id (str).

        Returns:
            bool: True if succesful, False otherwise

        Raises:
            SoCoUPnPException: If sonos_playlist does not point to a valid
                object.
        """
        # Accept either a playlist object (use its item_id) or a raw id str
        object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
        # NOTE(review): this actually returns the raw DestroyObject response,
        # which is truthy on success -- the docstring's "bool" is loose.
        return self.contentDirectory.DestroyObject([('ObjectID', object_id)])
    def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
        """Adds a queueable item to a Sonos' playlist.

        Args:
            queueable_item (DidlObject): the item to add to the Sonos' playlist
            sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to
                which the item should be added
        """
        # Get the update_id for the playlist. The device requires a current
        # update_id with each modification request.
        response, _ = self.music_library._music_lib_search(
            sonos_playlist.item_id, 0, 1)
        update_id = response['UpdateID']

        # Form the metadata for queueable_item
        metadata = to_didl_string(queueable_item)

        # Make the request
        self.avTransport.AddURIToSavedQueue([
            ('InstanceID', 0),
            ('UpdateID', update_id),
            ('ObjectID', sonos_playlist.item_id),
            ('EnqueuedURI', queueable_item.resources[0].uri),
            ('EnqueuedURIMetaData', metadata),
            # 2 ** 32 - 1 = 4294967295, this field has always this value. Most
            # likely, playlist positions are represented as a 32 bit uint and
            # this is therefore the largest index possible. Asking to add at
            # this index therefore probably amounts to adding it "at the end"
            ('AddAtIndex', 4294967295)
        ])
    @only_on_master
    def set_sleep_timer(self, sleep_time_seconds):
        """Sets the sleep timer.

        Args:
            sleep_time_seconds (int or NoneType): How long to wait before
                turning off speaker in seconds, None to cancel a sleep timer.
                Maximum value of 86399

        Raises:
            SoCoException: Upon errors interacting with Sonos controller
            ValueError: Argument/Syntax errors
        """
        # Note: A value of None for sleep_time_seconds is valid, and needs to
        # be preserved distinctly separate from 0. 0 means go to sleep now,
        # which will immediately start the sound tappering, and could be a
        # useful feature, while None means cancel the current timer
        try:
            if sleep_time_seconds is None:
                sleep_time = ''
            else:
                # format() of an integral timedelta yields 'H:MM:SS', the
                # duration format the device expects
                sleep_time = format(
                    datetime.timedelta(seconds=int(sleep_time_seconds))
                )
            self.avTransport.ConfigureSleepTimer([
                ('InstanceID', 0),
                ('NewSleepTimerDuration', sleep_time),
            ])
        except SoCoUPnPException as err:
            # UPnP error 402 is the device's "invalid argument" response;
            # translate it into a ValueError for a consistent caller API
            if 'Error 402 received' in str(err):
                raise ValueError('invalid sleep_time_seconds, must be integer \
                    value between 0 and 86399 inclusive or None')
            raise
        except ValueError:
            # int() conversion failed (non-numeric input)
            raise ValueError('invalid sleep_time_seconds, must be integer \
                value between 0 and 86399 inclusive or None')
@only_on_master
def get_sleep_timer(self):
"""Retrieves remaining sleep time, if any
Returns:
int or NoneType: Number of seconds left in timer. If there is no
sleep timer currently set it will return None.
"""
resp = self.avTransport.GetRemainingSleepTimerDuration([
('InstanceID', 0),
])
if resp['RemainingSleepTimerDuration']:
times = resp['RemainingSleepTimerDuration'].split(':')
return (int(times[0]) * 3600 +
int(times[1]) * 60 +
int(times[2]))
else:
return None
@only_on_master
def reorder_sonos_playlist(self, sonos_playlist, tracks, new_pos,
                           update_id=0):
    """Reorder and/or remove tracks in a Sonos playlist.

    The underlying call is quite complex as it can both move a track
    within the list or delete a track from the playlist. All of this
    depends on what tracks and new_pos specify.

    If a list is specified for tracks, then a list must be used for
    new_pos. Each list element is a discrete modification and the next
    list operation must anticipate the new state of the playlist.

    If a comma formatted string is specified for tracks, then use a
    similar string to specify new_pos. Those operations should be
    ordered from the end of the list to the beginning.

    See the helper methods
    :py:meth:`clear_sonos_playlist`, :py:meth:`move_in_sonos_playlist`,
    :py:meth:`remove_from_sonos_playlist` for simplified usage.

    update_id - If you have a series of operations, tracking the
    update_id and setting it will save a lookup operation.

    Examples:
        To reorder the first two tracks::

            # sonos_playlist specified by the DidlPlaylistContainer object
            sonos_playlist = device.get_sonos_playlists()[0]
            device.reorder_sonos_playlist(sonos_playlist,
                                          tracks=[0, ], new_pos=[1, ])
            # OR specified by the item_id
            device.reorder_sonos_playlist('SQ:0', tracks=[0, ],
                                          new_pos=[1, ])

        To delete the second track::

            # tracks/new_pos are a list of int
            device.reorder_sonos_playlist(sonos_playlist,
                                          tracks=[1, ], new_pos=[None, ])
            # OR tracks/new_pos are a list of int-like
            device.reorder_sonos_playlist(sonos_playlist,
                                          tracks=['1', ], new_pos=['', ])
            # OR tracks/new_pos are strings - no transform is done
            device.reorder_sonos_playlist(sonos_playlist, tracks='1',
                                          new_pos='')

        To reverse the order of a playlist with 4 items::

            device.reorder_sonos_playlist(sonos_playlist,
                                          tracks='3,2,1,0',
                                          new_pos='0,1,2,3')

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            The Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        tracks: (list): list of track indices(int) to reorder. May also
            be a list of int like things. i.e. ``['0', '1',]`` OR it may
            be a str of comma separated int like things. ``"0,1"``.
            Tracks are **0**-based. Meaning the first track is track 0,
            just like indexing into a Python list.
        new_pos (list): list of new positions (int|None) corresponding
            to track_list. MUST be the same type as ``tracks``.
            **0**-based, see tracks above. ``None`` is the indicator to
            remove the track. If using a list of strings, then a remove
            is indicated by an empty string.
        update_id (int): operation id (default: 0) If set to 0, a lookup
            is done to find the correct value.

    Returns:
        dict: Which contains 3 elements: change, length and update_id.
            Change in size between original playlist and the resulting
            playlist, the length of resulting playlist, and the new
            update_id. If every requested move turned out to be a no-op,
            length is None since the device was never queried.

    Raises:
        SoCoUPnPException: If playlist does not exist or if your tracks
            and/or new_pos arguments are invalid.
    """
    # Accept either a plain item id string like 'SQ:10' or any object
    # with an item_id attribute (e.g. a DidlPlaylistContainer).
    object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
    if isinstance(tracks, UnicodeType):
        # Pre-formatted comma separated strings pass straight through.
        track_list = [tracks, ]
        position_list = [new_pos, ]
    elif isinstance(tracks, int):
        track_list = [tracks, ]
        # "Remove" is requested with None; the device expects ''.
        if new_pos is None:
            new_pos = ''
        position_list = [new_pos, ]
    else:
        track_list = [str(x) for x in tracks]
        position_list = [str(x) if x is not None else '' for x in new_pos]
    if update_id == 0:  # retrieve the update id for the object
        response, _ = self.music_library._music_lib_search(object_id, 0, 1)
        update_id = response['UpdateID']
    change = 0
    # Initialised up front: the original left `length` unbound (raising
    # NameError on return) when every requested move was a no-op.
    length = None
    for track, position in zip(track_list, position_list):
        if track == position:  # no-op: the track would not move
            continue
        response = self.avTransport.ReorderTracksInSavedQueue([
            ("InstanceID", 0),
            ("ObjectID", object_id),
            ("UpdateID", update_id),
            ("TrackList", track),
            ("NewPositionList", position),
        ])
        change += int(response['QueueLengthChange'])
        update_id = int(response['NewUpdateID'])
        length = int(response['NewQueueLength'])
    return {'change': change,
            'update_id': update_id,
            'length': length}
@only_on_master
def clear_sonos_playlist(self, sonos_playlist, update_id=0):
    """Clear all tracks from a Sonos playlist.

    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.clear_sonos_playlist(sonos_playlist)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        ValueError: If sonos_playlist specified by string and is not
            found.
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # Resolve a string item_id into the actual playlist object.
    if not isinstance(sonos_playlist, DidlPlaylistContainer):
        sonos_playlist = self.get_sonos_playlist_by_attr(
            'item_id', sonos_playlist)
    total = self.music_library.browse(ml_item=sonos_playlist).total_matches
    if not total:
        # Nothing to delete; report the (empty) state without a call.
        return {'change': 0, 'update_id': update_id, 'length': total}
    # Ask for every index to be removed in a single batched operation.
    track_csv = ','.join(str(index) for index in range(total))
    return self.reorder_sonos_playlist(sonos_playlist, tracks=track_csv,
                                       new_pos='', update_id=update_id)
@only_on_master
def move_in_sonos_playlist(self, sonos_playlist, track, new_pos,
                           update_id=0):
    """Move a track to a new position within a Sonos Playlist.

    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.move_in_sonos_playlist(sonos_playlist, track=0, new_pos=1)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to move. The
            first track is track 0, just like indexing into a Python
            list.
        new_pos (int): **0**-based location to move the track.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # Coerce int-like arguments so the single-move code path is taken.
    return self.reorder_sonos_playlist(
        sonos_playlist, int(track), int(new_pos), update_id)
@only_on_master
def remove_from_sonos_playlist(self, sonos_playlist, track, update_id=0):
    """Remove a track from a Sonos Playlist.

    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.remove_from_sonos_playlist(sonos_playlist, track=0)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to remove. The
            first track is track 0, just like indexing into a Python
            list.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # A new position of None tells reorder_sonos_playlist to delete.
    return self.reorder_sonos_playlist(sonos_playlist, int(track), None,
                                       update_id)
@only_on_master
def get_sonos_playlist_by_attr(self, attr_name, match):
    """Return the first Sonos Playlist DidlPlaylistContainer that
    matches the attribute specified.

    Args:
        attr_name (str): DidlPlaylistContainer attribute to compare. The
            most useful being: 'title' and 'item_id'.
        match (str): Value to match.

    Returns:
        (:class:`~.soco.data_structures.DidlPlaylistContainer`): The
            first matching playlist object.

    Raises:
        (AttributeError): If indicated attribute name does not exist.
        (ValueError): If a match can not be found.

    Example::

        device.get_sonos_playlist_by_attr('title', 'Foo')
        device.get_sonos_playlist_by_attr('item_id', 'SQ:3')
    """
    try:
        # getattr raises AttributeError for unknown attribute names,
        # which propagates as documented.
        return next(playlist for playlist in self.get_sonos_playlists()
                    if getattr(playlist, attr_name) == match)
    except StopIteration:
        raise ValueError('No match on "{0}" for value "{1}"'.format(
            attr_name, match))
|
amelchio/pysonos
|
pysonos/core.py
|
SoCo.mute
|
python
|
def mute(self):
response = self.renderingControl.GetMute([
('InstanceID', 0),
('Channel', 'Master')
])
mute_state = response['CurrentMute']
return bool(int(mute_state))
|
bool: The speaker's mute state.
True if muted, False otherwise.
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/core.py#L705-L716
| null |
class SoCo(_SocoSingletonBase):
"""A simple class for controlling a Sonos speaker.
For any given set of arguments to __init__, only one instance of this class
may be created. Subsequent attempts to create an instance with the same
arguments will return the previously created instance. This means that all
SoCo instances created with the same ip address are in fact the *same* SoCo
instance, reflecting the real world position.
.. rubric:: Basic Methods
.. autosummary::
play_from_queue
play
play_uri
pause
stop
seek
next
previous
mute
volume
play_mode
cross_fade
ramp_to_volume
get_current_track_info
get_speaker_info
get_current_transport_info
.. rubric:: Queue Management
.. autosummary::
get_queue
queue_size
add_to_queue
add_uri_to_queue
add_multiple_to_queue
remove_from_queue
clear_queue
.. rubric:: Group Management
.. autosummary::
group
partymode
join
unjoin
all_groups
all_zones
visible_zones
.. rubric:: Player Identity and Settings
.. autosummary::
player_name
uid
household_id
is_visible
is_bridge
is_coordinator
is_soundbar
bass
treble
loudness
night_mode
dialog_mode
status_light
.. rubric:: Playlists and Favorites
.. autosummary::
get_sonos_playlists
create_sonos_playlist
create_sonos_playlist_from_queue
remove_sonos_playlist
add_item_to_sonos_playlist
reorder_sonos_playlist
clear_sonos_playlist
move_in_sonos_playlist
remove_from_sonos_playlist
get_sonos_playlist_by_attr
get_favorite_radio_shows
get_favorite_radio_stations
get_sonos_favorites
.. rubric:: Miscellaneous
.. autosummary::
switch_to_line_in
is_playing_radio
is_playing_line_in
is_playing_tv
switch_to_tv
set_sleep_timer
get_sleep_timer
.. warning::
Properties on this object are not generally cached and may obtain
information over the network, so may take longer than expected to set
or return a value. It may be a good idea for you to cache the value in
your own code.
.. note::
Since all methods/properties on this object will result in an UPnP
request, they might result in an exception without it being mentioned
in the Raises section.
In most cases, the exception will be a
:class:`soco.exceptions.SoCoUPnPException`
(if the player returns an UPnP error code), but in special cases
it might also be another :class:`soco.exceptions.SoCoException`
or even a `requests` exception.
"""
# NOTE(review): presumably the key under which _SocoSingletonBase groups
# per-ip singleton instances for this class family — confirm against
# the metaclass implementation.
_class_group = 'SoCo'
# pylint: disable=super-on-old-class
def __init__(self, ip_address):
    """Initialize a SoCo instance for the speaker at *ip_address*.

    Args:
        ip_address (str): The IPv4 address of the speaker, e.g.
            ``'192.168.1.101'``. Sonos does not (yet) support IPv6.

    Raises:
        ValueError: If *ip_address* is not a valid IPv4 address string.
    """
    # Note: Creation of a SoCo instance should be as cheap and quick as
    # possible. Do not make any network calls here
    super(SoCo, self).__init__()
    # Check if ip_address is a valid IPv4 representation.
    # Sonos does not (yet) support IPv6
    try:
        socket.inet_aton(ip_address)
    except socket.error:
        raise ValueError("Not a valid IP address string")
    #: The speaker's ip address
    self.ip_address = ip_address
    self.speaker_info = {}  # Stores information about the current speaker
    # The UPnP services which we use; one wrapper object per service.
    # pylint: disable=invalid-name
    self.avTransport = AVTransport(self)
    self.contentDirectory = ContentDirectory(self)
    self.deviceProperties = DeviceProperties(self)
    self.renderingControl = RenderingControl(self)
    self.zoneGroupTopology = ZoneGroupTopology(self)
    self.alarmClock = AlarmClock(self)
    self.systemProperties = SystemProperties(self)
    self.musicServices = MusicServices(self)
    self.music_library = MusicLibrary(self)
    # Some private attributes, populated lazily (mostly by
    # _parse_zone_group_state); None/empty means "not yet fetched".
    self._all_zones = set()
    self._groups = set()
    self._is_bridge = None
    self._is_coordinator = False
    self._is_soundbar = None
    self._player_name = None
    self._uid = None
    self._household_id = None
    self._visible_zones = set()
    # Short-lived network cache used for zone group state lookups.
    self._zgs_cache = Cache(default_timeout=5)
    self._zgs_result = None
    _LOG.debug("Created SoCo instance for ip: %s", ip_address)
def __str__(self):
    """Return a human-readable description including the IP address."""
    return "<{0} object at ip {1}>".format(
        type(self).__name__, self.ip_address)

def __repr__(self):
    """Return a constructor-style representation of this instance."""
    return '{0}("{1}")'.format(type(self).__name__, self.ip_address)
@property
def player_name(self):
    """str: The speaker's name."""
    # The zone group topology already carries the zone name and may be
    # cached, so prefer it over a GetZoneAttributes round trip.
    self._parse_zone_group_state()
    return self._player_name

@player_name.setter
def player_name(self, playername):
    """Set the speaker's name."""
    attributes = [
        ('DesiredZoneName', playername),
        ('DesiredIcon', ''),
        ('DesiredConfiguration', '')
    ]
    self.deviceProperties.SetZoneAttributes(attributes)
@property
def uid(self):
    """str: A unique identifier.

    Looks like: ``'RINCON_000XXXXXXXXXX1400'``
    """
    # The uid does not change over time (?), so a previously cached
    # value can be returned straight away.
    if self._uid is None:
        # Parsing the (probably cached) zone group topology populates
        # self._uid as a side effect, which is quicker than fetching
        # the device description XML over the network.
        self._parse_zone_group_state()
    return self._uid
@property
def household_id(self):
    """str: A unique identifier for all players in a household.

    Looks like: ``'Sonos_asahHKgjgJGjgjGjggjJgjJG34'``
    """
    # Does not change over time (?), so fetch it at most once and cache.
    if self._household_id is None:
        result = self.deviceProperties.GetHouseholdID()
        self._household_id = result['CurrentHouseholdID']
    return self._household_id
@property
def is_visible(self):
    """bool: Is this zone visible?

    A zone might be invisible if, for example, it is a bridge, or the
    slave part of a stereo pair.
    """
    # Derived from the zone group topology (which may be cached) rather
    # than a separate GetInvisible network call.
    return self in self.visible_zones

@property
def is_bridge(self):
    """bool: Is this zone a bridge?"""
    # Does not change over time (?): reuse any previous answer;
    # otherwise parsing the zone group topology sets self._is_bridge
    # as a side effect.
    if self._is_bridge is None:
        self._parse_zone_group_state()
    return self._is_bridge

@property
def is_coordinator(self):
    """bool: Is this zone a group coordinator?"""
    # Derived from the zone group topology, to take advantage of any
    # caching.
    self._parse_zone_group_state()
    return self._is_coordinator

@property
def is_soundbar(self):
    """bool: Is this zone a soundbar (i.e. has night mode etc.)?"""
    if self._is_soundbar is None:
        if not self.speaker_info:
            self.get_speaker_info()
        model = self.speaker_info['model_name'].lower()
        # Soundbars are identified by their model name suffix.
        self._is_soundbar = any(map(model.endswith, SOUNDBARS))
    return self._is_soundbar
@property
def play_mode(self):
    """str: The queue's play mode.

    Case-insensitive options are:

    *   ``'NORMAL'`` -- Turns off shuffle and repeat.
    *   ``'REPEAT_ALL'`` -- Turns on repeat and turns off shuffle.
    *   ``'SHUFFLE'`` -- Turns on shuffle *and* repeat. (It's
        strange, I know.)
    *   ``'SHUFFLE_NOREPEAT'`` -- Turns on shuffle and turns off
        repeat.
    """
    settings = self.avTransport.GetTransportSettings([
        ('InstanceID', 0),
    ])
    return settings['PlayMode']

@play_mode.setter
def play_mode(self, playmode):
    """Set the speaker's mode."""
    playmode = playmode.upper()
    # Validate locally so an invalid mode fails fast with a clear error.
    if playmode not in PLAY_MODES:
        raise KeyError("'%s' is not a valid play mode" % playmode)
    self.avTransport.SetPlayMode([
        ('InstanceID', 0),
        ('NewPlayMode', playmode)
    ])

@property
def shuffle(self):
    """bool: The queue's shuffle option.

    True if enabled, False otherwise.
    """
    # PLAY_MODES maps mode name -> (shuffle, repeat).
    shuffle_flag, _ = PLAY_MODES[self.play_mode]
    return shuffle_flag

@shuffle.setter
def shuffle(self, shuffle):
    """Set the queue's shuffle option."""
    self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, self.repeat)]

@property
def repeat(self):
    """bool: The queue's repeat option.

    True if enabled, False otherwise.

    Might also be ``'ONE'`` if repeating the same title is enabled
    (not supported by the official controller).
    """
    # PLAY_MODES maps mode name -> (shuffle, repeat).
    _, repeat_flag = PLAY_MODES[self.play_mode]
    return repeat_flag

@repeat.setter
def repeat(self, repeat):
    """Set the queue's repeat option."""
    self.play_mode = PLAY_MODE_BY_MEANING[(self.shuffle, repeat)]
@property
@only_on_master  # Only for symmetry with the setter
def cross_fade(self):
    """bool: The speaker's cross fade state.

    True if enabled, False otherwise.
    """
    result = self.avTransport.GetCrossfadeMode([
        ('InstanceID', 0),
    ])
    return bool(int(result['CrossfadeMode']))

@cross_fade.setter
@only_on_master
def cross_fade(self, crossfade):
    """Set the speaker's cross fade state."""
    self.avTransport.SetCrossfadeMode([
        ('InstanceID', 0),
        ('CrossfadeMode', '1' if crossfade else '0')
    ])
def ramp_to_volume(self, volume, ramp_type='SLEEP_TIMER_RAMP_TYPE'):
    """Smoothly change the volume.

    There are three ramp types available:

    *   ``'SLEEP_TIMER_RAMP_TYPE'`` (default): Linear ramp from the
        current volume up or down to the new volume. The ramp rate is
        1.25 steps per second. For example: To change from volume 50
        to volume 30 would take 16 seconds.
    *   ``'ALARM_RAMP_TYPE'``: Resets the volume to zero, waits for
        about 30 seconds, and then ramps the volume up to the desired
        value at a rate of 2.5 steps per second. For example: Volume
        30 would take 12 seconds for the ramp up (not considering the
        wait time).
    *   ``'AUTOPLAY_RAMP_TYPE'``: Resets the volume to zero and then
        quickly ramps up at a rate of 50 steps per second. For
        example: Volume 30 will take only 0.6 seconds.

    The ramp rate is selected by Sonos based on the chosen ramp type
    and the resulting transition time returned.
    This method is non blocking and has no network overhead once sent.

    Args:
        volume (int): The new volume.
        ramp_type (str, optional): The desired ramp type, as described
            above.

    Returns:
        int: The ramp time in seconds, rounded down. Note that this
        does not include the wait time.
    """
    args = [
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('RampType', ramp_type),
        ('DesiredVolume', volume),
        ('ResetVolumeAfter', False),
        ('ProgramURI', '')
    ]
    response = self.renderingControl.RampToVolume(args)
    return int(response['RampTime'])
@only_on_master
def play_from_queue(self, index, start=True):
    """Play a track from the queue by index.

    The index number is required as an argument, where the first index
    is 0.

    Args:
        index (int): 0-based index of the track to play.
        start (bool): If the item that has been set should start
            playing.
    """
    # The queue URI embeds the speaker's uid, so the speaker info must
    # have been fetched first.
    if not self.speaker_info:
        self.get_speaker_info()
    # Step 1: make the queue itself the transport source.
    queue_uri = 'x-rincon-queue:{0}#0'.format(self.uid)
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', queue_uri),
        ('CurrentURIMetaData', '')
    ])
    # Step 2: select the track; Sonos track numbers are 1-based.
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'TRACK_NR'),
        ('Target', index + 1)
    ])
    # Step 3: start playback if requested.
    if start:
        self.play()

@only_on_master
def play(self):
    """Play the currently selected track."""
    self.avTransport.Play([
        ('InstanceID', 0),
        ('Speed', 1)
    ])
@only_on_master
# pylint: disable=too-many-arguments
def play_uri(self, uri='', meta='', title='', start=True,
             force_radio=False):
    """Play a URI.

    Playing a URI will replace what was playing with the stream given
    by the URI. For some streams at least a title is required as
    metadata. This can be provided using the `meta` argument or the
    `title` argument. If the `title` argument is provided minimal
    metadata will be generated. If `meta` argument is provided the
    `title` argument is ignored.

    Args:
        uri (str): URI of the stream to be played.
        meta (str): The metadata to show in the player, DIDL format.
        title (str): The title to show in the player (if no meta).
        start (bool): If the URI that has been set should start playing.
        force_radio (bool): forces a uri to play as a radio stream.

    On a Sonos controller music is shown with one of the following
    display formats and controls:

    *   Radio format: Shows the name of the radio station and other
        available data. No seek, next, previous, or voting capability.
        Examples: TuneIn, radioPup
    *   Smart Radio: Shows track name, artist, and album. Limited seek,
        next and sometimes voting capability depending on the Music
        Service. Examples: Amazon Prime Stations, Pandora Radio
        Stations.
    *   Track format: Shows track name, artist, and album the same as
        when playing from a queue. Full seek, next and previous
        capabilities. Examples: Spotify, Napster, Rhapsody.

    How it is displayed is determined by the URI prefix:
    `x-sonosapi-stream:`, `x-sonosapi-radio:`, `x-rincon-mp3radio:`,
    `hls-radio:` default to radio or smart radio format depending on
    the stream. Others default to track format: `x-file-cifs:`, `aac:`,
    `http:`, `https:`, `x-sonos-spotify:` (used by Spotify),
    `x-sonosapi-hls-static:` (Amazon Prime),
    `x-sonos-http:` (Google Play & Napster).

    Some URIs that default to track format could be radio streams,
    typically `http:`, `https:` or `aac:`.
    To force display and controls to Radio format set
    `force_radio=True`

    .. note:: Other URI prefixes exist but are less common.
       If you have information on these please add to this doc string.

    .. note:: A change in Sonos® (as of at least version 6.4.2) means
       that the devices no longer accepts ordinary `http:` and `https:`
       URIs for radio stations. This method has the option to replaces
       these prefixes with the one that Sonos® expects:
       `x-rincon-mp3radio:` by using the "force_radio=True" parameter.
       A few streams may fail if not forced to to Radio format.
    """
    # Generate minimal DIDL-Lite metadata when only a title was given
    # (meta takes precedence over title).
    if meta == '' and title != '':
        meta_template = '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements'\
            '/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" '\
            'xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/" '\
            'xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">'\
            '<item id="R:0/0/0" parentID="R:0/0" restricted="true">'\
            '<dc:title>{title}</dc:title><upnp:class>'\
            'object.item.audioItem.audioBroadcast</upnp:class><desc '\
            'id="cdudn" nameSpace="urn:schemas-rinconnetworks-com:'\
            'metadata-1-0/">{service}</desc></item></DIDL-Lite>'
        tunein_service = 'SA_RINCON65031_'
        # Radio stations need to have at least a title to play;
        # escape() guards against XML-special characters in the title.
        meta = meta_template.format(
            title=escape(title),
            service=tunein_service)
    # change uri prefix to force radio style display and commands
    if force_radio:
        colon = uri.find(':')
        if colon > 0:
            uri = 'x-rincon-mp3radio{0}'.format(uri[colon:])
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', uri),
        ('CurrentURIMetaData', meta)
    ])
    # The URI is now the transport source; start playback if wanted.
    # When start is False the method returns False without playing.
    if start:
        return self.play()
    return False
@only_on_master
def pause(self):
    """Pause the currently playing track."""
    self.avTransport.Pause([
        ('InstanceID', 0),
        ('Speed', 1)
    ])

@only_on_master
def stop(self):
    """Stop the currently playing track."""
    self.avTransport.Stop([
        ('InstanceID', 0),
        ('Speed', 1)
    ])

@only_on_master
def seek(self, timestamp):
    """Seek to a given timestamp in the current track, specified in the
    format of HH:MM:SS or H:MM:SS.

    Raises:
        ValueError: if the given timestamp is invalid.
    """
    # Validate locally before sending; the device gives a far less
    # helpful error for malformed targets.
    if re.match(r'^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$', timestamp) is None:
        raise ValueError('invalid timestamp, use HH:MM:SS format')
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'REL_TIME'),
        ('Target', timestamp)
    ])

@only_on_master
def next(self):
    """Go to the next track.

    Keep in mind that next() can return errors for a variety of
    reasons. For example, if the Sonos is streaming Pandora and you
    call next() several times in quick succession an error code will
    likely be returned (since Pandora has limits on how many songs can
    be skipped).
    """
    self.avTransport.Next([
        ('InstanceID', 0),
        ('Speed', 1)
    ])

@only_on_master
def previous(self):
    """Go back to the previously played track.

    Keep in mind that previous() can return errors for a variety of
    reasons. For example, previous() will return an error code (error
    code 701) if the Sonos is streaming Pandora since you can't go
    back on tracks.
    """
    self.avTransport.Previous([
        ('InstanceID', 0),
        ('Speed', 1)
    ])
@property
def mute(self):
    """bool: The speaker's mute state.

    True if muted, False otherwise.
    """
    # The getter was missing here, leaving a bare @property stacked on
    # @mute.setter — a NameError at class definition time. Restored.
    response = self.renderingControl.GetMute([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    return bool(int(response['CurrentMute']))

@mute.setter
def mute(self, mute):
    """Mute (or unmute) the speaker."""
    mute_value = '1' if mute else '0'
    self.renderingControl.SetMute([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredMute', mute_value)
    ])
@property
def volume(self):
    """int: The speaker's volume.

    An integer between 0 and 100.
    """
    response = self.renderingControl.GetVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(response['CurrentVolume'])

@volume.setter
def volume(self, volume):
    """Set the speaker's volume."""
    # Clamp into the valid 0..100 range before sending.
    volume = min(max(int(volume), 0), 100)
    self.renderingControl.SetVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredVolume', volume)
    ])

@property
def bass(self):
    """int: The speaker's bass EQ.

    An integer between -10 and 10.
    """
    response = self.renderingControl.GetBass([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(response['CurrentBass'])

@bass.setter
def bass(self, bass):
    """Set the speaker's bass."""
    # Clamp into the valid -10..10 range before sending.
    bass = min(max(int(bass), -10), 10)
    self.renderingControl.SetBass([
        ('InstanceID', 0),
        ('DesiredBass', bass)
    ])

@property
def treble(self):
    """int: The speaker's treble EQ.

    An integer between -10 and 10.
    """
    response = self.renderingControl.GetTreble([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(response['CurrentTreble'])

@treble.setter
def treble(self, treble):
    """Set the speaker's treble."""
    # Clamp into the valid -10..10 range before sending.
    treble = min(max(int(treble), -10), 10)
    self.renderingControl.SetTreble([
        ('InstanceID', 0),
        ('DesiredTreble', treble)
    ])

@property
def loudness(self):
    """bool: The Sonos speaker's loudness compensation.

    True if on, False otherwise.

    Loudness is a complicated topic. You can find a nice summary about
    this feature here:
    http://forums.sonos.com/showthread.php?p=4698#post4698
    """
    response = self.renderingControl.GetLoudness([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return bool(int(response["CurrentLoudness"]))

@loudness.setter
def loudness(self, loudness):
    """Switch on/off the speaker's loudness compensation."""
    self.renderingControl.SetLoudness([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredLoudness', '1' if loudness else '0')
    ])
@property
def night_mode(self):
    """bool: The speaker's night mode.

    True if on, False if off, None if not supported.
    """
    if not self.is_soundbar:
        # Only soundbars expose the NightMode EQ setting.
        return None
    response = self.renderingControl.GetEQ([
        ('InstanceID', 0),
        ('EQType', 'NightMode')
    ])
    return bool(int(response['CurrentValue']))

@night_mode.setter
def night_mode(self, night_mode):
    """Switch on/off the speaker's night mode.

    :param night_mode: Enable or disable night mode
    :type night_mode: bool
    :raises NotSupportedException: If the device does not support
        night mode.
    """
    if not self.is_soundbar:
        raise NotSupportedException(
            'This device does not support night mode')
    self.renderingControl.SetEQ([
        ('InstanceID', 0),
        ('EQType', 'NightMode'),
        ('DesiredValue', int(night_mode))
    ])

@property
def dialog_mode(self):
    """bool: Get the Sonos speaker's dialog mode.

    True if on, False if off, None if not supported.
    """
    if not self.is_soundbar:
        # Only soundbars expose the DialogLevel EQ setting.
        return None
    response = self.renderingControl.GetEQ([
        ('InstanceID', 0),
        ('EQType', 'DialogLevel')
    ])
    return bool(int(response['CurrentValue']))

@dialog_mode.setter
def dialog_mode(self, dialog_mode):
    """Switch on/off the speaker's dialog mode.

    :param dialog_mode: Enable or disable dialog mode
    :type dialog_mode: bool
    :raises NotSupportedException: If the device does not support
        dialog mode.
    """
    if not self.is_soundbar:
        raise NotSupportedException(
            'This device does not support dialog mode')
    self.renderingControl.SetEQ([
        ('InstanceID', 0),
        ('EQType', 'DialogLevel'),
        ('DesiredValue', int(dialog_mode))
    ])
def _parse_zone_group_state(self):
    """The Zone Group State contains a lot of useful information.

    Retrieve and parse it, and populate the relevant properties
    (player name, uid, coordinator/bridge flags, group/zone sets).
    """
    # zoneGroupTopology.GetZoneGroupState()['ZoneGroupState'] returns XML
    # like this:
    #
    # <ZoneGroups>
    #   <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXXX1400:0">
    #     <ZoneGroupMember
    #         BootSeq="33"
    #         Configuration="1"
    #         Icon="x-rincon-roomicon:zoneextender"
    #         Invisible="1"
    #         IsZoneBridge="1"
    #         Location="http://192.168.1.100:1400/xml/device_description.xml"
    #         MinCompatibleVersion="22.0-00000"
    #         SoftwareVersion="24.1-74200"
    #         UUID="RINCON_000ZZZ1400"
    #         ZoneName="BRIDGE"/>
    #   </ZoneGroup>
    #   <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXX1400:46">
    #     <ZoneGroupMember ... UUID="RINCON_000XXX1400"
    #         ZoneName="Living Room"/>
    #     <ZoneGroupMember ... UUID="RINCON_000YYY1400"
    #         ZoneName="Kitchen"/>
    #   </ZoneGroup>
    # </ZoneGroups>
    #
    def parse_zone_group_member(member_element):
        """Parse a ZoneGroupMember or Satellite element from Zone Group
        State, create a SoCo instance for the member, set basic
        attributes and return it."""
        # Create a SoCo instance for each member. Because SoCo
        # instances are singletons, this is cheap if they have already
        # been created, and useful if they haven't. We can then
        # update various properties for that instance.
        member_attribs = member_element.attrib
        ip_addr = member_attribs['Location'].\
            split('//')[1].split(':')[0]
        zone = config.SOCO_CLASS(ip_addr)
        # share our cache
        zone._zgs_cache = self._zgs_cache
        # uid doesn't change, but it's not harmful to (re)set it, in
        # case the zone is as yet unseen.
        zone._uid = member_attribs['UUID']
        zone._player_name = member_attribs['ZoneName']
        # add the zone to the set of all members, and to the set
        # of visible members if appropriate
        is_visible = (member_attribs.get('Invisible') != '1')
        if is_visible:
            self._visible_zones.add(zone)
        self._all_zones.add(zone)
        return zone

    # This is called quite frequently, so it is worth optimising it.
    # Maintain a private cache. If the zgt has not changed, there is no
    # need to repeat all the XML parsing. In addition, switch on network
    # caching for a short interval (5 secs).
    zgs = self.zoneGroupTopology.GetZoneGroupState(
        cache=self._zgs_cache)['ZoneGroupState']
    if zgs == self._zgs_result:
        return
    self._zgs_result = zgs
    tree = XML.fromstring(zgs.encode('utf-8'))
    # Empty the set of all zone_groups
    self._groups.clear()
    # and the set of all members
    self._all_zones.clear()
    self._visible_zones.clear()
    # With some versions, the response is wrapped in a ZoneGroupState
    # element. Compare the find() result against None explicitly: an
    # Element with no children is falsy, so the previous `find(...) or
    # tree` truth test could wrongly skip an empty wrapper (and Element
    # truth-testing is deprecated in ElementTree).
    zone_groups = tree.find('ZoneGroups')
    if zone_groups is not None:
        tree = zone_groups
    # Loop over each ZoneGroup Element
    for group_element in tree.findall('ZoneGroup'):
        coordinator_uid = group_element.attrib['Coordinator']
        group_uid = group_element.attrib['ID']
        group_coordinator = None
        members = set()
        for member_element in group_element.findall('ZoneGroupMember'):
            zone = parse_zone_group_member(member_element)
            # Perform extra processing relevant to direct zone group
            # members
            #
            # If this element has the same UUID as the coordinator, it
            # is the coordinator
            if zone._uid == coordinator_uid:
                group_coordinator = zone
                zone._is_coordinator = True
            else:
                zone._is_coordinator = False
            # is_bridge doesn't change, but it does no real harm to
            # set/reset it here, just in case the zone has not been
            # seen before
            zone._is_bridge = (
                member_element.attrib.get('IsZoneBridge') == '1')
            # add the zone to the members for this group
            members.add(zone)
            # Loop over Satellite elements if present, and process as
            # for ZoneGroup elements
            for satellite_element in member_element.findall('Satellite'):
                zone = parse_zone_group_member(satellite_element)
                # Assume a satellite can't be a bridge or coordinator,
                # so no need to check.
                #
                # Add the zone to the members for this group.
                members.add(zone)
        # Now create a ZoneGroup with this info and add it to the list
        # of groups
        self._groups.add(ZoneGroup(group_uid, group_coordinator, members))
@property
def all_groups(self):
    """set of :class:`soco.groups.ZoneGroup`: All available groups."""
    # Refresh the cached topology first, then hand out a snapshot copy
    # so callers cannot mutate our internal state.
    self._parse_zone_group_state()
    return set(self._groups)
@property
def group(self):
    """:class:`soco.groups.ZoneGroup`: The Zone Group of which this device
    is a member.

    None if this zone is a slave in a stereo pair.
    """
    # The first group containing this zone wins; ``all_groups`` refreshes
    # the topology for us. (Asking the network directly via
    # zoneGroupTopology.GetZoneGroupAttributes() would also work, but is
    # probably slower than the cached topology.)
    return next((grp for grp in self.all_groups if self in grp), None)
@property
def all_zones(self):
    """set of :class:`SoCo`: All available zones."""
    # Refresh the topology, then return a defensive copy.
    self._parse_zone_group_state()
    return set(self._all_zones)
@property
def visible_zones(self):
    """set of :class:`SoCo`: All visible zones."""
    # Refresh the topology, then return a defensive copy.
    self._parse_zone_group_state()
    return set(self._visible_zones)
def partymode(self):
    """Put all the speakers in the network in the same group, a.k.a Party
    Mode.

    This blog shows the initial research responsible for this:
    http://blog.travelmarx.com/2010/06/exploring-sonos-via-upnp.html

    The trick seems to be (only tested on a two-speaker setup) to tell each
    speaker which to join. There's probably a bit more to it if multiple
    groups have been defined.
    """
    # Tell every other visible zone to join this one. A plain loop
    # replaces the previous side-effecting list comprehension (which
    # built and discarded a list and needed a pylint suppression).
    for zone in self.visible_zones:
        if zone is not self:
            zone.join(self)
def join(self, master):
    """Join this speaker to another "master" speaker."""
    # Pointing the transport at "x-rincon:<uid>" makes this zone a
    # slave in the master's group.
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', 'x-rincon:{0}'.format(master.uid)),
        ('CurrentURIMetaData', '')
    ])
    # The topology just changed: drop the cache and re-read it.
    self._zgs_cache.clear()
    self._parse_zone_group_state()
def unjoin(self):
    """Remove this speaker from a group.

    Seems to work ok even if you remove what was previously the group
    master from it's own group. If the speaker was not in a group also
    returns ok.
    """
    self.avTransport.BecomeCoordinatorOfStandaloneGroup(
        [('InstanceID', 0)])
    # Invalidate and refresh the cached zone group topology.
    self._zgs_cache.clear()
    self._parse_zone_group_state()
def switch_to_line_in(self, source=None):
    """Switch the speaker's input to line-in.

    Args:
        source (SoCo): The speaker whose line-in should be played.
            Default is line-in from the speaker itself.
    """
    # Play line-in from 'source' when given, otherwise our own line-in.
    line_in_uri = 'x-rincon-stream:{0}'.format(
        source.uid if source else self.uid)
    self.avTransport.SetAVTransportURI(
        [('InstanceID', 0),
         ('CurrentURI', line_in_uri),
         ('CurrentURIMetaData', '')])
@property
def is_playing_radio(self):
    """bool: Is the speaker playing radio?"""
    position_info = self.avTransport.GetPositionInfo(
        [('InstanceID', 0), ('Channel', 'Master')])
    # Radio streams are identified by the 'x-rincon-mp3radio:' scheme.
    return position_info['TrackURI'].startswith('x-rincon-mp3radio:')
@property
def is_playing_line_in(self):
    """bool: Is the speaker playing line-in?"""
    position_info = self.avTransport.GetPositionInfo(
        [('InstanceID', 0), ('Channel', 'Master')])
    # Line-in playback is identified by the 'x-rincon-stream:' scheme.
    return position_info['TrackURI'].startswith('x-rincon-stream:')
@property
def is_playing_tv(self):
    """bool: Is the playbar speaker input from TV?"""
    position_info = self.avTransport.GetPositionInfo(
        [('InstanceID', 0), ('Channel', 'Master')])
    # TV input is identified by the 'x-sonos-htastream:' scheme.
    return position_info['TrackURI'].startswith('x-sonos-htastream:')
def switch_to_tv(self):
    """Switch the playbar speaker's input to TV."""
    # The TV (SPDIF) input of a playbar is addressed through its own UID.
    tv_uri = 'x-sonos-htastream:%s:spdif' % (self.uid,)
    self.avTransport.SetAVTransportURI(
        [('InstanceID', 0),
         ('CurrentURI', tv_uri),
         ('CurrentURIMetaData', '')])
@property
def status_light(self):
    """bool: The white Sonos status light between the mute button and the
    volume up button on the speaker.

    True if on, otherwise False.
    """
    led_state = self.deviceProperties.GetLEDState()["CurrentLEDState"]
    return led_state == "On"

@status_light.setter
def status_light(self, led_on):
    """Switch on/off the speaker's status light."""
    # The device expects the literal strings "On" / "Off".
    self.deviceProperties.SetLEDState(
        [('DesiredLEDState', 'On' if led_on else 'Off')])
def get_current_track_info(self):
    """Get information about the currently playing track.

    Returns:
        dict: A dictionary containing information about the currently
        playing track: playlist_position, duration, title, artist, album,
        position and an album_art link.

    If we're unable to return data for a field, we'll return an empty
    string. This can happen for all kinds of reasons so be sure to check
    values. For example, a track may not have complete metadata and be
    missing an album name. In this case track['album'] will be an empty
    string.

    .. note:: Calling this method on a slave in a group will not
        return the track the group is playing, but the last track
        this speaker was playing.
    """
    response = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])

    # Defaults: any field we cannot determine stays an empty string.
    track = {'title': '', 'artist': '', 'album': '', 'album_art': '',
             'position': ''}
    track['playlist_position'] = response['Track']
    track['duration'] = response['TrackDuration']
    track['uri'] = response['TrackURI']
    track['position'] = response['RelTime']

    metadata = response['TrackMetaData']
    # Store the entire Metadata entry in the track, this can then be
    # used if needed by the client to restart a given URI
    track['metadata'] = metadata
    # Duration seems to be '0:00:00' when listening to radio, so treat
    # that case as a radio stream and look for streamContent metadata.
    if metadata != '' and track['duration'] == '0:00:00':
        metadata = XML.fromstring(really_utf8(metadata))
        # Try parse trackinfo: radio streams usually announce
        # "Artist - Title" in the streamContent element.
        trackinfo = metadata.findtext('.//{urn:schemas-rinconnetworks-com:'
                                      'metadata-1-0/}streamContent') or ''
        index = trackinfo.find(' - ')

        if index > -1:
            # Split "Artist - Title" at the first ' - ' separator.
            track['artist'] = trackinfo[:index]
            track['title'] = trackinfo[index + 3:]
        else:
            # Might find some kind of title anyway in metadata
            track['title'] = metadata.findtext('.//{http://purl.org/dc/'
                                               'elements/1.1/}title')
            if not track['title']:
                # Fall back to the raw streamContent text.
                track['title'] = trackinfo

    # If the speaker is playing from the line-in source, querying for track
    # metadata will return "NOT_IMPLEMENTED".
    elif metadata not in ('', 'NOT_IMPLEMENTED', None):
        # Track metadata is returned in DIDL-Lite format
        metadata = XML.fromstring(really_utf8(metadata))
        md_title = metadata.findtext(
            './/{http://purl.org/dc/elements/1.1/}title')
        md_artist = metadata.findtext(
            './/{http://purl.org/dc/elements/1.1/}creator')
        md_album = metadata.findtext(
            './/{urn:schemas-upnp-org:metadata-1-0/upnp/}album')

        # Use each DIDL field only when it is present and non-empty.
        track['title'] = ""
        if md_title:
            track['title'] = md_title
        track['artist'] = ""
        if md_artist:
            track['artist'] = md_artist
        track['album'] = ""
        if md_album:
            track['album'] = md_album

        album_art_url = metadata.findtext(
            './/{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')
        if album_art_url is not None:
            # Album art URIs are often relative; expand to a full URI.
            track['album_art'] = \
                self.music_library.build_album_art_full_uri(album_art_url)

    return track
def get_speaker_info(self, refresh=False, timeout=None):
    """Get information about the Sonos speaker.

    Arguments:
        refresh(bool): Refresh the speaker info cache.
        timeout: How long to wait for the server to send
            data before giving up, as a float, or a
            `(connect timeout, read timeout)` tuple
            e.g. (3, 5). Default is no timeout.

    Returns:
        dict: Information about the Sonos speaker, such as the UID,
        MAC Address, and Zone Name. Returns None if the device
        description contains no device element.
    """
    # Serve from the instance-level cache unless a refresh is requested.
    if self.speaker_info and refresh is False:
        return self.speaker_info
    else:
        # The device description document is served over plain HTTP on
        # port 1400 of the speaker.
        response = requests.get('http://' + self.ip_address +
                                ':1400/xml/device_description.xml',
                                timeout=timeout)
        dom = XML.fromstring(response.content)

    device = dom.find('{urn:schemas-upnp-org:device-1-0}device')
    if device is not None:
        self.speaker_info['zone_name'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}roomName')

        # no zone icon in device_description.xml -> player icon
        self.speaker_info['player_icon'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}iconList/'
            '{urn:schemas-upnp-org:device-1-0}icon/'
            '{urn:schemas-upnp-org:device-1-0}url'
        )

        self.speaker_info['uid'] = self.uid
        self.speaker_info['serial_number'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}serialNum')
        self.speaker_info['software_version'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}softwareVersion')
        self.speaker_info['hardware_version'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}hardwareVersion')
        self.speaker_info['model_number'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}modelNumber')
        self.speaker_info['model_name'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}modelName')
        self.speaker_info['display_version'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}displayVersion')

        # no mac address - extract from serial number, which looks like
        # '<MAC>:<rest>' on Sonos devices.
        mac = self.speaker_info['serial_number'].split(':')[0]
        self.speaker_info['mac_address'] = mac

        return self.speaker_info
    return None
def get_current_transport_info(self):
    """Get the current playback state.

    Returns:
        dict: The following information about the
        speaker's playing state:

        * current_transport_state (``PLAYING``, ``TRANSITIONING``,
          ``PAUSED_PLAYBACK``, ``STOPPED``)
        * current_transport_status (OK, ?)
        * current_speed(1, ?)

    This allows us to know if speaker is playing or not. Don't know other
    states of CurrentTransportStatus and CurrentSpeed.
    """
    response = self.avTransport.GetTransportInfo([
        ('InstanceID', 0),
    ])
    # Repackage the UPnP response under stable snake_case keys.
    return {
        'current_transport_status': response['CurrentTransportStatus'],
        'current_transport_state': response['CurrentTransportState'],
        'current_transport_speed': response['CurrentSpeed'],
    }
def get_queue(self, start=0, max_items=100, full_album_art_uri=False):
    """Get information about the queue.

    :param start: Starting number of returned matches
    :param max_items: Maximum number of returned matches
    :param full_album_art_uri: If the album art URI should include the
        IP address
    :returns: A :py:class:`~.soco.data_structures.Queue` object

    This method is heavly based on Sam Soffes (aka soffes) ruby
    implementation
    """
    response = self.contentDirectory.Browse([
        ('ObjectID', 'Q:0'),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    # The Browse counters become snake_case int entries on the Queue.
    metadata = {
        camel_to_underscore(tag): int(response[tag])
        for tag in ('NumberReturned', 'TotalMatches', 'UpdateID')
    }
    result = response['Result']
    # Even an empty queue appears to produce a result document, but
    # guard against a missing one anyway.
    if not result:
        return Queue([], **metadata)

    queue = []
    for item in from_didl_string(result):
        # Optionally qualify relative album art URIs with the IP address.
        if full_album_art_uri:
            self.music_library._update_album_art_to_full_uri(item)
        queue.append(item)
    return Queue(queue, **metadata)
@property
def queue_size(self):
    """int: Size of the queue, or None if it cannot be determined."""
    response = self.contentDirectory.Browse([
        ('ObjectID', 'Q:0'),
        ('BrowseFlag', 'BrowseMetadata'),
        ('Filter', '*'),
        ('StartingIndex', 0),
        ('RequestedCount', 1),
        ('SortCriteria', '')
    ])
    dom = XML.fromstring(really_utf8(response['Result']))
    container = dom.find(
        '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container')
    # The size lives in the container's 'childCount' attribute; return
    # None if the container or the attribute is missing.
    if container is None:
        return None
    child_count = container.get('childCount')
    return int(child_count) if child_count is not None else None
def get_sonos_playlists(self, *args, **kwargs):
    """Convenience method for
    `get_music_library_information('sonos_playlists')`.

    Refer to the docstring for that method
    """
    # Prepend the fixed search type and delegate to the music library.
    return self.music_library.get_music_library_information(
        'sonos_playlists', *args, **kwargs)
@only_on_master
def add_uri_to_queue(self, uri, position=0, as_next=False):
    """Add the URI to the queue.

    For arguments and return value see `add_to_queue`.
    """
    # Wrap the bare URI in a minimal DIDL item. The protocol info does
    # not need to describe the real mime type of the URI for this to
    # work.
    resources = [DidlResource(uri=uri,
                              protocol_info="x-rincon-playlist:*:*:*")]
    wrapper = DidlObject(resources=resources, title='', parent_id='',
                         item_id='')
    return self.add_to_queue(wrapper, position, as_next)
@only_on_master
def add_to_queue(self, queueable_item, position=0, as_next=False):
    """Add a queueable item to the queue.

    Args:
        queueable_item (DidlObject or MusicServiceItem): The item to be
            added to the queue
        position (int): The index (1-based) at which the URI should be
            added. Default is 0 (add URI at the end of the queue).
        as_next (bool): Whether this URI should be played as the next
            track in shuffle mode. This only works if `play_mode=SHUFFLE`.

    Returns:
        int: The index of the new item in the queue.
    """
    response = self.avTransport.AddURIToQueue([
        ('InstanceID', 0),
        ('EnqueuedURI', queueable_item.resources[0].uri),
        ('EnqueuedURIMetaData', to_didl_string(queueable_item)),
        ('DesiredFirstTrackNumberEnqueued', position),
        ('EnqueueAsNext', int(as_next))
    ])
    # The service reports where the item actually ended up in the queue.
    return int(response['FirstTrackNumberEnqueued'])
def add_multiple_to_queue(self, items, container=None):
    """Add a sequence of items to the queue.

    Args:
        items (list): A sequence of items to the be added to the queue
        container (DidlObject, optional): A container object which
            includes the items.
    """
    if container is None:
        # Sonos accepts empty container information.
        container_uri = ''
        container_metadata = ''
    else:
        container_uri = container.resources[0].uri
        container_metadata = to_didl_string(container)

    # Each request can only carry 16 items, so submit them in chunks.
    chunk_size = 16
    item_list = list(items)
    for start in range(0, len(item_list), chunk_size):
        chunk = item_list[start:start + chunk_size]
        self.avTransport.AddMultipleURIsToQueue([
            ('InstanceID', 0),
            ('UpdateID', 0),
            ('NumberOfURIs', len(chunk)),
            ('EnqueuedURIs',
             ' '.join(item.resources[0].uri for item in chunk)),
            ('EnqueuedURIsMetaData',
             ' '.join(to_didl_string(item) for item in chunk)),
            ('ContainerURI', container_uri),
            ('ContainerMetaData', container_metadata),
            ('DesiredFirstTrackNumberEnqueued', 0),
            ('EnqueueAsNext', 0)
        ])
@only_on_master
def remove_from_queue(self, index):
    """Remove a track from the queue by index. The index number is
    required as an argument, where the first index is 0.

    Args:
        index (int): The (0-based) index of the track to remove
    """
    # Queue object IDs are 1-based ('Q:0/1' is the first track), so
    # shift the caller's 0-based index by one. The meaning of UpdateID
    # here is unclear; '0' is what the original implementation sent.
    self.avTransport.RemoveTrackFromQueue([
        ('InstanceID', 0),
        ('ObjectID', 'Q:0/{0}'.format(index + 1)),
        ('UpdateID', '0'),
    ])
@only_on_master
def clear_queue(self):
    """Remove all tracks from the queue."""
    self.avTransport.RemoveAllTracksFromQueue([('InstanceID', 0)])
@deprecated('0.13', "soco.music_library.get_favorite_radio_shows", '0.15')
def get_favorite_radio_shows(self, start=0, max_items=100):
    """Get favorite radio shows from Sonos' Radio app.

    Returns:
        dict: A dictionary containing the total number of favorites, the
        number of favorites returned, and the actual list of favorite radio
        shows, represented as a dictionary with `title` and `uri` keys.

    Depending on what you're building, you'll want to check to see if the
    total number of favorites is greater than the amount you
    requested (`max_items`), if it is, use `start` to page through and
    get the entire list of favorites.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures', stacklevel=2)
    return self.__get_favorites(RADIO_SHOWS, start, max_items)
@deprecated('0.13', "soco.music_library.get_favorite_radio_stations",
            '0.15')
def get_favorite_radio_stations(self, start=0, max_items=100):
    """Get favorite radio stations from Sonos' Radio app.

    See :meth:`get_favorite_radio_shows` for return type and remarks.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures', stacklevel=2)
    return self.__get_favorites(RADIO_STATIONS, start, max_items)
@deprecated('0.13', "soco.music_library.get_sonos_favorites", '0.15')
def get_sonos_favorites(self, start=0, max_items=100):
    """Get Sonos favorites.

    See :meth:`get_favorite_radio_shows` for return type and remarks.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures', stacklevel=2)
    return self.__get_favorites(SONOS_FAVORITES, start, max_items)
def __get_favorites(self, favorite_type, start=0, max_items=100):
    """Helper method for `get_favorite_radio_*` methods.

    Args:
        favorite_type (str): Specify `RADIO_STATIONS`, `RADIO_SHOWS` or
            `SONOS_FAVORITES`; anything else is treated as
            `SONOS_FAVORITES`.
        start (int): Which number to start the retrieval from. Used for
            paging.
        max_items (int): The total number of results to return.

    Returns:
        dict: with keys 'total' (total number of matches), 'returned'
        (number of favorites in this page) and 'favorites' (list of
        dicts with 'title', 'uri' and, for Sonos favorites, 'meta').
    """
    if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
        favorite_type = SONOS_FAVORITES

    # Sonos favorites live under 'FV:2'; radio favorites under 'R:0/...'.
    # Bug fix: compare strings with '==', not 'is' -- identity
    # comparison of str constants is fragile and implementation-defined.
    if favorite_type == SONOS_FAVORITES:
        object_id = 'FV:2'
    else:
        object_id = 'R:0/{0}'.format(favorite_type)

    response = self.contentDirectory.Browse([
        ('ObjectID', object_id),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    result = {}
    favorites = []
    results_xml = response['Result']

    if results_xml != '':
        # Favorites are returned in DIDL-Lite format. Radio shows are
        # represented as containers; the other types as items.
        metadata = XML.fromstring(really_utf8(results_xml))
        tag = ('{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container'
               if favorite_type == RADIO_SHOWS else
               '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item')
        for item in metadata.findall(tag):
            favorite = {}
            favorite['title'] = item.findtext(
                '{http://purl.org/dc/elements/1.1/}title')
            favorite['uri'] = item.findtext(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res')
            if favorite_type == SONOS_FAVORITES:
                favorite['meta'] = item.findtext(
                    '{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD')
            favorites.append(favorite)

    result['total'] = response['TotalMatches']
    result['returned'] = len(favorites)
    result['favorites'] = favorites

    return result
def create_sonos_playlist(self, title):
    """Create a new empty Sonos playlist.

    Args:
        title: Name of the playlist

    :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
    """
    response = self.avTransport.CreateSavedQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('EnqueuedURI', ''),
        ('EnqueuedURIMetaData', ''),
    ])
    # The assigned id looks like 'SQ:<n>'; the saved-queue file on the
    # device is keyed by the numeric part.
    playlist_id = response['AssignedObjectID']
    saved_queue_uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(
        playlist_id.split(':', 2)[1])
    resources = [DidlResource(uri=saved_queue_uri,
                              protocol_info="x-rincon-playlist:*:*:*")]
    return DidlPlaylistContainer(resources=resources, title=title,
                                 parent_id='SQ:', item_id=playlist_id)
@only_on_master
# pylint: disable=invalid-name
def create_sonos_playlist_from_queue(self, title):
    """Create a new Sonos playlist from the current queue.

    Args:
        title: Name of the playlist

    :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
    """
    # Note: probably same as Queue service method SaveAsSonosPlaylist
    # but this has not been tested. This method is what the
    # controller uses.
    response = self.avTransport.SaveQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('ObjectID', '')
    ])
    # The assigned id looks like 'SQ:<n>'; the saved-queue file on the
    # device is keyed by the numeric part.
    playlist_id = response['AssignedObjectID']
    saved_queue_uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(
        playlist_id.split(':', 2)[1])
    resources = [DidlResource(uri=saved_queue_uri,
                              protocol_info="x-rincon-playlist:*:*:*")]
    return DidlPlaylistContainer(resources=resources, title=title,
                                 parent_id='SQ:', item_id=playlist_id)
@only_on_master
def remove_sonos_playlist(self, sonos_playlist):
    """Remove a Sonos playlist.

    Args:
        sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove
            or the item_id (str).

    Returns:
        bool: True if succesful, False otherwise

    Raises:
        SoCoUPnPException: If sonos_playlist does not point to a valid
            object.
    """
    # Accept either a playlist object or a bare item id string.
    try:
        object_id = sonos_playlist.item_id
    except AttributeError:
        object_id = sonos_playlist
    return self.contentDirectory.DestroyObject([('ObjectID', object_id)])
def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
    """Adds a queueable item to a Sonos' playlist.

    Args:
        queueable_item (DidlObject): the item to add to the Sonos' playlist
        sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to
            which the item should be added
    """
    # The service requires the playlist's current update id; a cheap
    # one-item search on the playlist retrieves it.
    response, _ = self.music_library._music_lib_search(
        sonos_playlist.item_id, 0, 1)
    update_id = response['UpdateID']

    self.avTransport.AddURIToSavedQueue([
        ('InstanceID', 0),
        ('UpdateID', update_id),
        ('ObjectID', sonos_playlist.item_id),
        ('EnqueuedURI', queueable_item.resources[0].uri),
        ('EnqueuedURIMetaData', to_didl_string(queueable_item)),
        # 2 ** 32 - 1 = 4294967295: playlist positions appear to be an
        # unsigned 32-bit int, so asking for the largest possible index
        # amounts to appending at the end.
        ('AddAtIndex', 4294967295)
    ])
@only_on_master
def set_sleep_timer(self, sleep_time_seconds):
    """Sets the sleep timer.

    Args:
        sleep_time_seconds (int or NoneType): How long to wait before
            turning off speaker in seconds, None to cancel a sleep timer.
            Maximum value of 86399

    Raises:
        SoCoException: Upon errors interacting with Sonos controller
        ValueError: Argument/Syntax errors
    """
    # Note: A value of None for sleep_time_seconds is valid, and needs to
    # be preserved distinctly separate from 0. 0 means go to sleep now,
    # which will immediately start the sound tappering, and could be a
    # useful feature, while None means cancel the current timer.
    #
    # Bug fix: the previous error message was duplicated and mangled by
    # a backslash line-continuation (it contained a run of literal
    # spaces); define it once, cleanly.
    err_msg = ('invalid sleep_time_seconds, must be integer value '
               'between 0 and 86399 inclusive or None')
    if sleep_time_seconds is None:
        sleep_time = ''
    else:
        # Validate locally before touching the network; the device
        # would reject out-of-range values with UPnP error 402 anyway.
        try:
            seconds = int(sleep_time_seconds)
        except (ValueError, TypeError):
            raise ValueError(err_msg)
        if not 0 <= seconds <= 86399:
            raise ValueError(err_msg)
        # The device expects an 'H:MM:SS' duration string.
        sleep_time = format(datetime.timedelta(seconds=seconds))
    try:
        self.avTransport.ConfigureSleepTimer([
            ('InstanceID', 0),
            ('NewSleepTimerDuration', sleep_time),
        ])
    except SoCoUPnPException as err:
        # Translate the device's 402 (invalid args) into the documented
        # ValueError; re-raise anything else untouched.
        if 'Error 402 received' in str(err):
            raise ValueError(err_msg)
        raise
@only_on_master
def get_sleep_timer(self):
    """Retrieves remaining sleep time, if any

    Returns:
        int or NoneType: Number of seconds left in timer. If there is no
        sleep timer currently set it will return None.
    """
    resp = self.avTransport.GetRemainingSleepTimerDuration([
        ('InstanceID', 0),
    ])
    remaining = resp['RemainingSleepTimerDuration']
    if not remaining:
        return None
    # Convert the 'H:MM:SS' duration string into seconds.
    parts = remaining.split(':')
    return int(parts[0]) * 3600 + int(parts[1]) * 60 + int(parts[2])
@only_on_master
def reorder_sonos_playlist(self, sonos_playlist, tracks, new_pos,
                           update_id=0):
    """Reorder and/or Remove tracks in a Sonos playlist.

    The underlying call is quite complex as it can both move a track
    within the list or delete a track from the playlist. All of this
    depends on what tracks and new_pos specify.

    If a list is specified for tracks, then a list must be used for
    new_pos. Each list element is a discrete modification and the next
    list operation must anticipate the new state of the playlist.

    If a comma formatted string to tracks is specified, then use
    a similiar string to specify new_pos. Those operations should be
    ordered from the end of the list to the beginning

    See the helper methods
    :py:meth:`clear_sonos_playlist`, :py:meth:`move_in_sonos_playlist`,
    :py:meth:`remove_from_sonos_playlist` for simplified usage.

    update_id - If you have a series of operations, tracking the update_id
    and setting it, will save a lookup operation.

    Examples:
      To reorder the first two tracks::

        # sonos_playlist specified by the DidlPlaylistContainer object
        sonos_playlist = device.get_sonos_playlists()[0]
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=[0, ], new_pos=[1, ])
        # OR specified by the item_id
        device.reorder_sonos_playlist('SQ:0', tracks=[0, ], new_pos=[1, ])

      To delete the second track::

        # tracks/new_pos are a list of int
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=[1, ], new_pos=[None, ])
        # OR tracks/new_pos are a list of int-like
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=['1', ], new_pos=['', ])
        # OR tracks/new_pos are strings - no transform is done
        device.reorder_sonos_playlist(sonos_playlist, tracks='1',
                                      new_pos='')

      To reverse the order of a playlist with 4 items::

        device.reorder_sonos_playlist(sonos_playlist, tracks='3,2,1,0',
                                      new_pos='0,1,2,3')

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`): The
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        tracks: (list): list of track indices(int) to reorder. May also be
            a list of int like things. i.e. ``['0', '1',]`` OR it may be a
            str of comma separated int like things. ``"0,1"``. Tracks are
            **0**-based. Meaning the first track is track 0, just like
            indexing into a Python list.
        new_pos (list): list of new positions (int|None)
            corresponding to track_list. MUST be the same type as
            ``tracks``. **0**-based, see tracks above. ``None`` is the
            indicator to remove the track. If using a list of strings,
            then a remove is indicated by an empty string.
        update_id (int): operation id (default: 0) If set to 0, a lookup
            is done to find the correct value.

    Returns:
        dict: Which contains 3 elements: change, length and update_id.
            Change in size between original playlist and the resulting
            playlist, the length of resulting playlist, and the new
            update_id.

    Raises:
        SoCoUPnPException: If playlist does not exist or if your tracks
            and/or new_pos arguments are invalid.
    """
    # allow either a string 'SQ:10' or an object with item_id attribute.
    object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)

    # Normalise tracks/new_pos into parallel lists of strings (single
    # str/int arguments pass through as one-element lists).
    if isinstance(tracks, UnicodeType):
        track_list = [tracks, ]
        position_list = [new_pos, ]
    elif isinstance(tracks, int):
        track_list = [tracks, ]
        if new_pos is None:
            new_pos = ''
        position_list = [new_pos, ]
    else:
        track_list = [str(x) for x in tracks]
        position_list = [str(x) if x is not None else '' for x in new_pos]
    # track_list = ','.join(track_list)
    # position_list = ','.join(position_list)
    if update_id == 0:  # retrieve the update id for the object
        response, _ = self.music_library._music_lib_search(object_id, 0, 1)
        update_id = response['UpdateID']
    change = 0

    for track, position in zip(track_list, position_list):
        if track == position:  # there is no move, a no-op
            continue
        response = self.avTransport.ReorderTracksInSavedQueue([
            ("InstanceID", 0),
            ("ObjectID", object_id),
            ("UpdateID", update_id),
            ("TrackList", track),
            ("NewPositionList", position),
        ])
        # Each call reports the length delta and a fresh update id that
        # the next iteration must use.
        change += int(response['QueueLengthChange'])
        update_id = int(response['NewUpdateID'])
    # NOTE(review): if every (track, position) pair above is a no-op,
    # 'response' here is either unbound or the _music_lib_search result,
    # neither of which has 'NewQueueLength' -- this line would then
    # raise. Confirm whether all-no-op input is possible for callers.
    length = int(response['NewQueueLength'])
    response = {'change': change,
                'update_id': update_id,
                'length': length}
    return response
@only_on_master
def clear_sonos_playlist(self, sonos_playlist, update_id=0):
    """Clear all tracks from a Sonos playlist.
    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.clear_sonos_playlist(sonos_playlist)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        ValueError: If sonos_playlist specified by string and is not found.
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # Resolve an item_id string into the actual playlist object.
    if not isinstance(sonos_playlist, DidlPlaylistContainer):
        sonos_playlist = self.get_sonos_playlist_by_attr(
            'item_id', sonos_playlist)
    count = self.music_library.browse(ml_item=sonos_playlist).total_matches
    if not count:
        # Nothing to remove from an already-empty playlist.
        return {'change': 0, 'update_id': update_id, 'length': count}
    # Remove every index in one call; empty-string positions mean delete.
    tracks = ','.join(str(idx) for idx in range(count))
    return self.reorder_sonos_playlist(sonos_playlist, tracks=tracks,
                                       new_pos='', update_id=update_id)
@only_on_master
def move_in_sonos_playlist(self, sonos_playlist, track, new_pos,
                           update_id=0):
    """Move a track to a new position within a Sonos Playlist.
    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.move_in_sonos_playlist(sonos_playlist, track=0, new_pos=1)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to move. The first
            track is track 0, just like indexing into a Python list.
        new_pos (int): **0**-based location to move the track.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # Coerce to int so the single-track code path of reorder is used.
    return self.reorder_sonos_playlist(
        sonos_playlist, int(track), int(new_pos), update_id)
@only_on_master
def remove_from_sonos_playlist(self, sonos_playlist, track, update_id=0):
    """Remove a track from a Sonos Playlist.
    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.remove_from_sonos_playlist(sonos_playlist, track=0)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to remove. The
            first track is track 0, just like indexing into a Python list.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # A new position of None signals removal to reorder_sonos_playlist.
    return self.reorder_sonos_playlist(
        sonos_playlist, int(track), None, update_id)
@only_on_master
def get_sonos_playlist_by_attr(self, attr_name, match):
    """Return the first Sonos Playlist DidlPlaylistContainer that
    matches the attribute specified.

    Args:
        attr_name (str): DidlPlaylistContainer attribute to compare. The
            most useful being: 'title' and 'item_id'.
        match (str): Value to match.

    Returns:
        (:class:`~.soco.data_structures.DidlPlaylistContainer`): The
            first matching playlist object.

    Raises:
        (AttributeError): If indicated attribute name does not exist.
        (ValueError): If a match can not be found.

    Example::

        device.get_sonos_playlist_by_attr('title', 'Foo')
        device.get_sonos_playlist_by_attr('item_id', 'SQ:3')
    """
    for playlist in self.get_sonos_playlists():
        if getattr(playlist, attr_name) == match:
            return playlist
    raise ValueError('No match on "{0}" for value "{1}"'.format(
        attr_name, match))
|
amelchio/pysonos
|
pysonos/core.py
|
SoCo.loudness
|
python
|
def loudness(self):
    """bool: The Sonos speaker's loudness compensation.

    True if on, False otherwise.
    """
    result = self.renderingControl.GetLoudness([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    # The service reports '0'/'1'; convert via int to a bool.
    return bool(int(result["CurrentLoudness"]))
|
bool: The Sonos speaker's loudness compensation.
True if on, False otherwise.
Loudness is a complicated topic. You can find a nice summary about this
feature here: http://forums.sonos.com/showthread.php?p=4698#post4698
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/core.py#L802-L815
| null |
class SoCo(_SocoSingletonBase):
"""A simple class for controlling a Sonos speaker.
For any given set of arguments to __init__, only one instance of this class
may be created. Subsequent attempts to create an instance with the same
arguments will return the previously created instance. This means that all
SoCo instances created with the same ip address are in fact the *same* SoCo
instance, reflecting the real world position.
.. rubric:: Basic Methods
.. autosummary::
play_from_queue
play
play_uri
pause
stop
seek
next
previous
mute
volume
play_mode
cross_fade
ramp_to_volume
get_current_track_info
get_speaker_info
get_current_transport_info
.. rubric:: Queue Management
.. autosummary::
get_queue
queue_size
add_to_queue
add_uri_to_queue
add_multiple_to_queue
remove_from_queue
clear_queue
.. rubric:: Group Management
.. autosummary::
group
partymode
join
unjoin
all_groups
all_zones
visible_zones
.. rubric:: Player Identity and Settings
.. autosummary::
player_name
uid
household_id
is_visible
is_bridge
is_coordinator
is_soundbar
bass
treble
loudness
night_mode
dialog_mode
status_light
.. rubric:: Playlists and Favorites
.. autosummary::
get_sonos_playlists
create_sonos_playlist
create_sonos_playlist_from_queue
remove_sonos_playlist
add_item_to_sonos_playlist
reorder_sonos_playlist
clear_sonos_playlist
move_in_sonos_playlist
remove_from_sonos_playlist
get_sonos_playlist_by_attr
get_favorite_radio_shows
get_favorite_radio_stations
get_sonos_favorites
.. rubric:: Miscellaneous
.. autosummary::
switch_to_line_in
is_playing_radio
is_playing_line_in
is_playing_tv
switch_to_tv
set_sleep_timer
get_sleep_timer
.. warning::
Properties on this object are not generally cached and may obtain
information over the network, so may take longer than expected to set
or return a value. It may be a good idea for you to cache the value in
your own code.
.. note::
Since all methods/properties on this object will result in an UPnP
request, they might result in an exception without it being mentioned
in the Raises section.
In most cases, the exception will be a
:class:`soco.exceptions.SoCoUPnPException`
(if the player returns an UPnP error code), but in special cases
it might also be another :class:`soco.exceptions.SoCoException`
or even a `requests` exception.
"""
_class_group = 'SoCo'
# pylint: disable=super-on-old-class
    def __init__(self, ip_address):
        """
        Args:
            ip_address (str): The speaker's IPv4 address, e.g.
                ``'192.168.1.101'``.

        Raises:
            ValueError: If `ip_address` is not a valid IPv4 address string.
                Sonos does not (yet) support IPv6.
        """
        # Note: Creation of a SoCo instance should be as cheap and quick as
        # possible. Do not make any network calls here
        super(SoCo, self).__init__()
        # Check if ip_address is a valid IPv4 representation.
        # Sonos does not (yet) support IPv6
        try:
            socket.inet_aton(ip_address)
        except socket.error:
            raise ValueError("Not a valid IP address string")
        #: The speaker's ip address
        self.ip_address = ip_address
        self.speaker_info = {}  # Stores information about the current speaker
        # The services which we use
        # pylint: disable=invalid-name
        self.avTransport = AVTransport(self)
        self.contentDirectory = ContentDirectory(self)
        self.deviceProperties = DeviceProperties(self)
        self.renderingControl = RenderingControl(self)
        self.zoneGroupTopology = ZoneGroupTopology(self)
        self.alarmClock = AlarmClock(self)
        self.systemProperties = SystemProperties(self)
        self.musicServices = MusicServices(self)
        self.music_library = MusicLibrary(self)
        # Some private attributes, mostly lazily populated by
        # _parse_zone_group_state() the first time they are needed.
        self._all_zones = set()          # every zone seen in the topology
        self._groups = set()             # every ZoneGroup seen in the topology
        self._is_bridge = None           # set by _parse_zone_group_state
        self._is_coordinator = False     # set by _parse_zone_group_state
        self._is_soundbar = None         # set lazily by is_soundbar
        self._player_name = None         # set by _parse_zone_group_state
        self._uid = None                 # set by _parse_zone_group_state
        self._household_id = None        # set lazily by household_id
        self._visible_zones = set()      # zones not hidden (bridges, slaves)
        self._zgs_cache = Cache(default_timeout=5)  # topology network cache
        self._zgs_result = None          # last raw ZoneGroupState XML string
        _LOG.debug("Created SoCo instance for ip: %s", ip_address)
def __str__(self):
return "<{0} object at ip {1}>".format(
self.__class__.__name__, self.ip_address)
def __repr__(self):
return '{0}("{1}")'.format(self.__class__.__name__, self.ip_address)
    @property
    def player_name(self):
        """str: The speaker's name."""
        # We could get the name like this:
        # result = self.deviceProperties.GetZoneAttributes()
        # return result["CurrentZoneName"]
        # but it is probably quicker to get it from the group topology
        # and take advantage of any caching
        self._parse_zone_group_state()
        return self._player_name

    @player_name.setter
    def player_name(self, playername):
        """Set the speaker's name.

        Args:
            playername (str): The new name for the speaker.
        """
        # Icon and configuration are deliberately left blank, which keeps
        # the device's current values.
        self.deviceProperties.SetZoneAttributes([
            ('DesiredZoneName', playername),
            ('DesiredIcon', ''),
            ('DesiredConfiguration', '')
        ])
    @property
    def uid(self):
        """str: A unique identifier.

        Looks like: ``'RINCON_000XXXXXXXXXX1400'``
        """
        # Since this does not change over time (?) check whether we already
        # know the answer. If so, there is no need to go further
        if self._uid is not None:
            return self._uid
        # if not, we have to get it from the zone topology, which
        # is probably quicker than any alternative, since the zgt is probably
        # cached. This will set self._uid for us for next time, so we won't
        # have to do this again
        self._parse_zone_group_state()
        return self._uid
        # An alternative way of getting the uid is as follows:
        # self.device_description_url = \
        #    'http://{0}:1400/xml/device_description.xml'.format(
        #     self.ip_address)
        # response = requests.get(self.device_description_url).text
        # tree = XML.fromstring(response.encode('utf-8'))
        # udn = tree.findtext('.//{urn:schemas-upnp-org:device-1-0}UDN')
        # # the udn has a "uuid:" prefix before the uid, so we need to strip it
        # self._uid = uid = udn[5:]
        # return uid
@property
def household_id(self):
"""str: A unique identifier for all players in a household.
Looks like: ``'Sonos_asahHKgjgJGjgjGjggjJgjJG34'``
"""
# Since this does not change over time (?) check whether we already
# know the answer. If so, return the cached version
if self._household_id is None:
self._household_id = self.deviceProperties.GetHouseholdID()[
'CurrentHouseholdID']
return self._household_id
    @property
    def is_visible(self):
        """bool: Is this zone visible?

        A zone might be invisible if, for example, it is a bridge, or the
        slave part of a stereo pair.
        """
        # We could do this:
        # invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
        # but it is better to do it in the following way, which uses the
        # zone group topology, to capitalise on any caching.
        return self in self.visible_zones
@property
def is_bridge(self):
"""bool: Is this zone a bridge?"""
# Since this does not change over time (?) check whether we already
# know the answer. If so, there is no need to go further
if self._is_bridge is not None:
return self._is_bridge
# if not, we have to get it from the zone topology. This will set
# self._is_bridge for us for next time, so we won't have to do this
# again
self._parse_zone_group_state()
return self._is_bridge
    @property
    def is_coordinator(self):
        """bool: Is this zone a group coordinator?"""
        # We could do this:
        # invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
        # but it is better to do it in the following way, which uses the
        # zone group topology, to capitalise on any caching.
        # _parse_zone_group_state() refreshes self._is_coordinator.
        self._parse_zone_group_state()
        return self._is_coordinator
@property
def is_soundbar(self):
"""bool: Is this zone a soundbar (i.e. has night mode etc.)?"""
if self._is_soundbar is None:
if not self.speaker_info:
self.get_speaker_info()
model_name = self.speaker_info['model_name'].lower()
self._is_soundbar = any(model_name.endswith(s) for s in SOUNDBARS)
return self._is_soundbar
@property
def play_mode(self):
"""str: The queue's play mode.
Case-insensitive options are:
* ``'NORMAL'`` -- Turns off shuffle and repeat.
* ``'REPEAT_ALL'`` -- Turns on repeat and turns off shuffle.
* ``'SHUFFLE'`` -- Turns on shuffle *and* repeat. (It's
strange, I know.)
* ``'SHUFFLE_NOREPEAT'`` -- Turns on shuffle and turns off
repeat.
"""
result = self.avTransport.GetTransportSettings([
('InstanceID', 0),
])
return result['PlayMode']
@play_mode.setter
def play_mode(self, playmode):
"""Set the speaker's mode."""
playmode = playmode.upper()
if playmode not in PLAY_MODES.keys():
raise KeyError("'%s' is not a valid play mode" % playmode)
self.avTransport.SetPlayMode([
('InstanceID', 0),
('NewPlayMode', playmode)
])
    @property
    def shuffle(self):
        """bool: The queue's shuffle option.

        True if enabled, False otherwise.
        """
        # PLAY_MODES maps a play-mode name to a (shuffle, repeat) pair.
        return PLAY_MODES[self.play_mode][0]

    @shuffle.setter
    def shuffle(self, shuffle):
        """Set the queue's shuffle option."""
        # Keep the current repeat setting and change only shuffle; the two
        # flags are combined into a single device play mode.
        repeat = self.repeat
        self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, repeat)]
    @property
    def repeat(self):
        """bool: The queue's repeat option.

        True if enabled, False otherwise.

        Might also be ``'ONE'`` if repeating the same title is enabled
        (not supported by the official controller).
        """
        # PLAY_MODES maps a play-mode name to a (shuffle, repeat) pair.
        return PLAY_MODES[self.play_mode][1]

    @repeat.setter
    def repeat(self, repeat):
        """Set the queue's repeat option"""
        # Keep the current shuffle setting and change only repeat; the two
        # flags are combined into a single device play mode.
        shuffle = self.shuffle
        self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, repeat)]
    @property
    @only_on_master  # Only for symmetry with the setter
    def cross_fade(self):
        """bool: The speaker's cross fade state.

        True if enabled, False otherwise
        """
        response = self.avTransport.GetCrossfadeMode([
            ('InstanceID', 0),
        ])
        # The device reports '0' or '1'; convert via int so that both the
        # string '0' and '1' map onto the right boolean.
        cross_fade_state = response['CrossfadeMode']
        return bool(int(cross_fade_state))

    @cross_fade.setter
    @only_on_master
    def cross_fade(self, crossfade):
        """Set the speaker's cross fade state."""
        crossfade_value = '1' if crossfade else '0'
        self.avTransport.SetCrossfadeMode([
            ('InstanceID', 0),
            ('CrossfadeMode', crossfade_value)
        ])
    def ramp_to_volume(self, volume, ramp_type='SLEEP_TIMER_RAMP_TYPE'):
        """Smoothly change the volume.

        There are three ramp types available:

        * ``'SLEEP_TIMER_RAMP_TYPE'`` (default): Linear ramp from the
          current volume up or down to the new volume. The ramp rate is
          1.25 steps per second. For example: To change from volume 50 to
          volume 30 would take 16 seconds.
        * ``'ALARM_RAMP_TYPE'``: Resets the volume to zero, waits for about
          30 seconds, and then ramps the volume up to the desired value at
          a rate of 2.5 steps per second. For example: Volume 30 would take
          12 seconds for the ramp up (not considering the wait time).
        * ``'AUTOPLAY_RAMP_TYPE'``: Resets the volume to zero and then
          quickly ramps up at a rate of 50 steps per second. For example:
          Volume 30 will take only 0.6 seconds.

        The ramp rate is selected by Sonos based on the chosen ramp type and
        the resulting transition time returned.
        This method is non blocking and has no network overhead once sent.

        Args:
            volume (int): The new volume.
            ramp_type (str, optional): The desired ramp type, as described
                above.

        Returns:
            int: The ramp time in seconds, rounded down. Note that this does
            not include the wait time.
        """
        response = self.renderingControl.RampToVolume([
            ('InstanceID', 0),
            ('Channel', 'Master'),
            ('RampType', ramp_type),
            ('DesiredVolume', volume),
            ('ResetVolumeAfter', False),
            ('ProgramURI', '')
        ])
        # The device reports how long the ramp will take.
        return int(response['RampTime'])
    @only_on_master
    def play_from_queue(self, index, start=True):
        """Play a track from the queue by index.

        The index number is required as an argument, where the first index
        is 0.

        Args:
            index (int): 0-based index of the track to play
            start (bool): If the item that has been set should start playing
        """
        # Grab the speaker's information if we haven't already since we'll need
        # it in the next step.
        if not self.speaker_info:
            self.get_speaker_info()
        # first, set the queue itself as the source URI
        uri = 'x-rincon-queue:{0}#0'.format(self.uid)
        self.avTransport.SetAVTransportURI([
            ('InstanceID', 0),
            ('CurrentURI', uri),
            ('CurrentURIMetaData', '')
        ])
        # second, set the track number with a seek command
        # (the UPnP TRACK_NR unit is 1-based, hence index + 1)
        self.avTransport.Seek([
            ('InstanceID', 0),
            ('Unit', 'TRACK_NR'),
            ('Target', index + 1)
        ])
        # finally, just play what's set if needed
        if start:
            self.play()
@only_on_master
def play(self):
"""Play the currently selected track."""
self.avTransport.Play([
('InstanceID', 0),
('Speed', 1)
])
    @only_on_master
    # pylint: disable=too-many-arguments
    def play_uri(self, uri='', meta='', title='', start=True,
                 force_radio=False):
        """Play a URI.

        Playing a URI will replace what was playing with the stream given by
        the URI. For some streams at least a title is required as metadata.
        This can be provided using the `meta` argument or the `title` argument.
        If the `title` argument is provided minimal metadata will be generated.
        If `meta` argument is provided the `title` argument is ignored.

        Args:
            uri (str): URI of the stream to be played.
            meta (str): The metadata to show in the player, DIDL format.
            title (str): The title to show in the player (if no meta).
            start (bool): If the URI that has been set should start playing.
            force_radio (bool): forces a uri to play as a radio stream.

        On a Sonos controller music is shown with one of the following display
        formats and controls:

        * Radio format: Shows the name of the radio station and other available
          data. No seek, next, previous, or voting capability.
          Examples: TuneIn, radioPup
        * Smart Radio: Shows track name, artist, and album. Limited seek, next
          and sometimes voting capability depending on the Music Service.
          Examples: Amazon Prime Stations, Pandora Radio Stations.
        * Track format: Shows track name, artist, and album the same as when
          playing from a queue. Full seek, next and previous capabilities.
          Examples: Spotify, Napster, Rhapsody.

        How it is displayed is determined by the URI prefix:
        `x-sonosapi-stream:`, `x-sonosapi-radio:`, `x-rincon-mp3radio:`,
        `hls-radio:` default to radio or smart radio format depending on the
        stream. Others default to track format: `x-file-cifs:`, `aac:`,
        `http:`, `https:`, `x-sonos-spotify:` (used by Spotify),
        `x-sonosapi-hls-static:` (Amazon Prime),
        `x-sonos-http:` (Google Play & Napster).

        Some URIs that default to track format could be radio streams,
        typically `http:`, `https:` or `aac:`.
        To force display and controls to Radio format set `force_radio=True`

        .. note:: Other URI prefixes exist but are less common.
           If you have information on these please add to this doc string.

        .. note:: A change in Sonos® (as of at least version 6.4.2) means that
           the devices no longer accepts ordinary `http:` and `https:` URIs for
           radio stations. This method has the option to replaces these
           prefixes with the one that Sonos® expects: `x-rincon-mp3radio:` by
           using the "force_radio=True" parameter.
           A few streams may fail if not forced to Radio format.
        """
        if meta == '' and title != '':
            meta_template = '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements'\
                '/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" '\
                'xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/" '\
                'xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">'\
                '<item id="R:0/0/0" parentID="R:0/0" restricted="true">'\
                '<dc:title>{title}</dc:title><upnp:class>'\
                'object.item.audioItem.audioBroadcast</upnp:class><desc '\
                'id="cdudn" nameSpace="urn:schemas-rinconnetworks-com:'\
                'metadata-1-0/">{service}</desc></item></DIDL-Lite>'
            tunein_service = 'SA_RINCON65031_'
            # Radio stations need to have at least a title to play
            # (title is XML-escaped since it is embedded in DIDL-Lite).
            meta = meta_template.format(
                title=escape(title),
                service=tunein_service)
        # change uri prefix to force radio style display and commands
        if force_radio:
            colon = uri.find(':')
            if colon > 0:
                uri = 'x-rincon-mp3radio{0}'.format(uri[colon:])
        self.avTransport.SetAVTransportURI([
            ('InstanceID', 0),
            ('CurrentURI', uri),
            ('CurrentURIMetaData', meta)
        ])
        # The track is enqueued, now play it if needed
        if start:
            return self.play()
        return False
@only_on_master
def pause(self):
"""Pause the currently playing track."""
self.avTransport.Pause([
('InstanceID', 0),
('Speed', 1)
])
@only_on_master
def stop(self):
"""Stop the currently playing track."""
self.avTransport.Stop([
('InstanceID', 0),
('Speed', 1)
])
@only_on_master
def seek(self, timestamp):
"""Seek to a given timestamp in the current track, specified in the
format of HH:MM:SS or H:MM:SS.
Raises:
ValueError: if the given timestamp is invalid.
"""
if not re.match(r'^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$', timestamp):
raise ValueError('invalid timestamp, use HH:MM:SS format')
self.avTransport.Seek([
('InstanceID', 0),
('Unit', 'REL_TIME'),
('Target', timestamp)
])
@only_on_master
def next(self):
"""Go to the next track.
Keep in mind that next() can return errors
for a variety of reasons. For example, if the Sonos is streaming
Pandora and you call next() several times in quick succession an error
code will likely be returned (since Pandora has limits on how many
songs can be skipped).
"""
self.avTransport.Next([
('InstanceID', 0),
('Speed', 1)
])
@only_on_master
def previous(self):
"""Go back to the previously played track.
Keep in mind that previous() can return errors
for a variety of reasons. For example, previous() will return an error
code (error code 701) if the Sonos is streaming Pandora since you can't
go back on tracks.
"""
self.avTransport.Previous([
('InstanceID', 0),
('Speed', 1)
])
    @property
    def mute(self):
        """bool: The speaker's mute state.

        True if muted, False otherwise.
        """
        response = self.renderingControl.GetMute([
            ('InstanceID', 0),
            ('Channel', 'Master')
        ])
        # The device reports '0' or '1'; convert via int for the boolean.
        mute_state = response['CurrentMute']
        return bool(int(mute_state))

    @mute.setter
    def mute(self, mute):
        """Mute (or unmute) the speaker."""
        mute_value = '1' if mute else '0'
        self.renderingControl.SetMute([
            ('InstanceID', 0),
            ('Channel', 'Master'),
            ('DesiredMute', mute_value)
        ])
    @property
    def volume(self):
        """int: The speaker's volume.

        An integer between 0 and 100.
        """
        response = self.renderingControl.GetVolume([
            ('InstanceID', 0),
            ('Channel', 'Master'),
        ])
        volume = response['CurrentVolume']
        return int(volume)

    @volume.setter
    def volume(self, volume):
        """Set the speaker's volume.

        Values outside 0..100 are silently clamped to the valid range.
        """
        volume = int(volume)
        volume = max(0, min(volume, 100))  # Coerce in range
        self.renderingControl.SetVolume([
            ('InstanceID', 0),
            ('Channel', 'Master'),
            ('DesiredVolume', volume)
        ])
    @property
    def bass(self):
        """int: The speaker's bass EQ.

        An integer between -10 and 10.
        """
        response = self.renderingControl.GetBass([
            ('InstanceID', 0),
            ('Channel', 'Master'),
        ])
        bass = response['CurrentBass']
        return int(bass)

    @bass.setter
    def bass(self, bass):
        """Set the speaker's bass.

        Values outside -10..10 are silently clamped to the valid range.
        """
        bass = int(bass)
        bass = max(-10, min(bass, 10))  # Coerce in range
        self.renderingControl.SetBass([
            ('InstanceID', 0),
            ('DesiredBass', bass)
        ])
@property
def treble(self):
"""int: The speaker's treble EQ.
An integer between -10 and 10.
"""
response = self.renderingControl.GetTreble([
('InstanceID', 0),
('Channel', 'Master'),
])
treble = response['CurrentTreble']
return int(treble)
@treble.setter
def treble(self, treble):
"""Set the speaker's treble."""
treble = int(treble)
treble = max(-10, min(treble, 10)) # Coerce in range
self.renderingControl.SetTreble([
('InstanceID', 0),
('DesiredTreble', treble)
])
@property
@loudness.setter
def loudness(self, loudness):
"""Switch on/off the speaker's loudness compensation."""
loudness_value = '1' if loudness else '0'
self.renderingControl.SetLoudness([
('InstanceID', 0),
('Channel', 'Master'),
('DesiredLoudness', loudness_value)
])
    @property
    def night_mode(self):
        """bool: The speaker's night mode.

        True if on, False if off, None if not supported.
        """
        # Only soundbars expose the NightMode EQ setting.
        if not self.is_soundbar:
            return None
        response = self.renderingControl.GetEQ([
            ('InstanceID', 0),
            ('EQType', 'NightMode')
        ])
        return bool(int(response['CurrentValue']))

    @night_mode.setter
    def night_mode(self, night_mode):
        """Switch on/off the speaker's night mode.

        :param night_mode: Enable or disable night mode
        :type night_mode: bool
        :raises NotSupportedException: If the device does not support
            night mode.
        """
        if not self.is_soundbar:
            message = 'This device does not support night mode'
            raise NotSupportedException(message)
        self.renderingControl.SetEQ([
            ('InstanceID', 0),
            ('EQType', 'NightMode'),
            ('DesiredValue', int(night_mode))
        ])
@property
def dialog_mode(self):
"""bool: Get the Sonos speaker's dialog mode.
True if on, False if off, None if not supported.
"""
if not self.is_soundbar:
return None
response = self.renderingControl.GetEQ([
('InstanceID', 0),
('EQType', 'DialogLevel')
])
return bool(int(response['CurrentValue']))
@dialog_mode.setter
def dialog_mode(self, dialog_mode):
"""Switch on/off the speaker's dialog mode.
:param dialog_mode: Enable or disable dialog mode
:type dialog_mode: bool
:raises NotSupportedException: If the device does not support
dialog mode.
"""
if not self.is_soundbar:
message = 'This device does not support dialog mode'
raise NotSupportedException(message)
self.renderingControl.SetEQ([
('InstanceID', 0),
('EQType', 'DialogLevel'),
('DesiredValue', int(dialog_mode))
])
    def _parse_zone_group_state(self):
        """The Zone Group State contains a lot of useful information.

        Retrieve and parse it, and populate the relevant properties
        (``_player_name``, ``_uid``, ``_is_bridge``, ``_is_coordinator``,
        ``_groups``, ``_all_zones`` and ``_visible_zones``) on every zone
        found in the topology.
        """
        # zoneGroupTopology.GetZoneGroupState()['ZoneGroupState'] returns XML
        # like this:
        #
        # <ZoneGroups>
        #   <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXXX1400:0">
        #     <ZoneGroupMember
        #         BootSeq="33"
        #         Configuration="1"
        #         Icon="x-rincon-roomicon:zoneextender"
        #         Invisible="1"
        #         IsZoneBridge="1"
        #         Location="http://192.168.1.100:1400/xml/device_description.xml"
        #         MinCompatibleVersion="22.0-00000"
        #         SoftwareVersion="24.1-74200"
        #         UUID="RINCON_000ZZZ1400"
        #         ZoneName="BRIDGE"/>
        #   </ZoneGroup>
        #   <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXX1400:46">
        #     <ZoneGroupMember
        #         BootSeq="44"
        #         Configuration="1"
        #         Icon="x-rincon-roomicon:living"
        #         Location="http://192.168.1.101:1400/xml/device_description.xml"
        #         MinCompatibleVersion="22.0-00000"
        #         SoftwareVersion="24.1-74200"
        #         UUID="RINCON_000XXX1400"
        #         ZoneName="Living Room"/>
        #     <ZoneGroupMember
        #         BootSeq="52"
        #         Configuration="1"
        #         Icon="x-rincon-roomicon:kitchen"
        #         Location="http://192.168.1.102:1400/xml/device_description.xml"
        #         MinCompatibleVersion="22.0-00000"
        #         SoftwareVersion="24.1-74200"
        #         UUID="RINCON_000YYY1400"
        #         ZoneName="Kitchen"/>
        #   </ZoneGroup>
        # </ZoneGroups>
        #
        def parse_zone_group_member(member_element):
            """Parse a ZoneGroupMember or Satellite element from Zone Group
            State, create a SoCo instance for the member, set basic attributes
            and return it."""
            # Create a SoCo instance for each member. Because SoCo
            # instances are singletons, this is cheap if they have already
            # been created, and useful if they haven't. We can then
            # update various properties for that instance.
            member_attribs = member_element.attrib
            # The zone's IP is embedded in its device-description URL.
            ip_addr = member_attribs['Location'].\
                split('//')[1].split(':')[0]
            zone = config.SOCO_CLASS(ip_addr)
            # share our cache
            zone._zgs_cache = self._zgs_cache
            # uid doesn't change, but it's not harmful to (re)set it, in case
            # the zone is as yet unseen.
            zone._uid = member_attribs['UUID']
            zone._player_name = member_attribs['ZoneName']
            # add the zone to the set of all members, and to the set
            # of visible members if appropriate
            is_visible = (member_attribs.get('Invisible') != '1')
            if is_visible:
                self._visible_zones.add(zone)
            self._all_zones.add(zone)
            return zone

        # This is called quite frequently, so it is worth optimising it.
        # Maintain a private cache. If the zgt has not changed, there is no
        # need to repeat all the XML parsing. In addition, switch on network
        # caching for a short interval (5 secs).
        zgs = self.zoneGroupTopology.GetZoneGroupState(
            cache=self._zgs_cache)['ZoneGroupState']
        if zgs == self._zgs_result:
            return
        self._zgs_result = zgs
        tree = XML.fromstring(zgs.encode('utf-8'))
        # Empty the set of all zone_groups
        self._groups.clear()
        # and the set of all members
        self._all_zones.clear()
        self._visible_zones.clear()
        # With some versions, the response is wrapped in ZoneGroupState
        # NOTE(review): this relies on Element truthiness -- an existing but
        # childless <ZoneGroups> element is falsy, so `or tree` would fall
        # back to the root; `is not None` would be the safer test. Confirm
        # before changing, as behaviour depends on firmware responses.
        tree = tree.find('ZoneGroups') or tree
        # Loop over each ZoneGroup Element
        for group_element in tree.findall('ZoneGroup'):
            coordinator_uid = group_element.attrib['Coordinator']
            group_uid = group_element.attrib['ID']
            group_coordinator = None
            members = set()
            for member_element in group_element.findall('ZoneGroupMember'):
                zone = parse_zone_group_member(member_element)
                # Perform extra processing relevant to direct zone group
                # members
                #
                # If this element has the same UUID as the coordinator, it is
                # the coordinator
                if zone._uid == coordinator_uid:
                    group_coordinator = zone
                    zone._is_coordinator = True
                else:
                    zone._is_coordinator = False
                # is_bridge doesn't change, but it does no real harm to
                # set/reset it here, just in case the zone has not been seen
                # before
                zone._is_bridge = (
                    member_element.attrib.get('IsZoneBridge') == '1')
                # add the zone to the members for this group
                members.add(zone)
                # Loop over Satellite elements if present, and process as for
                # ZoneGroup elements
                for satellite_element in member_element.findall('Satellite'):
                    zone = parse_zone_group_member(satellite_element)
                    # Assume a satellite can't be a bridge or coordinator, so
                    # no need to check.
                    #
                    # Add the zone to the members for this group.
                    members.add(zone)
                # Now create a ZoneGroup with this info and add it to the list
                # of groups
            self._groups.add(ZoneGroup(group_uid, group_coordinator, members))
    @property
    def all_groups(self):
        """set of :class:`soco.groups.ZoneGroup`: All available groups."""
        self._parse_zone_group_state()
        # Return a copy so callers cannot mutate our private set.
        return self._groups.copy()
    @property
    def group(self):
        """:class:`soco.groups.ZoneGroup`: The Zone Group of which this device
        is a member.

        None if this zone is a slave in a stereo pair.
        """
        for group in self.all_groups:
            if self in group:
                return group
        return None

        # To get the group directly from the network, try the code below
        # though it is probably slower than that above
        # current_group_id = self.zoneGroupTopology.GetZoneGroupAttributes()[
        #     'CurrentZoneGroupID']
        # if current_group_id:
        #     for group in self.all_groups:
        #         if group.uid == current_group_id:
        #             return group
        # else:
        #     return None
    @property
    def all_zones(self):
        """set of :class:`soco.groups.ZoneGroup`: All available zones."""
        self._parse_zone_group_state()
        # Return a copy so callers cannot mutate our private set.
        return self._all_zones.copy()
    @property
    def visible_zones(self):
        """set of :class:`soco.groups.ZoneGroup`: All visible zones."""
        self._parse_zone_group_state()
        # Return a copy so callers cannot mutate our private set.
        return self._visible_zones.copy()
def partymode(self):
"""Put all the speakers in the network in the same group, a.k.a Party
Mode.
This blog shows the initial research responsible for this:
http://blog.travelmarx.com/2010/06/exploring-sonos-via-upnp.html
The trick seems to be (only tested on a two-speaker setup) to tell each
speaker which to join. There's probably a bit more to it if multiple
groups have been defined.
"""
# Tell every other visible zone to join this one
# pylint: disable = expression-not-assigned
[zone.join(self) for zone in self.visible_zones if zone is not self]
def join(self, master):
"""Join this speaker to another "master" speaker."""
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon:{0}'.format(master.uid)),
('CurrentURIMetaData', '')
])
self._zgs_cache.clear()
self._parse_zone_group_state()
def unjoin(self):
"""Remove this speaker from a group.
Seems to work ok even if you remove what was previously the group
master from it's own group. If the speaker was not in a group also
returns ok.
"""
self.avTransport.BecomeCoordinatorOfStandaloneGroup([
('InstanceID', 0)
])
self._zgs_cache.clear()
self._parse_zone_group_state()
def switch_to_line_in(self, source=None):
""" Switch the speaker's input to line-in.
Args:
source (SoCo): The speaker whose line-in should be played.
Default is line-in from the speaker itself.
"""
if source:
uid = source.uid
else:
uid = self.uid
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon-stream:{0}'.format(uid)),
('CurrentURIMetaData', '')
])
@property
def is_playing_radio(self):
"""bool: Is the speaker playing radio?"""
response = self.avTransport.GetPositionInfo([
('InstanceID', 0),
('Channel', 'Master')
])
track_uri = response['TrackURI']
return re.match(r'^x-rincon-mp3radio:', track_uri) is not None
@property
def is_playing_line_in(self):
"""bool: Is the speaker playing line-in?"""
response = self.avTransport.GetPositionInfo([
('InstanceID', 0),
('Channel', 'Master')
])
track_uri = response['TrackURI']
return re.match(r'^x-rincon-stream:', track_uri) is not None
@property
def is_playing_tv(self):
"""bool: Is the playbar speaker input from TV?"""
response = self.avTransport.GetPositionInfo([
('InstanceID', 0),
('Channel', 'Master')
])
track_uri = response['TrackURI']
return re.match(r'^x-sonos-htastream:', track_uri) is not None
def switch_to_tv(self):
"""Switch the playbar speaker's input to TV."""
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-sonos-htastream:{0}:spdif'.format(self.uid)),
('CurrentURIMetaData', '')
])
@property
def status_light(self):
"""bool: The white Sonos status light between the mute button and the
volume up button on the speaker.
True if on, otherwise False.
"""
result = self.deviceProperties.GetLEDState()
LEDState = result["CurrentLEDState"] # pylint: disable=invalid-name
return LEDState == "On"
@status_light.setter
def status_light(self, led_on):
"""Switch on/off the speaker's status light."""
led_state = 'On' if led_on else 'Off'
self.deviceProperties.SetLEDState([
('DesiredLEDState', led_state),
])
    def get_current_track_info(self):
        """Get information about the currently playing track.

        Returns:
            dict: A dictionary containing information about the currently
            playing track: playlist_position, duration, title, artist, album,
            position and an album_art link.

        If we're unable to return data for a field, we'll return an empty
        string. This can happen for all kinds of reasons so be sure to check
        values. For example, a track may not have complete metadata and be
        missing an album name. In this case track['album'] will be an empty
        string.

        .. note:: Calling this method on a slave in a group will not
            return the track the group is playing, but the last track
            this speaker was playing.
        """
        response = self.avTransport.GetPositionInfo([
            ('InstanceID', 0),
            ('Channel', 'Master')
        ])
        # Pre-populate with empty strings so consumers can rely on the keys
        # existing even when metadata is missing.
        track = {'title': '', 'artist': '', 'album': '', 'album_art': '',
                 'position': ''}
        track['playlist_position'] = response['Track']
        track['duration'] = response['TrackDuration']
        track['uri'] = response['TrackURI']
        track['position'] = response['RelTime']
        metadata = response['TrackMetaData']
        # Store the entire Metadata entry in the track, this can then be
        # used if needed by the client to restart a given URI
        track['metadata'] = metadata
        # Duration seems to be '0:00:00' when listening to radio
        if metadata != '' and track['duration'] == '0:00:00':
            metadata = XML.fromstring(really_utf8(metadata))
            # Try parse trackinfo (radio streams put "artist - title" into
            # the streamContent element)
            trackinfo = metadata.findtext('.//{urn:schemas-rinconnetworks-com:'
                                          'metadata-1-0/}streamContent') or ''
            index = trackinfo.find(' - ')
            if index > -1:
                track['artist'] = trackinfo[:index]
                track['title'] = trackinfo[index + 3:]
            else:
                # Might find some kind of title anyway in metadata
                track['title'] = metadata.findtext('.//{http://purl.org/dc/'
                                                   'elements/1.1/}title')
                if not track['title']:
                    track['title'] = trackinfo
        # If the speaker is playing from the line-in source, querying for track
        # metadata will return "NOT_IMPLEMENTED".
        elif metadata not in ('', 'NOT_IMPLEMENTED', None):
            # Track metadata is returned in DIDL-Lite format
            metadata = XML.fromstring(really_utf8(metadata))
            md_title = metadata.findtext(
                './/{http://purl.org/dc/elements/1.1/}title')
            md_artist = metadata.findtext(
                './/{http://purl.org/dc/elements/1.1/}creator')
            md_album = metadata.findtext(
                './/{urn:schemas-upnp-org:metadata-1-0/upnp/}album')
            track['title'] = ""
            if md_title:
                track['title'] = md_title
            track['artist'] = ""
            if md_artist:
                track['artist'] = md_artist
            track['album'] = ""
            if md_album:
                track['album'] = md_album
            # Album art URIs from the device are relative; expand to a full
            # URI including the speaker's IP address.
            album_art_url = metadata.findtext(
                './/{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')
            if album_art_url is not None:
                track['album_art'] = \
                    self.music_library.build_album_art_full_uri(album_art_url)
        return track
    def get_speaker_info(self, refresh=False, timeout=None):
        """Get information about the Sonos speaker.

        Arguments:
            refresh(bool): Refresh the speaker info cache.
            timeout: How long to wait for the server to send
                data before giving up, as a float, or a
                `(connect timeout, read timeout)` tuple
                e.g. (3, 5). Default is no timeout.

        Returns:
            dict: Information about the Sonos speaker, such as the UID,
            MAC Address, and Zone Name. Returns None if the device
            description contains no device element.
        """
        if self.speaker_info and refresh is False:
            return self.speaker_info
        else:
            response = requests.get('http://' + self.ip_address +
                                    ':1400/xml/device_description.xml',
                                    timeout=timeout)
            dom = XML.fromstring(response.content)

        device = dom.find('{urn:schemas-upnp-org:device-1-0}device')
        if device is not None:
            self.speaker_info['zone_name'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}roomName')

            # no zone icon in device_description.xml -> player icon
            self.speaker_info['player_icon'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}iconList/'
                '{urn:schemas-upnp-org:device-1-0}icon/'
                '{urn:schemas-upnp-org:device-1-0}url'
            )

            self.speaker_info['uid'] = self.uid
            self.speaker_info['serial_number'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}serialNum')
            self.speaker_info['software_version'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}softwareVersion')
            self.speaker_info['hardware_version'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}hardwareVersion')
            self.speaker_info['model_number'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}modelNumber')
            self.speaker_info['model_name'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}modelName')
            self.speaker_info['display_version'] = device.findtext(
                '{urn:schemas-upnp-org:device-1-0}displayVersion')

            # no mac address - extract from serial number
            mac = self.speaker_info['serial_number'].split(':')[0]
            self.speaker_info['mac_address'] = mac

            return self.speaker_info
        return None
def get_current_transport_info(self):
"""Get the current playback state.
Returns:
dict: The following information about the
speaker's playing state:
* current_transport_state (``PLAYING``, ``TRANSITIONING``,
``PAUSED_PLAYBACK``, ``STOPPED``)
* current_transport_status (OK, ?)
* current_speed(1, ?)
This allows us to know if speaker is playing or not. Don't know other
states of CurrentTransportStatus and CurrentSpeed.
"""
response = self.avTransport.GetTransportInfo([
('InstanceID', 0),
])
playstate = {
'current_transport_status': '',
'current_transport_state': '',
'current_transport_speed': ''
}
playstate['current_transport_state'] = \
response['CurrentTransportState']
playstate['current_transport_status'] = \
response['CurrentTransportStatus']
playstate['current_transport_speed'] = response['CurrentSpeed']
return playstate
def get_queue(self, start=0, max_items=100, full_album_art_uri=False):
    """Get information about the queue.

    :param start: Starting number of returned matches
    :param max_items: Maximum number of returned matches
    :param full_album_art_uri: If the album art URI should include the
        IP address
    :returns: A :py:class:`~.soco.data_structures.Queue` object

    This method is heavily based on Sam Soffes (aka soffes) ruby
    implementation
    """
    queue = []
    response = self.contentDirectory.Browse([
        ('ObjectID', 'Q:0'),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    result = response['Result']
    # Pagination bookkeeping returned alongside the DIDL payload,
    # stored with snake_case keys on the Queue object.
    metadata = {}
    for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
        metadata[camel_to_underscore(tag)] = int(response[tag])
    # I'm not sure this is necessary (any more). Even with an empty
    # queue, there is still a result object. This should be
    # investigated.
    if not result:
        # pylint: disable=star-args
        return Queue(queue, **metadata)
    items = from_didl_string(result)
    for item in items:
        # Check if the album art URI should be fully qualified
        if full_album_art_uri:
            self.music_library._update_album_art_to_full_uri(item)
        queue.append(item)
    # pylint: disable=star-args
    return Queue(queue, **metadata)
@property
def queue_size(self):
    """int: Size of the queue."""
    response = self.contentDirectory.Browse([
        ('ObjectID', 'Q:0'),
        ('BrowseFlag', 'BrowseMetadata'),
        ('Filter', '*'),
        ('StartingIndex', 0),
        ('RequestedCount', 1),
        ('SortCriteria', '')
    ])
    # The queue size is reported as the child count of the queue
    # container in the returned DIDL-Lite metadata.
    dom = XML.fromstring(really_utf8(response['Result']))
    container = dom.find(
        '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container')
    if container is None:
        return None
    child_count = container.get('childCount')
    return None if child_count is None else int(child_count)
def get_sonos_playlists(self, *args, **kwargs):
    """Convenience wrapper around
    `music_library.get_music_library_information('sonos_playlists')`.

    Refer to the docstring of that method for arguments.
    """
    # Prepend the fixed search type, forwarding everything else as-is.
    search_args = ('sonos_playlists',) + tuple(args)
    return self.music_library.get_music_library_information(
        *search_args, **kwargs)
@only_on_master
def add_uri_to_queue(self, uri, position=0, as_next=False):
    """Add the URI to the queue.

    For arguments and return value see `add_to_queue`.
    """
    # FIXME: The res.protocol_info should probably represent the mime type
    # etc of the uri. But this seems OK.
    res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
    # Wrap the bare URI in a minimal DIDL object so it can be enqueued
    # through the regular add_to_queue path.
    item = DidlObject(resources=res, title='', parent_id='', item_id='')
    return self.add_to_queue(item, position, as_next)
@only_on_master
def add_to_queue(self, queueable_item, position=0, as_next=False):
    """Add a queueable item to the queue.

    Args:
        queueable_item (DidlObject or MusicServiceItem): The item to be
            added to the queue
        position (int): The index (1-based) at which the URI should be
            added. Default is 0 (add URI at the end of the queue).
        as_next (bool): Whether this URI should be played as the next
            track in shuffle mode. This only works if
            `play_mode=SHUFFLE`.

    Returns:
        int: The index of the new item in the queue.
    """
    item_metadata = to_didl_string(queueable_item)
    item_uri = queueable_item.resources[0].uri
    response = self.avTransport.AddURIToQueue([
        ('InstanceID', 0),
        ('EnqueuedURI', item_uri),
        ('EnqueuedURIMetaData', item_metadata),
        ('DesiredFirstTrackNumberEnqueued', position),
        ('EnqueueAsNext', int(as_next))
    ])
    # The service reports the (1-based) queue position of the new item.
    return int(response['FirstTrackNumberEnqueued'])
def add_multiple_to_queue(self, items, container=None):
    """Add a sequence of items to the queue.

    Args:
        items (list): A sequence of items to the be added to the queue
        container (DidlObject, optional): A container object which
            includes the items.
    """
    if container is None:
        container_uri = ''  # Sonos seems to accept this as well
        container_metadata = ''
    else:
        container_uri = container.resources[0].uri
        container_metadata = to_didl_string(container)
    # The service accepts at most 16 URIs per request, so submit the
    # items in chunks of that size.
    chunk_size = 16
    item_list = list(items)  # list so it can be sliced
    for offset in range(0, len(item_list), chunk_size):
        chunk = item_list[offset:offset + chunk_size]
        uris = ' '.join(entry.resources[0].uri for entry in chunk)
        uri_metadata = ' '.join(to_didl_string(entry) for entry in chunk)
        self.avTransport.AddMultipleURIsToQueue([
            ('InstanceID', 0),
            ('UpdateID', 0),
            ('NumberOfURIs', len(chunk)),
            ('EnqueuedURIs', uris),
            ('EnqueuedURIsMetaData', uri_metadata),
            ('ContainerURI', container_uri),
            ('ContainerMetaData', container_metadata),
            ('DesiredFirstTrackNumberEnqueued', 0),
            ('EnqueueAsNext', 0)
        ])
@only_on_master
def remove_from_queue(self, index):
    """Remove a track from the queue by index. The index number is
    required as an argument, where the first index is 0.

    Args:
        index (int): The (0-based) index of the track to remove
    """
    # TODO: what do these parameters actually do?
    update_id = '0'
    # Queue object IDs are 1-based, hence the +1.
    object_id = 'Q:0/{0}'.format(index + 1)
    self.avTransport.RemoveTrackFromQueue([
        ('InstanceID', 0),
        ('ObjectID', object_id),
        ('UpdateID', update_id),
    ])
@only_on_master
def clear_queue(self):
    """Remove all tracks from the queue."""
    # A single UPnP call empties the entire queue.
    self.avTransport.RemoveAllTracksFromQueue([
        ('InstanceID', 0),
    ])
@deprecated('0.13', "soco.music_library.get_favorite_radio_shows", '0.15')
def get_favorite_radio_shows(self, start=0, max_items=100):
    """Get favorite radio shows from Sonos' Radio app.

    Returns:
        dict: A dictionary containing the total number of favorites, the
        number of favorites returned, and the actual list of favorite radio
        shows, represented as a dictionary with `title` and `uri` keys.

    Depending on what you're building, you'll want to check to see if the
    total number of favorites is greater than the amount you
    requested (`max_items`), if it is, use `start` to page through and
    get the entire list of favorites.
    """
    # Warn at call time (in addition to the @deprecated wrapper): the
    # return format is expected to change to SoCo data structures.
    message = 'The output type of this method will probably change in '\
        'the future to use SoCo data structures'
    warnings.warn(message, stacklevel=2)
    return self.__get_favorites(RADIO_SHOWS, start, max_items)
@deprecated('0.13', "soco.music_library.get_favorite_radio_stations",
            '0.15')
def get_favorite_radio_stations(self, start=0, max_items=100):
    """Get favorite radio stations from Sonos' Radio app.

    See :meth:`get_favorite_radio_shows` for return type and remarks.
    """
    # Warn at call time: the return format is expected to change.
    message = 'The output type of this method will probably change in '\
        'the future to use SoCo data structures'
    warnings.warn(message, stacklevel=2)
    return self.__get_favorites(RADIO_STATIONS, start, max_items)
@deprecated('0.13', "soco.music_library.get_sonos_favorites", '0.15')
def get_sonos_favorites(self, start=0, max_items=100):
    """Get Sonos favorites.

    See :meth:`get_favorite_radio_shows` for return type and remarks.
    """
    # Warn at call time: the return format is expected to change.
    message = 'The output type of this method will probably change in '\
        'the future to use SoCo data structures'
    warnings.warn(message, stacklevel=2)
    return self.__get_favorites(SONOS_FAVORITES, start, max_items)
def __get_favorites(self, favorite_type, start=0, max_items=100):
    """Helper method for `get_favorite_radio_*` methods.

    Args:
        favorite_type (str): Specify either `RADIO_STATIONS` or
            `RADIO_SHOWS`. Any other value is treated as
            `SONOS_FAVORITES`.
        start (int): Which number to start the retrieval from. Used for
            paging.
        max_items (int): The total number of results to return.
    """
    if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
        favorite_type = SONOS_FAVORITES
    response = self.contentDirectory.Browse([
        ('ObjectID',
         # Compare with '==', not 'is': identity comparison of strings
         # only works by accident of CPython interning.
         'FV:2' if favorite_type == SONOS_FAVORITES
         else 'R:0/{0}'.format(favorite_type)),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    result = {}
    favorites = []
    results_xml = response['Result']
    if results_xml != '':
        # Favorites are returned in DIDL-Lite format
        metadata = XML.fromstring(really_utf8(results_xml))
        for item in metadata.findall(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container'
                if favorite_type == RADIO_SHOWS else
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'):
            favorite = {}
            favorite['title'] = item.findtext(
                '{http://purl.org/dc/elements/1.1/}title')
            favorite['uri'] = item.findtext(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res')
            if favorite_type == SONOS_FAVORITES:
                favorite['meta'] = item.findtext(
                    '{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD')
            favorites.append(favorite)
    result['total'] = response['TotalMatches']
    result['returned'] = len(favorites)
    result['favorites'] = favorites
    return result
def create_sonos_playlist(self, title):
    """Create a new empty Sonos playlist.

    Args:
        title: Name of the playlist

    :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
    """
    queue_response = self.avTransport.CreateSavedQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('EnqueuedURI', ''),
        ('EnqueuedURIMetaData', ''),
    ])
    item_id = queue_response['AssignedObjectID']
    # The playlist number is the part after the first ':'
    # (e.g. 'SQ:11' -> '11').
    playlist_number = item_id.split(':', 2)[1]
    playlist_uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(
        playlist_number)
    resources = [DidlResource(uri=playlist_uri,
                              protocol_info="x-rincon-playlist:*:*:*")]
    return DidlPlaylistContainer(resources=resources, title=title,
                                 parent_id='SQ:', item_id=item_id)
@only_on_master
# pylint: disable=invalid-name
def create_sonos_playlist_from_queue(self, title):
    """Create a new Sonos playlist from the current queue.

    Args:
        title: Name of the playlist

    :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
    """
    # Note: probably same as Queue service method SaveAsSonosPlaylist
    # but this has not been tested. This method is what the
    # controller uses.
    response = self.avTransport.SaveQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('ObjectID', '')
    ])
    item_id = response['AssignedObjectID']
    # 'SQ:11' -> '11'
    playlist_number = item_id.split(':', 2)[1]
    playlist_uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(
        playlist_number)
    resources = [DidlResource(uri=playlist_uri,
                              protocol_info="x-rincon-playlist:*:*:*")]
    return DidlPlaylistContainer(resources=resources, title=title,
                                 parent_id='SQ:', item_id=item_id)
@only_on_master
def remove_sonos_playlist(self, sonos_playlist):
    """Remove a Sonos playlist.

    Args:
        sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove
            or the item_id (str).

    Returns:
        bool: True if succesful, False otherwise

    Raises:
        SoCoUPnPException: If sonos_playlist does not point to a valid
            object.
    """
    # Accept either a playlist object (use its item_id) or a raw id
    # string passed directly.
    playlist_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
    return self.contentDirectory.DestroyObject([('ObjectID', playlist_id)])
def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
    """Adds a queueable item to a Sonos' playlist.

    Args:
        queueable_item (DidlObject): the item to add to the Sonos'
            playlist
        sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to
            which the item should be added
    """
    # Get the update_id for the playlist; the service rejects edits
    # made against a stale update counter.
    response, _ = self.music_library._music_lib_search(
        sonos_playlist.item_id, 0, 1)
    update_id = response['UpdateID']
    # Form the metadata for queueable_item
    metadata = to_didl_string(queueable_item)
    # Make the request
    self.avTransport.AddURIToSavedQueue([
        ('InstanceID', 0),
        ('UpdateID', update_id),
        ('ObjectID', sonos_playlist.item_id),
        ('EnqueuedURI', queueable_item.resources[0].uri),
        ('EnqueuedURIMetaData', metadata),
        # 2 ** 32 - 1 = 4294967295, this field has always this value. Most
        # likely, playlist positions are represented as a 32 bit uint and
        # this is therefore the largest index possible. Asking to add at
        # this index therefore probably amounts to adding it "at the end"
        ('AddAtIndex', 4294967295)
    ])
@only_on_master
def set_sleep_timer(self, sleep_time_seconds):
    """Sets the sleep timer.

    Args:
        sleep_time_seconds (int or NoneType): How long to wait before
            turning off speaker in seconds, None to cancel a sleep timer.
            Maximum value of 86399

    Raises:
        SoCoException: Upon errors interacting with Sonos controller
        ValueError: Argument/Syntax errors
    """
    # Single copy of the error text. Previously the message was
    # duplicated in two raise sites, and a backslash line continuation
    # inside the string literal embedded a run of indentation
    # whitespace into the user-visible message.
    error_msg = ('invalid sleep_time_seconds, must be integer value '
                 'between 0 and 86399 inclusive or None')
    # Note: A value of None for sleep_time_seconds is valid, and needs to
    # be preserved distinctly separate from 0. 0 means go to sleep now,
    # which will immediately start the sound tappering, and could be a
    # useful feature, while None means cancel the current timer
    try:
        if sleep_time_seconds is None:
            sleep_time = ''
        else:
            # Format as H:MM:SS, which the service expects.
            sleep_time = format(
                datetime.timedelta(seconds=int(sleep_time_seconds))
            )
        self.avTransport.ConfigureSleepTimer([
            ('InstanceID', 0),
            ('NewSleepTimerDuration', sleep_time),
        ])
    except SoCoUPnPException as err:
        # Error 402 is the service's complaint about a bad duration.
        if 'Error 402 received' in str(err):
            raise ValueError(error_msg)
        raise
    except ValueError:
        # int() conversion failed on a non-numeric argument.
        raise ValueError(error_msg)
@only_on_master
def get_sleep_timer(self):
    """Retrieves remaining sleep time, if any

    Returns:
        int or NoneType: Number of seconds left in timer. If there is no
            sleep timer currently set it will return None.
    """
    resp = self.avTransport.GetRemainingSleepTimerDuration([
        ('InstanceID', 0),
    ])
    duration = resp['RemainingSleepTimerDuration']
    if not duration:
        # Empty string means no sleep timer is set.
        return None
    # The duration comes back as 'H:MM:SS'; convert to seconds.
    parts = duration.split(':')
    return int(parts[0]) * 3600 + int(parts[1]) * 60 + int(parts[2])
@only_on_master
def reorder_sonos_playlist(self, sonos_playlist, tracks, new_pos,
                           update_id=0):
    """Reorder and/or Remove tracks in a Sonos playlist.

    The underlying call is quite complex as it can both move a track
    within the list or delete a track from the playlist.  All of this
    depends on what tracks and new_pos specify.

    If a list is specified for tracks, then a list must be used for
    new_pos. Each list element is a discrete modification and the next
    list operation must anticipate the new state of the playlist.

    If a comma formatted string to tracks is specified, then use
    a similiar string to specify new_pos. Those operations should be
    ordered from the end of the list to the beginning

    See the helper methods
    :py:meth:`clear_sonos_playlist`, :py:meth:`move_in_sonos_playlist`,
    :py:meth:`remove_from_sonos_playlist` for simplified usage.

    update_id - If you have a series of operations, tracking the
    update_id and setting it, will save a lookup operation.

    Examples:
      To reorder the first two tracks::

        # sonos_playlist specified by the DidlPlaylistContainer object
        sonos_playlist = device.get_sonos_playlists()[0]
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=[0, ], new_pos=[1, ])
        # OR specified by the item_id
        device.reorder_sonos_playlist('SQ:0', tracks=[0, ],
                                      new_pos=[1, ])

      To delete the second track::

        # tracks/new_pos are a list of int
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=[1, ], new_pos=[None, ])
        # OR tracks/new_pos are a list of int-like
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=['1', ], new_pos=['', ])
        # OR tracks/new_pos are strings - no transform is done
        device.reorder_sonos_playlist(sonos_playlist, tracks='1',
                                      new_pos='')

      To reverse the order of a playlist with 4 items::

        device.reorder_sonos_playlist(sonos_playlist, tracks='3,2,1,0',
                                      new_pos='0,1,2,3')

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            The Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        tracks: (list): list of track indices(int) to reorder. May also
            be a list of int like things. i.e. ``['0', '1',]`` OR it may
            be a str of comma separated int like things. ``"0,1"``.
            Tracks are **0**-based. Meaning the first track is track 0,
            just like indexing into a Python list.
        new_pos (list): list of new positions (int|None)
            corresponding to track_list. MUST be the same type as
            ``tracks``. **0**-based, see tracks above. ``None`` is the
            indicator to remove the track. If using a list of strings,
            then a remove is indicated by an empty string.
        update_id (int): operation id (default: 0) If set to 0, a lookup
            is done to find the correct value.

    Returns:
        dict: Which contains 3 elements: change, length and update_id.
            Change in size between original playlist and the resulting
            playlist, the length of resulting playlist, and the new
            update_id.

    Raises:
        SoCoUPnPException: If playlist does not exist or if your tracks
            and/or new_pos arguments are invalid.
    """
    # allow either a string 'SQ:10' or an object with item_id attribute.
    object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
    # Normalize tracks/new_pos into parallel lists; scalar str/int
    # arguments are wrapped in a single-element list unchanged.
    if isinstance(tracks, UnicodeType):
        track_list = [tracks, ]
        position_list = [new_pos, ]
    elif isinstance(tracks, int):
        track_list = [tracks, ]
        if new_pos is None:
            new_pos = ''
        position_list = [new_pos, ]
    else:
        track_list = [str(x) for x in tracks]
        position_list = [str(x) if x is not None else '' for x in new_pos]
    if update_id == 0:  # retrieve the update id for the object
        response, _ = self.music_library._music_lib_search(object_id, 0, 1)
        update_id = response['UpdateID']
    change = 0
    # Each (track, position) pair is a discrete reorder/remove request;
    # the update_id must be carried forward between requests.
    for track, position in zip(track_list, position_list):
        if track == position:  # there is no move, a no-op
            continue
        response = self.avTransport.ReorderTracksInSavedQueue([
            ("InstanceID", 0),
            ("ObjectID", object_id),
            ("UpdateID", update_id),
            ("TrackList", track),
            ("NewPositionList", position),
        ])
        change += int(response['QueueLengthChange'])
        update_id = int(response['NewUpdateID'])
        length = int(response['NewQueueLength'])
    # NOTE(review): if every pair is a no-op, 'length' is never bound
    # and this raises UnboundLocalError -- confirm whether callers can
    # hit that case.
    response = {'change': change,
                'update_id': update_id,
                'length': length}
    return response
@only_on_master
def clear_sonos_playlist(self, sonos_playlist, update_id=0):
    """Clear all tracks from a Sonos playlist.

    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.clear_sonos_playlist(sonos_playlist)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        ValueError: If sonos_playlist specified by string and is not
            found.
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # Resolve a raw item_id string to its playlist object first.
    if not isinstance(sonos_playlist, DidlPlaylistContainer):
        sonos_playlist = self.get_sonos_playlist_by_attr(
            'item_id', sonos_playlist)
    count = self.music_library.browse(ml_item=sonos_playlist).total_matches
    if not count:
        # Nothing to remove from an already-empty playlist.
        return {'change': 0, 'update_id': update_id, 'length': count}
    # Ask the service to remove every (0-based) track index at once.
    tracks = ','.join(str(index) for index in range(count))
    return self.reorder_sonos_playlist(sonos_playlist, tracks=tracks,
                                       new_pos='', update_id=update_id)
@only_on_master
def move_in_sonos_playlist(self, sonos_playlist, track, new_pos,
                           update_id=0):
    """Move a track to a new position within a Sonos Playlist.

    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.move_in_sonos_playlist(sonos_playlist, track=0, new_pos=1)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to move. The
            first track is track 0, just like indexing into a Python
            list.
        new_pos (int): **0**-based location to move the track.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # Delegate to the general reorder call with int-coerced positions.
    return self.reorder_sonos_playlist(
        sonos_playlist, tracks=int(track), new_pos=int(new_pos),
        update_id=update_id)
@only_on_master
def remove_from_sonos_playlist(self, sonos_playlist, track, update_id=0):
    """Remove a track from a Sonos Playlist.

    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.remove_from_sonos_playlist(sonos_playlist, track=0)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to remove. The
            first track is track 0, just like indexing into a Python
            list.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # A new_pos of None signals removal to reorder_sonos_playlist.
    return self.reorder_sonos_playlist(
        sonos_playlist, tracks=int(track), new_pos=None,
        update_id=update_id)
@only_on_master
def get_sonos_playlist_by_attr(self, attr_name, match):
    """Return the first Sonos Playlist DidlPlaylistContainer that
    matches the attribute specified.

    Args:
        attr_name (str): DidlPlaylistContainer attribute to compare. The
            most useful being: 'title' and 'item_id'.
        match (str): Value to match.

    Returns:
        (:class:`~.soco.data_structures.DidlPlaylistContainer`): The
            first matching playlist object.

    Raises:
        (AttributeError): If indicated attribute name does not exist.
        (ValueError): If a match can not be found.

    Example::

        device.get_sonos_playlist_by_attr('title', 'Foo')
        device.get_sonos_playlist_by_attr('item_id', 'SQ:3')

    """
    # Scan lazily; getattr raises AttributeError for a bad attr_name,
    # matching the documented contract.
    found = next(
        (playlist for playlist in self.get_sonos_playlists()
         if getattr(playlist, attr_name) == match), None)
    if found is None:
        raise ValueError('No match on "{0}" for value "{1}"'.format(
            attr_name, match))
    return found
|
amelchio/pysonos
|
pysonos/core.py
|
SoCo.join
|
python
|
def join(self, master):
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon:{0}'.format(master.uid)),
('CurrentURIMetaData', '')
])
self._zgs_cache.clear()
self._parse_zone_group_state()
|
Join this speaker to another "master" speaker.
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/core.py#L1079-L1087
| null |
class SoCo(_SocoSingletonBase):
"""A simple class for controlling a Sonos speaker.
For any given set of arguments to __init__, only one instance of this class
may be created. Subsequent attempts to create an instance with the same
arguments will return the previously created instance. This means that all
SoCo instances created with the same ip address are in fact the *same* SoCo
instance, reflecting the real world position.
.. rubric:: Basic Methods
.. autosummary::
play_from_queue
play
play_uri
pause
stop
seek
next
previous
mute
volume
play_mode
cross_fade
ramp_to_volume
get_current_track_info
get_speaker_info
get_current_transport_info
.. rubric:: Queue Management
.. autosummary::
get_queue
queue_size
add_to_queue
add_uri_to_queue
add_multiple_to_queue
remove_from_queue
clear_queue
.. rubric:: Group Management
.. autosummary::
group
partymode
join
unjoin
all_groups
all_zones
visible_zones
.. rubric:: Player Identity and Settings
.. autosummary::
player_name
uid
household_id
is_visible
is_bridge
is_coordinator
is_soundbar
bass
treble
loudness
night_mode
dialog_mode
status_light
.. rubric:: Playlists and Favorites
.. autosummary::
get_sonos_playlists
create_sonos_playlist
create_sonos_playlist_from_queue
remove_sonos_playlist
add_item_to_sonos_playlist
reorder_sonos_playlist
clear_sonos_playlist
move_in_sonos_playlist
remove_from_sonos_playlist
get_sonos_playlist_by_attr
get_favorite_radio_shows
get_favorite_radio_stations
get_sonos_favorites
.. rubric:: Miscellaneous
.. autosummary::
switch_to_line_in
is_playing_radio
is_playing_line_in
is_playing_tv
switch_to_tv
set_sleep_timer
get_sleep_timer
.. warning::
Properties on this object are not generally cached and may obtain
information over the network, so may take longer than expected to set
or return a value. It may be a good idea for you to cache the value in
your own code.
.. note::
Since all methods/properties on this object will result in an UPnP
request, they might result in an exception without it being mentioned
in the Raises section.
In most cases, the exception will be a
:class:`soco.exceptions.SoCoUPnPException`
(if the player returns an UPnP error code), but in special cases
it might also be another :class:`soco.exceptions.SoCoException`
or even a `requests` exception.
"""
_class_group = 'SoCo'
# pylint: disable=super-on-old-class
def __init__(self, ip_address):
    """
    Args:
        ip_address (str): The IPv4 address of the speaker.

    Raises:
        ValueError: If ``ip_address`` is not a valid IPv4 address
            string.
    """
    # Note: Creation of a SoCo instance should be as cheap and quick as
    # possible. Do not make any network calls here
    super(SoCo, self).__init__()
    # Check if ip_address is a valid IPv4 representation.
    # Sonos does not (yet) support IPv6
    try:
        socket.inet_aton(ip_address)
    except socket.error:
        raise ValueError("Not a valid IP address string")
    #: The speaker's ip address
    self.ip_address = ip_address
    self.speaker_info = {}  # Stores information about the current speaker
    # The services which we use
    # pylint: disable=invalid-name
    self.avTransport = AVTransport(self)
    self.contentDirectory = ContentDirectory(self)
    self.deviceProperties = DeviceProperties(self)
    self.renderingControl = RenderingControl(self)
    self.zoneGroupTopology = ZoneGroupTopology(self)
    self.alarmClock = AlarmClock(self)
    self.systemProperties = SystemProperties(self)
    self.musicServices = MusicServices(self)
    self.music_library = MusicLibrary(self)
    # Some private attributes; these act as caches that are lazily
    # filled in (e.g. by _parse_zone_group_state and the properties
    # that read them).
    self._all_zones = set()
    self._groups = set()
    self._is_bridge = None          # cached by the is_bridge property
    self._is_coordinator = False    # refreshed by is_coordinator
    self._is_soundbar = None        # cached by is_soundbar
    self._player_name = None        # cached by player_name
    self._uid = None                # cached by uid
    self._household_id = None       # cached by household_id
    self._visible_zones = set()
    # Cache for zone group state lookups (5 second timeout).
    self._zgs_cache = Cache(default_timeout=5)
    self._zgs_result = None
    _LOG.debug("Created SoCo instance for ip: %s", ip_address)
def __str__(self):
    # Human-readable form, e.g. "<SoCo object at ip 192.168.1.2>".
    return "<{0} object at ip {1}>".format(
        type(self).__name__, self.ip_address)
def __repr__(self):
    # Evaluable form, e.g. SoCo("192.168.1.2").
    return '{0}("{1}")'.format(type(self).__name__, self.ip_address)
@property
def player_name(self):
    """str: The speaker's name."""
    # The name could be fetched with a GetZoneAttributes() call, but
    # reading it from the (cached) zone group topology is probably
    # quicker; parsing sets self._player_name as a side effect.
    self._parse_zone_group_state()
    return self._player_name
@player_name.setter
def player_name(self, playername):
    """Set the speaker's name."""
    # Icon and configuration are left unchanged (empty strings).
    self.deviceProperties.SetZoneAttributes([
        ('DesiredZoneName', playername),
        ('DesiredIcon', ''),
        ('DesiredConfiguration', '')
    ])
@property
def uid(self):
    """str: A unique identifier.

    Looks like: ``'RINCON_000XXXXXXXXXX1400'``
    """
    # The uid does not change over time, so serve the cached value if
    # we already have one.
    if self._uid is not None:
        return self._uid
    # Otherwise derive it from the zone group topology, which is
    # probably cached and therefore quicker than fetching the device
    # description XML; parsing sets self._uid as a side effect.
    self._parse_zone_group_state()
    return self._uid
@property
def household_id(self):
    """str: A unique identifier for all players in a household.

    Looks like: ``'Sonos_asahHKgjgJGjgjGjggjJgjJG34'``
    """
    # Cached after the first lookup; the household id does not change
    # over time (?).
    if self._household_id is None:
        result = self.deviceProperties.GetHouseholdID()
        self._household_id = result['CurrentHouseholdID']
    return self._household_id
@property
def is_visible(self):
    """bool: Is this zone visible?

    A zone might be invisible if, for example, it is a bridge, or the
    slave part of stereo pair.
    """
    # Deriving this from the zone group topology (rather than a
    # GetInvisible() call) takes advantage of any caching.
    return self in self.visible_zones
@property
def is_bridge(self):
    """bool: Is this zone a bridge?"""
    # Being a bridge does not change over time (?), so only consult the
    # topology when the cached value is still unknown; parsing sets
    # self._is_bridge as a side effect.
    if self._is_bridge is None:
        self._parse_zone_group_state()
    return self._is_bridge
@property
def is_coordinator(self):
    """bool: Is this zone a group coordinator?"""
    # Refresh from the (cached) zone group topology, which updates
    # self._is_coordinator as a side effect -- preferable to a direct
    # GetInvisible() style call.
    self._parse_zone_group_state()
    return self._is_coordinator
@property
def is_soundbar(self):
    """bool: Is this zone a soundbar (i.e. has night mode etc.)?"""
    if self._is_soundbar is None:
        # The model name comes from the speaker info, fetched lazily.
        if not self.speaker_info:
            self.get_speaker_info()
        model = self.speaker_info['model_name'].lower()
        self._is_soundbar = any(
            model.endswith(suffix) for suffix in SOUNDBARS)
    return self._is_soundbar
@property
def play_mode(self):
    """str: The queue's play mode.

    Case-insensitive options are:

    * ``'NORMAL'`` -- Turns off shuffle and repeat.
    * ``'REPEAT_ALL'`` -- Turns on repeat and turns off shuffle.
    * ``'SHUFFLE'`` -- Turns on shuffle *and* repeat. (It's
      strange, I know.)
    * ``'SHUFFLE_NOREPEAT'`` -- Turns on shuffle and turns off
      repeat.
    """
    settings = self.avTransport.GetTransportSettings([
        ('InstanceID', 0),
    ])
    return settings['PlayMode']
@play_mode.setter
def play_mode(self, playmode):
    """Set the speaker's mode.

    Raises:
        KeyError: If ``playmode`` is not one of the supported modes.
    """
    playmode = playmode.upper()
    # Test membership against the dict directly; '.keys()' added
    # nothing but an extra view object.
    if playmode not in PLAY_MODES:
        raise KeyError("'%s' is not a valid play mode" % playmode)
    self.avTransport.SetPlayMode([
        ('InstanceID', 0),
        ('NewPlayMode', playmode)
    ])
@property
def shuffle(self):
    """bool: The queue's shuffle option.

    True if enabled, False otherwise.
    """
    # PLAY_MODES maps a mode name to a (shuffle, repeat) pair.
    shuffle_flag, _ = PLAY_MODES[self.play_mode]
    return shuffle_flag
@shuffle.setter
def shuffle(self, shuffle):
    """Set the queue's shuffle option."""
    # Preserve the current repeat setting while changing shuffle.
    self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, self.repeat)]
@property
def repeat(self):
    """bool: The queue's repeat option.

    True if enabled, False otherwise.

    Might also be ``'ONE'`` if repeating the same title is enabled
    (not supported by the official controller).
    """
    # PLAY_MODES maps a mode name to a (shuffle, repeat) pair.
    _, repeat_flag = PLAY_MODES[self.play_mode]
    return repeat_flag
@repeat.setter
def repeat(self, repeat):
    """Set the queue's repeat option"""
    # Preserve the current shuffle setting while changing repeat.
    self.play_mode = PLAY_MODE_BY_MEANING[(self.shuffle, repeat)]
@property
@only_on_master  # Only for symmetry with the setter
def cross_fade(self):
    """bool: The speaker's cross fade state.

    True if enabled, False otherwise
    """
    response = self.avTransport.GetCrossfadeMode([
        ('InstanceID', 0),
    ])
    # The service reports '0' or '1'.
    return bool(int(response['CrossfadeMode']))
@cross_fade.setter
@only_on_master
def cross_fade(self, crossfade):
    """Set the speaker's cross fade state."""
    # The service expects the string '1' (on) or '0' (off).
    self.avTransport.SetCrossfadeMode([
        ('InstanceID', 0),
        ('CrossfadeMode', '1' if crossfade else '0')
    ])
def ramp_to_volume(self, volume, ramp_type='SLEEP_TIMER_RAMP_TYPE'):
    """Smoothly change the volume.

    There are three ramp types available:

    * ``'SLEEP_TIMER_RAMP_TYPE'`` (default): Linear ramp from the
      current volume up or down to the new volume, at a rate of
      1.25 steps per second (volume 50 to 30 takes 16 seconds).
    * ``'ALARM_RAMP_TYPE'``: Resets the volume to zero, waits for
      about 30 seconds, and then ramps up to the desired value at
      2.5 steps per second (volume 30 takes 12 seconds, excluding
      the wait).
    * ``'AUTOPLAY_RAMP_TYPE'``: Resets the volume to zero and then
      quickly ramps up at 50 steps per second (volume 30 takes only
      0.6 seconds).

    The ramp rate is selected by Sonos based on the chosen ramp type
    and the resulting transition time returned. This method is non
    blocking and has no network overhead once sent.

    Args:
        volume (int): The new volume.
        ramp_type (str, optional): The desired ramp type, as described
            above.

    Returns:
        int: The ramp time in seconds, rounded down. Note that this
        does not include the wait time.
    """
    result = self.renderingControl.RampToVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('RampType', ramp_type),
        ('DesiredVolume', volume),
        ('ResetVolumeAfter', False),
        ('ProgramURI', '')
    ])
    # The service reports the transition time in (whole) seconds.
    return int(result['RampTime'])
@only_on_master
def play_from_queue(self, index, start=True):
    """Play a track from the queue by index.

    The index number is required as an argument, where the first index
    is 0.

    Args:
        index (int): 0-based index of the track to play
        start (bool): If the item that has been set should start playing
    """
    # The speaker's uid is needed to address its own queue; fetch the
    # speaker info lazily if we don't have it yet.
    if not self.speaker_info:
        self.get_speaker_info()
    # Step 1: make the queue itself the transport source.
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', 'x-rincon-queue:{0}#0'.format(self.uid)),
        ('CurrentURIMetaData', '')
    ])
    # Step 2: seek to the requested track (the service is 1-based).
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'TRACK_NR'),
        ('Target', index + 1)
    ])
    # Step 3: optionally start playback of what has been set.
    if start:
        self.play()
@only_on_master
def play(self):
    """Play the currently selected track."""
    args = [
        ('InstanceID', 0),
        ('Speed', 1),
    ]
    self.avTransport.Play(args)
@only_on_master
# pylint: disable=too-many-arguments
def play_uri(self, uri='', meta='', title='', start=True,
             force_radio=False):
    """Play a URI.

    The given stream replaces whatever is currently playing. Some
    streams need at least a title as metadata: pass full DIDL metadata
    via `meta`, or just a `title` and minimal metadata is generated for
    you (`meta` wins if both are given).

    Args:
        uri (str): URI of the stream to be played.
        meta (str): The metadata to show in the player, DIDL format.
        title (str): The title to show in the player (if no meta).
        start (bool): If the URI that has been set should start playing.
        force_radio (bool): forces a uri to play as a radio stream.

    How the controller displays the stream (radio / smart radio / track
    format) is determined by the URI prefix. `x-sonosapi-stream:`,
    `x-sonosapi-radio:`, `x-rincon-mp3radio:` and `hls-radio:` show as
    radio; `x-file-cifs:`, `aac:`, `http:`, `https:`,
    `x-sonos-spotify:`, `x-sonosapi-hls-static:` and `x-sonos-http:`
    show as ordinary tracks. Some `http:`/`https:`/`aac:` URIs are
    really radio streams; pass ``force_radio=True`` to rewrite them to
    the `x-rincon-mp3radio:` prefix that Sonos expects for radio (as of
    at least firmware 6.4.2 plain `http:`/`https:` radio URIs are no
    longer accepted).
    """
    if meta == '' and title != '':
        meta_template = '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements'\
            '/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" '\
            'xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/" '\
            'xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">'\
            '<item id="R:0/0/0" parentID="R:0/0" restricted="true">'\
            '<dc:title>{title}</dc:title><upnp:class>'\
            'object.item.audioItem.audioBroadcast</upnp:class><desc '\
            'id="cdudn" nameSpace="urn:schemas-rinconnetworks-com:'\
            'metadata-1-0/">{service}</desc></item></DIDL-Lite>'
        tunein_service = 'SA_RINCON65031_'
        # Radio stations need to have at least a title to play
        meta = meta_template.format(title=escape(title),
                                    service=tunein_service)
    # Rewrite the scheme so the controller treats the stream as radio.
    if force_radio:
        colon = uri.find(':')
        if colon > 0:
            uri = 'x-rincon-mp3radio{0}'.format(uri[colon:])
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', uri),
        ('CurrentURIMetaData', meta)
    ])
    # The track is enqueued; start it if asked to.
    if start:
        return self.play()
    return False
@only_on_master
def pause(self):
    """Pause the currently playing track."""
    args = [
        ('InstanceID', 0),
        ('Speed', 1),
    ]
    self.avTransport.Pause(args)
@only_on_master
def stop(self):
    """Stop the currently playing track."""
    args = [
        ('InstanceID', 0),
        ('Speed', 1),
    ]
    self.avTransport.Stop(args)
@only_on_master
def seek(self, timestamp):
    """Seek to a given position within the current track.

    Args:
        timestamp (str): the target position, in HH:MM:SS or H:MM:SS
            format.

    Raises:
        ValueError: if the given timestamp is invalid.
    """
    if re.match(r'^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$', timestamp) is None:
        raise ValueError('invalid timestamp, use HH:MM:SS format')
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'REL_TIME'),
        ('Target', timestamp)
    ])
@only_on_master
def next(self):
    """Go to the next track.

    Note that this can fail for a variety of reasons: for example,
    Pandora limits how many songs may be skipped, so several calls in
    quick succession will likely return an error code.
    """
    args = [
        ('InstanceID', 0),
        ('Speed', 1),
    ]
    self.avTransport.Next(args)
@only_on_master
def previous(self):
    """Go back to the previously played track.

    Note that this can fail for a variety of reasons: for example,
    when streaming Pandora (which does not allow going back) error
    code 701 is returned.
    """
    args = [
        ('InstanceID', 0),
        ('Speed', 1),
    ]
    self.avTransport.Previous(args)
@property
def mute(self):
    """bool: The speaker's mute state.

    True if muted, False otherwise.
    """
    result = self.renderingControl.GetMute([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    return int(result['CurrentMute']) != 0
@mute.setter
def mute(self, mute):
    """Mute (or unmute) the speaker."""
    desired = '1' if mute else '0'
    self.renderingControl.SetMute([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredMute', desired)
    ])
@property
def volume(self):
    """int: The speaker's volume.

    An integer between 0 and 100.
    """
    result = self.renderingControl.GetVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(result['CurrentVolume'])
@volume.setter
def volume(self, volume):
    """Set the speaker's volume."""
    # Clamp the requested value into the valid 0..100 range.
    level = min(max(int(volume), 0), 100)
    self.renderingControl.SetVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredVolume', level)
    ])
@property
def bass(self):
    """int: The speaker's bass EQ.

    An integer between -10 and 10.
    """
    result = self.renderingControl.GetBass([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(result['CurrentBass'])
@bass.setter
def bass(self, bass):
    """Set the speaker's bass."""
    # Clamp the requested value into the valid -10..10 range.
    level = min(max(int(bass), -10), 10)
    self.renderingControl.SetBass([
        ('InstanceID', 0),
        ('DesiredBass', level)
    ])
@property
def treble(self):
    """int: The speaker's treble EQ.

    An integer between -10 and 10.
    """
    result = self.renderingControl.GetTreble([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(result['CurrentTreble'])
@treble.setter
def treble(self, treble):
    """Set the speaker's treble."""
    # Clamp the requested value into the valid -10..10 range.
    level = min(max(int(treble), -10), 10)
    self.renderingControl.SetTreble([
        ('InstanceID', 0),
        ('DesiredTreble', level)
    ])
@property
def loudness(self):
    """bool: The Sonos speaker's loudness compensation.

    True if on, False otherwise.

    Loudness is a complicated topic. A nice summary of the feature:
    http://forums.sonos.com/showthread.php?p=4698#post4698
    """
    result = self.renderingControl.GetLoudness([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(result["CurrentLoudness"]) != 0
@loudness.setter
def loudness(self, loudness):
    """Switch on/off the speaker's loudness compensation."""
    desired = '1' if loudness else '0'
    self.renderingControl.SetLoudness([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredLoudness', desired)
    ])
@property
def night_mode(self):
    """bool: The speaker's night mode.

    True if on, False if off, None if not supported.
    """
    # Night mode is an EQ setting that only soundbars expose.
    if not self.is_soundbar:
        return None
    result = self.renderingControl.GetEQ([
        ('InstanceID', 0),
        ('EQType', 'NightMode')
    ])
    return int(result['CurrentValue']) != 0
@night_mode.setter
def night_mode(self, night_mode):
    """Switch on/off the speaker's night mode.

    :param night_mode: Enable or disable night mode
    :type night_mode: bool
    :raises NotSupportedException: If the device does not support
        night mode.
    """
    if not self.is_soundbar:
        raise NotSupportedException(
            'This device does not support night mode')
    self.renderingControl.SetEQ([
        ('InstanceID', 0),
        ('EQType', 'NightMode'),
        ('DesiredValue', int(night_mode))
    ])
@property
def dialog_mode(self):
    """bool: Get the Sonos speaker's dialog mode.

    True if on, False if off, None if not supported.
    """
    # Dialog mode is an EQ setting that only soundbars expose.
    if not self.is_soundbar:
        return None
    result = self.renderingControl.GetEQ([
        ('InstanceID', 0),
        ('EQType', 'DialogLevel')
    ])
    return int(result['CurrentValue']) != 0
@dialog_mode.setter
def dialog_mode(self, dialog_mode):
    """Switch on/off the speaker's dialog mode.

    :param dialog_mode: Enable or disable dialog mode
    :type dialog_mode: bool
    :raises NotSupportedException: If the device does not support
        dialog mode.
    """
    if not self.is_soundbar:
        raise NotSupportedException(
            'This device does not support dialog mode')
    self.renderingControl.SetEQ([
        ('InstanceID', 0),
        ('EQType', 'DialogLevel'),
        ('DesiredValue', int(dialog_mode))
    ])
def _parse_zone_group_state(self):
    """Retrieve and parse the Zone Group State.

    The ZoneGroupTopology service returns an XML document describing
    every zone group, its coordinator and its members. Parsing it
    populates ``self._groups``, ``self._all_zones`` and
    ``self._visible_zones``.
    """
    # zoneGroupTopology.GetZoneGroupState()['ZoneGroupState'] returns XML
    # of (roughly) this shape; attributes abbreviated:
    #
    # <ZoneGroups>
    #   <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXX1400:46">
    #     <ZoneGroupMember
    #         UUID="RINCON_000XXX1400"
    #         Location="http://192.168.1.101:1400/xml/device_description.xml"
    #         ZoneName="Living Room"
    #         Invisible="1"        (optional)
    #         IsZoneBridge="1"     (optional)
    #         ... />
    #     ...more members...
    #   </ZoneGroup>
    #   ...more groups...
    # </ZoneGroups>
    #
    def parse_zone_group_member(member_element):
        """Parse a ZoneGroupMember or Satellite element from Zone Group
        State, create a SoCo instance for the member, set basic attributes
        and return it."""
        # Create a SoCo instance for each member. Because SoCo
        # instances are singletons, this is cheap if they have already
        # been created, and useful if they haven't. We can then
        # update various properties for that instance.
        member_attribs = member_element.attrib
        ip_addr = member_attribs['Location'].\
            split('//')[1].split(':')[0]
        zone = config.SOCO_CLASS(ip_addr)
        # share our cache
        zone._zgs_cache = self._zgs_cache
        # uid doesn't change, but it's not harmful to (re)set it, in case
        # the zone is as yet unseen.
        zone._uid = member_attribs['UUID']
        zone._player_name = member_attribs['ZoneName']
        # add the zone to the set of all members, and to the set
        # of visible members if appropriate
        is_visible = (member_attribs.get('Invisible') != '1')
        if is_visible:
            self._visible_zones.add(zone)
        self._all_zones.add(zone)
        return zone

    # This is called quite frequently, so it is worth optimising it.
    # Maintain a private cache. If the zgt has not changed, there is no
    # need to repeat all the XML parsing. In addition, switch on network
    # caching for a short interval (5 secs).
    zgs = self.zoneGroupTopology.GetZoneGroupState(
        cache=self._zgs_cache)['ZoneGroupState']
    if zgs == self._zgs_result:
        return
    self._zgs_result = zgs
    tree = XML.fromstring(zgs.encode('utf-8'))
    # Empty the set of all zone_groups
    self._groups.clear()
    # and the set of all members
    self._all_zones.clear()
    self._visible_zones.clear()
    # With some versions, the response is wrapped in a ZoneGroupState
    # element. BUGFIX: the previous ``tree.find('ZoneGroups') or tree``
    # relied on Element truthiness -- an Element with no children is
    # falsy in ElementTree (and testing it is deprecated) -- so test
    # against None explicitly instead.
    zone_groups = tree.find('ZoneGroups')
    if zone_groups is not None:
        tree = zone_groups
    # Loop over each ZoneGroup Element
    for group_element in tree.findall('ZoneGroup'):
        coordinator_uid = group_element.attrib['Coordinator']
        group_uid = group_element.attrib['ID']
        group_coordinator = None
        members = set()
        for member_element in group_element.findall('ZoneGroupMember'):
            zone = parse_zone_group_member(member_element)
            # Perform extra processing relevant to direct zone group
            # members
            #
            # If this element has the same UUID as the coordinator, it is
            # the coordinator
            if zone._uid == coordinator_uid:
                group_coordinator = zone
                zone._is_coordinator = True
            else:
                zone._is_coordinator = False
            # is_bridge doesn't change, but it does no real harm to
            # set/reset it here, just in case the zone has not been seen
            # before
            zone._is_bridge = (
                member_element.attrib.get('IsZoneBridge') == '1')
            # add the zone to the members for this group
            members.add(zone)
            # Loop over Satellite elements if present, and process as for
            # ZoneGroup elements
            for satellite_element in member_element.findall('Satellite'):
                zone = parse_zone_group_member(satellite_element)
                # Assume a satellite can't be a bridge or coordinator, so
                # no need to check.
                #
                # Add the zone to the members for this group.
                members.add(zone)
        # Now create a ZoneGroup with this info and add it to the list
        # of groups
        self._groups.add(ZoneGroup(group_uid, group_coordinator, members))
@property
def all_groups(self):
    """set of :class:`soco.groups.ZoneGroup`: All available groups."""
    self._parse_zone_group_state()
    # Hand back a copy so callers can't mutate our internal state.
    return set(self._groups)
@property
def group(self):
    """:class:`soco.groups.ZoneGroup`: The Zone Group of which this device
    is a member.

    None if this zone is a slave in a stereo pair.
    """
    # Return the first group that contains this zone, or None.
    return next(
        (candidate for candidate in self.all_groups if self in candidate),
        None)
@property
def all_zones(self):
    """set of :class:`soco.groups.ZoneGroup`: All available zones."""
    self._parse_zone_group_state()
    # Hand back a copy so callers can't mutate our internal state.
    return set(self._all_zones)
@property
def visible_zones(self):
    """set of :class:`soco.groups.ZoneGroup`: All visible zones."""
    self._parse_zone_group_state()
    # Hand back a copy so callers can't mutate our internal state.
    return set(self._visible_zones)
def partymode(self):
    """Put all the speakers in the network in the same group, a.k.a Party
    Mode.

    This blog shows the initial research responsible for this:
    http://blog.travelmarx.com/2010/06/exploring-sonos-via-upnp.html

    The trick seems to be (only tested on a two-speaker setup) to tell each
    speaker which to join. There's probably a bit more to it if multiple
    groups have been defined.
    """
    # Tell every other visible zone to join this one. Use a plain loop:
    # a list comprehension evaluated only for its side effects is an
    # anti-idiom and allocates a throwaway list.
    for zone in self.visible_zones:
        if zone is not self:
            zone.join(self)
def unjoin(self):
    """Remove this speaker from a group.

    Works even when this speaker was previously the group coordinator,
    and returns OK if the speaker was not grouped at all.
    """
    self.avTransport.BecomeCoordinatorOfStandaloneGroup([
        ('InstanceID', 0)
    ])
    # The topology changed, so drop the cached group state and re-read.
    self._zgs_cache.clear()
    self._parse_zone_group_state()
def switch_to_line_in(self, source=None):
    """Switch the speaker's input to line-in.

    Args:
        source (SoCo): The speaker whose line-in should be played.
            Default is line-in from the speaker itself.
    """
    uid = source.uid if source else self.uid
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', 'x-rincon-stream:{0}'.format(uid)),
        ('CurrentURIMetaData', '')
    ])
@property
def is_playing_radio(self):
    """bool: Is the speaker playing radio?"""
    info = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    # Radio streams use the x-rincon-mp3radio: URI scheme.
    return info['TrackURI'].startswith('x-rincon-mp3radio:')
@property
def is_playing_line_in(self):
    """bool: Is the speaker playing line-in?"""
    info = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    # Line-in uses the x-rincon-stream: URI scheme.
    return info['TrackURI'].startswith('x-rincon-stream:')
@property
def is_playing_tv(self):
    """bool: Is the playbar speaker input from TV?"""
    info = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    # TV input uses the x-sonos-htastream: URI scheme.
    return info['TrackURI'].startswith('x-sonos-htastream:')
def switch_to_tv(self):
    """Switch the playbar speaker's input to TV."""
    tv_uri = 'x-sonos-htastream:{0}:spdif'.format(self.uid)
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', tv_uri),
        ('CurrentURIMetaData', '')
    ])
@property
def status_light(self):
    """bool: The white Sonos status light between the mute button and the
    volume up button on the speaker.

    True if on, otherwise False.
    """
    state = self.deviceProperties.GetLEDState()['CurrentLEDState']
    return state == "On"
@status_light.setter
def status_light(self, led_on):
    """Switch on/off the speaker's status light."""
    desired = 'On' if led_on else 'Off'
    self.deviceProperties.SetLEDState([
        ('DesiredLEDState', desired),
    ])
def get_current_track_info(self):
    """Get information about the currently playing track.

    Returns:
        dict: A dictionary containing information about the currently
        playing track: playlist_position, duration, title, artist, album,
        position and an album_art link.

    If we're unable to return data for a field, we'll return an empty
    string. This can happen for all kinds of reasons so be sure to check
    values. For example, a track may not have complete metadata and be
    missing an album name. In this case track['album'] will be an empty
    string.

    .. note:: Calling this method on a slave in a group will not
        return the track the group is playing, but the last track
        this speaker was playing.
    """
    response = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    # Start with empty defaults so every documented key is present even
    # when the metadata below is missing or unparseable.
    track = {'title': '', 'artist': '', 'album': '', 'album_art': '',
             'position': ''}
    track['playlist_position'] = response['Track']
    track['duration'] = response['TrackDuration']
    track['uri'] = response['TrackURI']
    track['position'] = response['RelTime']
    metadata = response['TrackMetaData']
    # Store the entire Metadata entry in the track, this can then be
    # used if needed by the client to restart a given URI
    track['metadata'] = metadata
    # Duration seems to be '0:00:00' when listening to radio
    if metadata != '' and track['duration'] == '0:00:00':
        metadata = XML.fromstring(really_utf8(metadata))
        # Try parse trackinfo
        trackinfo = metadata.findtext('.//{urn:schemas-rinconnetworks-com:'
                                      'metadata-1-0/}streamContent') or ''
        # streamContent is commonly formatted as "artist - title";
        # split on the first ' - ' separator when present.
        index = trackinfo.find(' - ')
        if index > -1:
            track['artist'] = trackinfo[:index]
            track['title'] = trackinfo[index + 3:]
        else:
            # Might find some kind of title anyway in metadata
            track['title'] = metadata.findtext('.//{http://purl.org/dc/'
                                               'elements/1.1/}title')
            if not track['title']:
                # Fall back to the raw stream content string.
                track['title'] = trackinfo
    # If the speaker is playing from the line-in source, querying for track
    # metadata will return "NOT_IMPLEMENTED".
    elif metadata not in ('', 'NOT_IMPLEMENTED', None):
        # Track metadata is returned in DIDL-Lite format
        metadata = XML.fromstring(really_utf8(metadata))
        md_title = metadata.findtext(
            './/{http://purl.org/dc/elements/1.1/}title')
        md_artist = metadata.findtext(
            './/{http://purl.org/dc/elements/1.1/}creator')
        md_album = metadata.findtext(
            './/{urn:schemas-upnp-org:metadata-1-0/upnp/}album')
        # findtext() returns None for missing elements; normalise each
        # field to an empty string in that case.
        track['title'] = ""
        if md_title:
            track['title'] = md_title
        track['artist'] = ""
        if md_artist:
            track['artist'] = md_artist
        track['album'] = ""
        if md_album:
            track['album'] = md_album
        album_art_url = metadata.findtext(
            './/{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')
        if album_art_url is not None:
            # Qualify the (usually relative) album art URI with the
            # speaker's address.
            track['album_art'] = \
                self.music_library.build_album_art_full_uri(album_art_url)

    return track
def get_speaker_info(self, refresh=False, timeout=None):
    """Get information about the Sonos speaker.

    Arguments:
        refresh(bool): Refresh the speaker info cache.
        timeout: How long to wait for the server to send
            data before giving up, as a float, or a
            `(connect timeout, read timeout)` tuple
            e.g. (3, 5). Default is no timeout.

    Returns:
        dict: Information about the Sonos speaker, such as the UID,
        MAC Address, and Zone Name.
    """
    # Serve from the cache unless a refresh was explicitly requested.
    if self.speaker_info and refresh is False:
        return self.speaker_info
    url = ('http://' + self.ip_address +
           ':1400/xml/device_description.xml')
    response = requests.get(url, timeout=timeout)
    dom = XML.fromstring(response.content)
    ns = '{urn:schemas-upnp-org:device-1-0}'
    device = dom.find(ns + 'device')
    if device is None:
        return None
    self.speaker_info['zone_name'] = device.findtext(ns + 'roomName')
    # no zone icon in device_description.xml -> player icon
    self.speaker_info['player_icon'] = device.findtext(
        ns + 'iconList/' + ns + 'icon/' + ns + 'url')
    self.speaker_info['uid'] = self.uid
    self.speaker_info['serial_number'] = device.findtext(
        ns + 'serialNum')
    self.speaker_info['software_version'] = device.findtext(
        ns + 'softwareVersion')
    self.speaker_info['hardware_version'] = device.findtext(
        ns + 'hardwareVersion')
    self.speaker_info['model_number'] = device.findtext(
        ns + 'modelNumber')
    self.speaker_info['model_name'] = device.findtext(
        ns + 'modelName')
    self.speaker_info['display_version'] = device.findtext(
        ns + 'displayVersion')
    # no mac address - extract from serial number
    mac = self.speaker_info['serial_number'].split(':')[0]
    self.speaker_info['mac_address'] = mac
    return self.speaker_info
def get_current_transport_info(self):
    """Get the current playback state.

    Returns:
        dict: The following information about the
        speaker's playing state:

        *   current_transport_state (``PLAYING``, ``TRANSITIONING``,
            ``PAUSED_PLAYBACK``, ``STOPPED``)
        *   current_transport_status (OK, ?)
        *   current_speed(1, ?)

    This allows us to know if speaker is playing or not. Don't know other
    states of CurrentTransportStatus and CurrentSpeed.
    """
    response = self.avTransport.GetTransportInfo([
        ('InstanceID', 0),
    ])
    return {
        'current_transport_status': response['CurrentTransportStatus'],
        'current_transport_state': response['CurrentTransportState'],
        'current_transport_speed': response['CurrentSpeed'],
    }
def get_queue(self, start=0, max_items=100, full_album_art_uri=False):
    """Get information about the queue.

    :param start: Starting number of returned matches
    :param max_items: Maximum number of returned matches
    :param full_album_art_uri: If the album art URI should include the
        IP address
    :returns: A :py:class:`~.soco.data_structures.Queue` object

    This method is heavly based on Sam Soffes (aka soffes) ruby
    implementation
    """
    response = self.contentDirectory.Browse([
        ('ObjectID', 'Q:0'),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    result = response['Result']

    metadata = {
        camel_to_underscore(tag): int(response[tag])
        for tag in ('NumberReturned', 'TotalMatches', 'UpdateID')
    }

    queue = []
    # Even with an empty queue there is normally still a result object;
    # guard anyway in case it is missing entirely.
    if not result:
        # pylint: disable=star-args
        return Queue(queue, **metadata)

    for item in from_didl_string(result):
        # Optionally qualify the album art URI with the speaker address.
        if full_album_art_uri:
            self.music_library._update_album_art_to_full_uri(item)
        queue.append(item)

    # pylint: disable=star-args
    return Queue(queue, **metadata)
@property
def queue_size(self):
    """int: Size of the queue."""
    response = self.contentDirectory.Browse([
        ('ObjectID', 'Q:0'),
        ('BrowseFlag', 'BrowseMetadata'),
        ('Filter', '*'),
        ('StartingIndex', 0),
        ('RequestedCount', 1),
        ('SortCriteria', '')
    ])
    dom = XML.fromstring(really_utf8(response['Result']))
    container = dom.find(
        '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container')
    if container is None:
        return None
    child_count = container.get('childCount')
    if child_count is None:
        return None
    return int(child_count)
def get_sonos_playlists(self, *args, **kwargs):
    """Convenience method for
    `get_music_library_information('sonos_playlists')`.

    Refer to the docstring for that method.
    """
    return self.music_library.get_music_library_information(
        'sonos_playlists', *args, **kwargs)
@only_on_master
def add_uri_to_queue(self, uri, position=0, as_next=False):
    """Add the URI to the queue.

    For arguments and return value see `add_to_queue`.
    """
    # The protocol_info should strictly describe the mime type of the
    # URI, but this generic playlist value is accepted by the speakers.
    resources = [
        DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
    wrapper = DidlObject(
        resources=resources, title='', parent_id='', item_id='')
    return self.add_to_queue(wrapper, position, as_next)
@only_on_master
def add_to_queue(self, queueable_item, position=0, as_next=False):
    """Add a queueable item to the queue.

    Args:
        queueable_item (DidlObject or MusicServiceItem): The item to be
            added to the queue
        position (int): The index (1-based) at which the URI should be
            added. Default is 0 (add URI at the end of the queue).
        as_next (bool): Whether this URI should be played as the next
            track in shuffle mode. This only works if `play_mode=SHUFFLE`.

    Returns:
        int: The index of the new item in the queue.
    """
    uri = queueable_item.resources[0].uri
    result = self.avTransport.AddURIToQueue([
        ('InstanceID', 0),
        ('EnqueuedURI', uri),
        ('EnqueuedURIMetaData', to_didl_string(queueable_item)),
        ('DesiredFirstTrackNumberEnqueued', position),
        ('EnqueueAsNext', int(as_next))
    ])
    return int(result['FirstTrackNumberEnqueued'])
def add_multiple_to_queue(self, items, container=None):
    """Add a sequence of items to the queue.

    Args:
        items (list): A sequence of items to the be added to the queue
        container (DidlObject, optional): A container object which
            includes the items.
    """
    if container is None:
        container_uri = ''  # Sonos seems to accept this as well
        container_metadata = ''
    else:
        container_uri = container.resources[0].uri
        container_metadata = to_didl_string(container)

    batch_size = 16  # the service accepts at most 16 URIs per request
    pending = list(items)
    while pending:
        batch, pending = pending[:batch_size], pending[batch_size:]
        uris = ' '.join(entry.resources[0].uri for entry in batch)
        uri_metadata = ' '.join(to_didl_string(entry) for entry in batch)
        self.avTransport.AddMultipleURIsToQueue([
            ('InstanceID', 0),
            ('UpdateID', 0),
            ('NumberOfURIs', len(batch)),
            ('EnqueuedURIs', uris),
            ('EnqueuedURIsMetaData', uri_metadata),
            ('ContainerURI', container_uri),
            ('ContainerMetaData', container_metadata),
            ('DesiredFirstTrackNumberEnqueued', 0),
            ('EnqueueAsNext', 0)
        ])
@only_on_master
def remove_from_queue(self, index):
    """Remove a track from the queue by index. The index number is
    required as an argument, where the first index is 0.

    Args:
        index (int): The (0-based) index of the track to remove
    """
    # TODO: what do these parameters actually do?
    update_id = '0'
    # Queue object IDs are 1-based ('Q:0/1' is the first track).
    object_id = 'Q:0/' + str(index + 1)
    self.avTransport.RemoveTrackFromQueue([
        ('InstanceID', 0),
        ('ObjectID', object_id),
        ('UpdateID', update_id),
    ])
@only_on_master
def clear_queue(self):
    """Remove all tracks from the queue."""
    args = [('InstanceID', 0)]
    self.avTransport.RemoveAllTracksFromQueue(args)
@deprecated('0.13', "soco.music_library.get_favorite_radio_shows", '0.15')
def get_favorite_radio_shows(self, start=0, max_items=100):
    """Get favorite radio shows from Sonos' Radio app.

    Returns:
        dict: A dictionary with the total number of favorites, the
        number returned, and the list of favorite radio shows, each a
        dictionary with `title` and `uri` keys.

    If the total number of favorites exceeds `max_items`, page through
    the full list using `start`.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures',
        stacklevel=2)
    return self.__get_favorites(RADIO_SHOWS, start, max_items)
@deprecated('0.13', "soco.music_library.get_favorite_radio_stations",
            '0.15')
def get_favorite_radio_stations(self, start=0, max_items=100):
    """Get favorite radio stations from Sonos' Radio app.

    See :meth:`get_favorite_radio_shows` for return type and remarks.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures',
        stacklevel=2)
    return self.__get_favorites(RADIO_STATIONS, start, max_items)
@deprecated('0.13', "soco.music_library.get_sonos_favorites", '0.15')
def get_sonos_favorites(self, start=0, max_items=100):
    """Get Sonos favorites.

    See :meth:`get_favorite_radio_shows` for return type and remarks.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures',
        stacklevel=2)
    return self.__get_favorites(SONOS_FAVORITES, start, max_items)
def __get_favorites(self, favorite_type, start=0, max_items=100):
    """Helper method for `get_favorite_radio_*` methods.

    Args:
        favorite_type (str): Specify either `RADIO_STATIONS` or
            `RADIO_SHOWS`.
        start (int): Which number to start the retrieval from. Used for
            paging.
        max_items (int): The total number of results to return.
    """
    if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
        favorite_type = SONOS_FAVORITES
    # BUGFIX: compare strings with ``==`` rather than ``is`` -- identity
    # comparison of strings only works by accident of interning.
    if favorite_type == SONOS_FAVORITES:
        object_id = 'FV:2'
    else:
        object_id = 'R:0/{0}'.format(favorite_type)
    response = self.contentDirectory.Browse([
        ('ObjectID', object_id),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    result = {}
    favorites = []
    results_xml = response['Result']

    if results_xml != '':
        # Favorites are returned in DIDL-Lite format
        metadata = XML.fromstring(really_utf8(results_xml))

        for item in metadata.findall(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container'
                if favorite_type == RADIO_SHOWS else
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'):
            favorite = {}
            favorite['title'] = item.findtext(
                '{http://purl.org/dc/elements/1.1/}title')
            favorite['uri'] = item.findtext(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res')
            if favorite_type == SONOS_FAVORITES:
                favorite['meta'] = item.findtext(
                    '{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD')
            favorites.append(favorite)

    result['total'] = response['TotalMatches']
    result['returned'] = len(favorites)
    result['favorites'] = favorites

    return result
def create_sonos_playlist(self, title):
    """Create a new empty Sonos playlist.

    Args:
        title: Name of the playlist

    :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
    """
    reply = self.avTransport.CreateSavedQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('EnqueuedURI', ''),
        ('EnqueuedURIMetaData', ''),
    ])
    playlist_id = reply['AssignedObjectID']
    # The id looks like 'SQ:<n>'; <n> keys into the saved-queues file.
    number = playlist_id.split(':', 2)[1]
    resource = DidlResource(
        uri="file:///jffs/settings/savedqueues.rsq#{0}".format(number),
        protocol_info="x-rincon-playlist:*:*:*")
    return DidlPlaylistContainer(
        resources=[resource], title=title, parent_id='SQ:',
        item_id=playlist_id)
@only_on_master
# pylint: disable=invalid-name
def create_sonos_playlist_from_queue(self, title):
    """Create a new Sonos playlist from the current queue.

    Args:
        title: Name of the playlist

    :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
    """
    # Note: probably the same as the Queue service's SaveAsSonosPlaylist,
    # but untested; this is what the official controller uses.
    reply = self.avTransport.SaveQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('ObjectID', '')
    ])
    playlist_id = reply['AssignedObjectID']
    # The id looks like 'SQ:<n>'; <n> keys into the saved-queues file.
    number = playlist_id.split(':', 2)[1]
    resource = DidlResource(
        uri="file:///jffs/settings/savedqueues.rsq#{0}".format(number),
        protocol_info="x-rincon-playlist:*:*:*")
    return DidlPlaylistContainer(
        resources=[resource], title=title, parent_id='SQ:',
        item_id=playlist_id)
@only_on_master
def remove_sonos_playlist(self, sonos_playlist):
    """Remove a Sonos playlist.

    Args:
        sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove
            or the item_id (str).

    Returns:
        bool: True if succesful, False otherwise

    Raises:
        SoCoUPnPException: If sonos_playlist does not point to a valid
            object.
    """
    # Accept either a data-structure object or a bare item_id string.
    if hasattr(sonos_playlist, 'item_id'):
        sonos_playlist = sonos_playlist.item_id
    return self.contentDirectory.DestroyObject(
        [('ObjectID', sonos_playlist)])
def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
    """Adds a queueable item to a Sonos' playlist.

    Args:
        queueable_item (DidlObject): the item to add to the Sonos' playlist
        sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to
            which the item should be added
    """
    # The request requires the playlist's current update counter.
    response, _ = self.music_library._music_lib_search(
        sonos_playlist.item_id, 0, 1)
    self.avTransport.AddURIToSavedQueue([
        ('InstanceID', 0),
        ('UpdateID', response['UpdateID']),
        ('ObjectID', sonos_playlist.item_id),
        ('EnqueuedURI', queueable_item.resources[0].uri),
        ('EnqueuedURIMetaData', to_didl_string(queueable_item)),
        # 2 ** 32 - 1 = 4294967295: positions appear to be stored as a
        # 32 bit uint, so asking for the largest possible index amounts
        # to "append at the end".
        ('AddAtIndex', 4294967295)
    ])
@only_on_master
def set_sleep_timer(self, sleep_time_seconds):
    """Sets the sleep timer.

    Args:
        sleep_time_seconds (int or NoneType): How long to wait before
            turning off speaker in seconds, None to cancel a sleep timer.
            Maximum value of 86399

    Raises:
        SoCoException: Upon errors interacting with Sonos controller
        ValueError: Argument/Syntax errors
    """
    # Fix: the original duplicated the error message in two string
    # literals, each containing a backslash line-continuation that
    # embedded stray whitespace into the runtime message. Validate the
    # argument up front with a single, clean message instead of waiting
    # for the device to reject it with UPnP error 402.
    #
    # Note: None is distinct from 0. 0 means "go to sleep now" (starts
    # the sound tapering immediately), while None cancels the current
    # timer (sent to the device as an empty duration string).
    error_message = ('invalid sleep_time_seconds, must be integer '
                     'value between 0 and 86399 inclusive or None')
    if sleep_time_seconds is None:
        sleep_time = ''
    else:
        try:
            seconds = int(sleep_time_seconds)
        except (TypeError, ValueError):
            raise ValueError(error_message)
        if not 0 <= seconds <= 86399:
            raise ValueError(error_message)
        # The device expects a 'H:MM:SS'-style duration string.
        sleep_time = format(datetime.timedelta(seconds=seconds))
    self.avTransport.ConfigureSleepTimer([
        ('InstanceID', 0),
        ('NewSleepTimerDuration', sleep_time),
    ])
@only_on_master
def get_sleep_timer(self):
    """Retrieves remaining sleep time, if any.

    Returns:
        int or NoneType: Number of seconds left in timer. If there is no
        sleep timer currently set it will return None.
    """
    reply = self.avTransport.GetRemainingSleepTimerDuration([
        ('InstanceID', 0),
    ])
    remaining = reply['RemainingSleepTimerDuration']
    if not remaining:
        return None
    # The duration comes back as an 'H:MM:SS'-style string.
    parts = remaining.split(':')
    return 3600 * int(parts[0]) + 60 * int(parts[1]) + int(parts[2])
@only_on_master
def reorder_sonos_playlist(self, sonos_playlist, tracks, new_pos,
                           update_id=0):
    """Reorder and/or Remove tracks in a Sonos playlist.

    The underlying call is quite complex as it can both move a track
    within the list or delete a track from the playlist.  All of this
    depends on what tracks and new_pos specify.

    If a list is specified for tracks, then a list must be used for
    new_pos. Each list element is a discrete modification and the next
    list operation must anticipate the new state of the playlist.

    If a comma formatted string to tracks is specified, then use
    a similiar string to specify new_pos. Those operations should be
    ordered from the end of the list to the beginning

    See the helper methods
    :py:meth:`clear_sonos_playlist`, :py:meth:`move_in_sonos_playlist`,
    :py:meth:`remove_from_sonos_playlist` for simplified usage.

    update_id - If you have a series of operations, tracking the update_id
    and setting it, will save a lookup operation.

    Examples:
      To reorder the first two tracks::

        # sonos_playlist specified by the DidlPlaylistContainer object
        sonos_playlist = device.get_sonos_playlists()[0]
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=[0, ], new_pos=[1, ])
        # OR specified by the item_id
        device.reorder_sonos_playlist('SQ:0', tracks=[0, ], new_pos=[1, ])

      To delete the second track::

        # tracks/new_pos are a list of int
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=[1, ], new_pos=[None, ])
        # OR tracks/new_pos are a list of int-like
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=['1', ], new_pos=['', ])
        # OR tracks/new_pos are strings - no transform is done
        device.reorder_sonos_playlist(sonos_playlist, tracks='1',
                                      new_pos='')

      To reverse the order of a playlist with 4 items::

        device.reorder_sonos_playlist(sonos_playlist, tracks='3,2,1,0',
                                      new_pos='0,1,2,3')

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`): The
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        tracks: (list): list of track indices(int) to reorder. May also be
            a list of int like things. i.e. ``['0', '1',]`` OR it may be a
            str of comma separated int like things. ``"0,1"``. Tracks are
            **0**-based. Meaning the first track is track 0, just like
            indexing into a Python list.
        new_pos (list): list of new positions (int|None)
            corresponding to track_list. MUST be the same type as
            ``tracks``. **0**-based, see tracks above. ``None`` is the
            indicator to remove the track. If using a list of strings,
            then a remove is indicated by an empty string.
        update_id (int): operation id (default: 0) If set to 0, a lookup
            is done to find the correct value.

    Returns:
        dict: Which contains 3 elements: change, length and update_id.
            Change in size between original playlist and the resulting
            playlist, the length of resulting playlist, and the new
            update_id.

    Raises:
        SoCoUPnPException: If playlist does not exist or if your tracks
            and/or new_pos arguments are invalid.
    """
    # allow either a string 'SQ:10' or an object with item_id attribute.
    object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)

    if isinstance(tracks, UnicodeType):
        # Comma-formatted strings are passed through untransformed.
        track_list = [tracks, ]
        position_list = [new_pos, ]
    elif isinstance(tracks, int):
        # Single int track; a new_pos of None means "delete the track",
        # which the device expects as an empty string.
        track_list = [tracks, ]
        if new_pos is None:
            new_pos = ''
        position_list = [new_pos, ]
    else:
        # Lists of int-like things; None entries become '' (= delete).
        track_list = [str(x) for x in tracks]
        position_list = [str(x) if x is not None else '' for x in new_pos]
    # track_list = ','.join(track_list)
    # position_list = ','.join(position_list)
    if update_id == 0:  # retrieve the update id for the object
        response, _ = self.music_library._music_lib_search(object_id, 0, 1)
        update_id = response['UpdateID']
    change = 0

    for track, position in zip(track_list, position_list):
        if track == position:  # there is no move, a no-op
            continue
        response = self.avTransport.ReorderTracksInSavedQueue([
            ("InstanceID", 0),
            ("ObjectID", object_id),
            ("UpdateID", update_id),
            ("TrackList", track),
            ("NewPositionList", position),
        ])
        # Accumulate the size change, and thread the server's new update
        # id through to the next iteration - each call invalidates the
        # previous id.
        change += int(response['QueueLengthChange'])
        update_id = int(response['NewUpdateID'])
    # NOTE(review): if every pair was a no-op (or the input was empty),
    # 'response' here is the _music_lib_search result (or unbound), which
    # has no 'NewQueueLength' key - confirm callers never hit this path.
    length = int(response['NewQueueLength'])
    response = {'change': change,
                'update_id': update_id,
                'length': length}
    return response
@only_on_master
def clear_sonos_playlist(self, sonos_playlist, update_id=0):
    """Clear all tracks from a Sonos playlist.

    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.clear_sonos_playlist(sonos_playlist)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        ValueError: If sonos_playlist specified by string and is not found.
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    if not isinstance(sonos_playlist, DidlPlaylistContainer):
        sonos_playlist = self.get_sonos_playlist_by_attr(
            'item_id', sonos_playlist)
    count = self.music_library.browse(ml_item=sonos_playlist).total_matches
    if count == 0:
        # Nothing to remove.
        return {'change': 0, 'update_id': update_id, 'length': count}
    # Ask reorder to delete every index in one request.
    track_csv = ','.join(str(i) for i in range(count))
    return self.reorder_sonos_playlist(sonos_playlist, tracks=track_csv,
                                       new_pos='', update_id=update_id)
@only_on_master
def move_in_sonos_playlist(self, sonos_playlist, track, new_pos,
                           update_id=0):
    """Move a track to a new position within a Sonos Playlist.

    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.move_in_sonos_playlist(sonos_playlist, track=0, new_pos=1)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to move. The first
            track is track 0, just like indexing into a Python list.
        new_pos (int): **0**-based location to move the track.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # Coerce to int so reorder takes its single-track code path.
    return self.reorder_sonos_playlist(
        sonos_playlist, int(track), int(new_pos), update_id)
@only_on_master
def remove_from_sonos_playlist(self, sonos_playlist, track, update_id=0):
    """Remove a track from a Sonos Playlist.

    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.remove_from_sonos_playlist(sonos_playlist, track=0)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to remove. The
            first track is track 0, just like indexing into a Python list.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # A new position of None tells reorder to delete the track.
    return self.reorder_sonos_playlist(
        sonos_playlist, int(track), None, update_id)
@only_on_master
def get_sonos_playlist_by_attr(self, attr_name, match):
    """Return the first Sonos Playlist DidlPlaylistContainer that
    matches the attribute specified.

    Args:
        attr_name (str): DidlPlaylistContainer attribute to compare. The
            most useful being: 'title' and 'item_id'.
        match (str): Value to match.

    Returns:
        (:class:`~.soco.data_structures.DidlPlaylistContainer`): The
            first matching playlist object.

    Raises:
        (AttributeError): If indicated attribute name does not exist.
        (ValueError): If a match can not be found.

    Example::

        device.get_sonos_playlist_by_attr('title', 'Foo')
        device.get_sonos_playlist_by_attr('item_id', 'SQ:3')
    """
    for playlist in self.get_sonos_playlists():
        # getattr deliberately unguarded: a bad attr_name raises
        # AttributeError, as documented.
        if getattr(playlist, attr_name) == match:
            return playlist
    raise ValueError('No match on "{0}" for value "{1}"'.format(
        attr_name, match))
|
amelchio/pysonos
|
pysonos/core.py
|
SoCo.unjoin
|
python
|
def unjoin(self):
self.avTransport.BecomeCoordinatorOfStandaloneGroup([
('InstanceID', 0)
])
self._zgs_cache.clear()
self._parse_zone_group_state()
|
Remove this speaker from a group.
Seems to work ok even if you remove what was previously the group
master from its own group. If the speaker was not in a group also
returns ok.
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/core.py#L1089-L1101
| null |
class SoCo(_SocoSingletonBase):
"""A simple class for controlling a Sonos speaker.
For any given set of arguments to __init__, only one instance of this class
may be created. Subsequent attempts to create an instance with the same
arguments will return the previously created instance. This means that all
SoCo instances created with the same ip address are in fact the *same* SoCo
instance, reflecting the real world position.
.. rubric:: Basic Methods
.. autosummary::
play_from_queue
play
play_uri
pause
stop
seek
next
previous
mute
volume
play_mode
cross_fade
ramp_to_volume
get_current_track_info
get_speaker_info
get_current_transport_info
.. rubric:: Queue Management
.. autosummary::
get_queue
queue_size
add_to_queue
add_uri_to_queue
add_multiple_to_queue
remove_from_queue
clear_queue
.. rubric:: Group Management
.. autosummary::
group
partymode
join
unjoin
all_groups
all_zones
visible_zones
.. rubric:: Player Identity and Settings
.. autosummary::
player_name
uid
household_id
is_visible
is_bridge
is_coordinator
is_soundbar
bass
treble
loudness
night_mode
dialog_mode
status_light
.. rubric:: Playlists and Favorites
.. autosummary::
get_sonos_playlists
create_sonos_playlist
create_sonos_playlist_from_queue
remove_sonos_playlist
add_item_to_sonos_playlist
reorder_sonos_playlist
clear_sonos_playlist
move_in_sonos_playlist
remove_from_sonos_playlist
get_sonos_playlist_by_attr
get_favorite_radio_shows
get_favorite_radio_stations
get_sonos_favorites
.. rubric:: Miscellaneous
.. autosummary::
switch_to_line_in
is_playing_radio
is_playing_line_in
is_playing_tv
switch_to_tv
set_sleep_timer
get_sleep_timer
.. warning::
Properties on this object are not generally cached and may obtain
information over the network, so may take longer than expected to set
or return a value. It may be a good idea for you to cache the value in
your own code.
.. note::
Since all methods/properties on this object will result in an UPnP
request, they might result in an exception without it being mentioned
in the Raises section.
In most cases, the exception will be a
:class:`soco.exceptions.SoCoUPnPException`
(if the player returns an UPnP error code), but in special cases
it might also be another :class:`soco.exceptions.SoCoException`
or even a `requests` exception.
"""
_class_group = 'SoCo'
# pylint: disable=super-on-old-class
def __init__(self, ip_address):
    """Initialise a SoCo instance for the speaker at *ip_address*.

    Raises:
        ValueError: if *ip_address* is not a valid IPv4 dotted-quad
            string.
    """
    # Note: Creation of a SoCo instance should be as cheap and quick as
    # possible. Do not make any network calls here
    super(SoCo, self).__init__()
    # Check if ip_address is a valid IPv4 representation.
    # Sonos does not (yet) support IPv6
    try:
        socket.inet_aton(ip_address)
    except socket.error:
        raise ValueError("Not a valid IP address string")
    #: The speaker's ip address
    self.ip_address = ip_address
    self.speaker_info = {}  # Stores information about the current speaker
    # The services which we use
    # pylint: disable=invalid-name
    self.avTransport = AVTransport(self)
    self.contentDirectory = ContentDirectory(self)
    self.deviceProperties = DeviceProperties(self)
    self.renderingControl = RenderingControl(self)
    self.zoneGroupTopology = ZoneGroupTopology(self)
    self.alarmClock = AlarmClock(self)
    self.systemProperties = SystemProperties(self)
    self.musicServices = MusicServices(self)
    self.music_library = MusicLibrary(self)
    # Some private attributes, lazily populated (mostly by
    # _parse_zone_group_state); None / empty means "not yet fetched".
    self._all_zones = set()
    self._groups = set()
    self._is_bridge = None
    self._is_coordinator = False
    self._is_soundbar = None
    self._player_name = None
    self._uid = None
    self._household_id = None
    self._visible_zones = set()
    # Zone group state is cached briefly to avoid hammering the device.
    self._zgs_cache = Cache(default_timeout=5)
    self._zgs_result = None
    _LOG.debug("Created SoCo instance for ip: %s", ip_address)
def __str__(self):
    """Readable description of this speaker."""
    name = self.__class__.__name__
    return "<{0} object at ip {1}>".format(name, self.ip_address)
def __repr__(self):
    """Unambiguous representation, e.g. ``SoCo("192.168.1.1")``."""
    return '{0}("{1}")'.format(type(self).__name__, self.ip_address)
@property
def player_name(self):
    """str: The speaker's name."""
    # Read from the (cached) zone group topology rather than calling
    # deviceProperties.GetZoneAttributes(), which would be slower.
    self._parse_zone_group_state()
    return self._player_name
@player_name.setter
def player_name(self, playername):
    """Set the speaker's name."""
    attributes = [
        ('DesiredZoneName', playername),
        ('DesiredIcon', ''),
        ('DesiredConfiguration', ''),
    ]
    self.deviceProperties.SetZoneAttributes(attributes)
@property
def uid(self):
    """str: A unique identifier.

    Looks like: ``'RINCON_000XXXXXXXXXX1400'``
    """
    # Cached after the first lookup, since it should not change.
    # Parsing the zone group topology (itself cached) populates
    # self._uid as a side effect.
    if self._uid is None:
        self._parse_zone_group_state()
    return self._uid
@property
def household_id(self):
    """str: A unique identifier for all players in a household.

    Looks like: ``'Sonos_asahHKgjgJGjgjGjggjJgjJG34'``
    """
    # Lazily fetched once and cached, since it should not change.
    if self._household_id is None:
        result = self.deviceProperties.GetHouseholdID()
        self._household_id = result['CurrentHouseholdID']
    return self._household_id
@property
def is_visible(self):
    """bool: Is this zone visible?

    A zone might be invisible if, for example, it is a bridge, or the
    slave part of stereo pair.
    """
    # Derived from the zone group topology (to benefit from caching)
    # rather than calling deviceProperties.GetInvisible().
    return self in self.visible_zones
@property
def is_bridge(self):
    """bool: Is this zone a bridge?"""
    # Parsing the zone group state sets self._is_bridge; since the
    # answer should not change, only do it once.
    if self._is_bridge is None:
        self._parse_zone_group_state()
    return self._is_bridge
@property
def is_coordinator(self):
    """bool: Is this zone a group coordinator?"""
    # Always re-derived from the (briefly cached) zone group topology,
    # since group membership can change at any time.
    self._parse_zone_group_state()
    return self._is_coordinator
@property
def is_soundbar(self):
    """bool: Is this zone a soundbar (i.e. has night mode etc.)?"""
    # Determined once from the model name and cached.
    if self._is_soundbar is None:
        if not self.speaker_info:
            self.get_speaker_info()
        model = self.speaker_info['model_name'].lower()
        self._is_soundbar = model.endswith(tuple(SOUNDBARS))
    return self._is_soundbar
@property
def play_mode(self):
    """str: The queue's play mode.

    Case-insensitive options are:

    * ``'NORMAL'`` -- Turns off shuffle and repeat.
    * ``'REPEAT_ALL'`` -- Turns on repeat and turns off shuffle.
    * ``'SHUFFLE'`` -- Turns on shuffle *and* repeat. (It's
      strange, I know.)
    * ``'SHUFFLE_NOREPEAT'`` -- Turns on shuffle and turns off
      repeat.
    """
    settings = self.avTransport.GetTransportSettings([
        ('InstanceID', 0),
    ])
    return settings['PlayMode']
@play_mode.setter
def play_mode(self, playmode):
    """Set the speaker's mode.

    Raises:
        KeyError: if *playmode* is not a recognised play mode.
    """
    playmode = playmode.upper()
    # Idiom fix: membership test on the dict directly, not .keys().
    if playmode not in PLAY_MODES:
        raise KeyError("'%s' is not a valid play mode" % playmode)
    self.avTransport.SetPlayMode([
        ('InstanceID', 0),
        ('NewPlayMode', playmode)
    ])
@property
def shuffle(self):
    """bool: The queue's shuffle option.

    True if enabled, False otherwise.
    """
    # PLAY_MODES maps each mode name to its (shuffle, repeat) meaning.
    meaning = PLAY_MODES[self.play_mode]
    return meaning[0]
@shuffle.setter
def shuffle(self, shuffle):
    """Set the queue's shuffle option."""
    # Preserve the current repeat setting when toggling shuffle.
    self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, self.repeat)]
@property
def repeat(self):
    """bool: The queue's repeat option.

    True if enabled, False otherwise.

    Might also be ``'ONE'`` if repeating the same title is enabled
    (not supported by the official controller).
    """
    # PLAY_MODES maps each mode name to its (shuffle, repeat) meaning.
    meaning = PLAY_MODES[self.play_mode]
    return meaning[1]
@repeat.setter
def repeat(self, repeat):
    """Set the queue's repeat option."""
    # Preserve the current shuffle setting when toggling repeat.
    self.play_mode = PLAY_MODE_BY_MEANING[(self.shuffle, repeat)]
@property
@only_on_master  # Only for symmetry with the setter
def cross_fade(self):
    """bool: The speaker's cross fade state.

    True if enabled, False otherwise.
    """
    reply = self.avTransport.GetCrossfadeMode([
        ('InstanceID', 0),
    ])
    return bool(int(reply['CrossfadeMode']))
@cross_fade.setter
@only_on_master
def cross_fade(self, crossfade):
    """Set the speaker's cross fade state."""
    self.avTransport.SetCrossfadeMode([
        ('InstanceID', 0),
        ('CrossfadeMode', '1' if crossfade else '0')
    ])
def ramp_to_volume(self, volume, ramp_type='SLEEP_TIMER_RAMP_TYPE'):
    """Smoothly change the volume.

    There are three ramp types available:

    * ``'SLEEP_TIMER_RAMP_TYPE'`` (default): Linear ramp from the
      current volume up or down to the new volume at 1.25 steps per
      second (e.g. volume 50 to 30 takes 16 seconds).
    * ``'ALARM_RAMP_TYPE'``: Resets the volume to zero, waits about
      30 seconds, then ramps up at 2.5 steps per second (e.g. volume
      30 takes 12 seconds, excluding the wait).
    * ``'AUTOPLAY_RAMP_TYPE'``: Resets the volume to zero, then ramps
      up quickly at 50 steps per second (e.g. volume 30 takes 0.6
      seconds).

    The ramp rate is selected by Sonos based on the chosen ramp type
    and the resulting transition time returned.
    This method is non blocking and has no network overhead once sent.

    Args:
        volume (int): The new volume.
        ramp_type (str, optional): The desired ramp type, as described
            above.

    Returns:
        int: The ramp time in seconds, rounded down. Note that this
        does not include the wait time.
    """
    reply = self.renderingControl.RampToVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('RampType', ramp_type),
        ('DesiredVolume', volume),
        ('ResetVolumeAfter', False),
        ('ProgramURI', '')
    ])
    return int(reply['RampTime'])
@only_on_master
def play_from_queue(self, index, start=True):
    """Play a track from the queue by index.

    The index number is required as an argument, where the first index
    is 0.

    Args:
        index (int): 0-based index of the track to play
        start (bool): If the item that has been set should start playing
    """
    # Speaker info is fetched up front if we do not have it already.
    if not self.speaker_info:
        self.get_speaker_info()
    # Step 1: make the queue itself the transport source.
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', 'x-rincon-queue:{0}#0'.format(self.uid)),
        ('CurrentURIMetaData', '')
    ])
    # Step 2: seek to the requested track (Sonos counts tracks from 1).
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'TRACK_NR'),
        ('Target', index + 1)
    ])
    # Step 3: start playback of the selected track if requested.
    if start:
        self.play()
@only_on_master
def play(self):
    """Play the currently selected track."""
    args = [('InstanceID', 0), ('Speed', 1)]
    self.avTransport.Play(args)
@only_on_master
# pylint: disable=too-many-arguments
def play_uri(self, uri='', meta='', title='', start=True,
             force_radio=False):
    """Play a URI.

    Playing a URI will replace what was playing with the stream given by
    the URI. For some streams at least a title is required as metadata.
    This can be provided using the `meta` argument or the `title` argument.
    If the `title` argument is provided minimal metadata will be generated.
    If `meta` argument is provided the `title` argument is ignored.

    Args:
        uri (str): URI of the stream to be played.
        meta (str): The metadata to show in the player, DIDL format.
        title (str): The title to show in the player (if no meta).
        start (bool): If the URI that has been set should start playing.
        force_radio (bool): forces a uri to play as a radio stream.

    On a Sonos controller music is shown with one of the following display
    formats and controls:

    * Radio format: Shows the name of the radio station and other available
      data. No seek, next, previous, or voting capability.
      Examples: TuneIn, radioPup
    * Smart Radio: Shows track name, artist, and album. Limited seek, next
      and sometimes voting capability depending on the Music Service.
      Examples: Amazon Prime Stations, Pandora Radio Stations.
    * Track format: Shows track name, artist, and album the same as when
      playing from a queue. Full seek, next and previous capabilities.
      Examples: Spotify, Napster, Rhapsody.

    How it is displayed is determined by the URI prefix:
    `x-sonosapi-stream:`, `x-sonosapi-radio:`, `x-rincon-mp3radio:`,
    `hls-radio:` default to radio or smart radio format depending on the
    stream. Others default to track format: `x-file-cifs:`, `aac:`,
    `http:`, `https:`, `x-sonos-spotify:` (used by Spotify),
    `x-sonosapi-hls-static:` (Amazon Prime),
    `x-sonos-http:` (Google Play & Napster).

    Some URIs that default to track format could be radio streams,
    typically `http:`, `https:` or `aac:`.
    To force display and controls to Radio format set `force_radio=True`

    .. note:: Other URI prefixes exist but are less common.
       If you have information on these please add to this doc string.

    .. note:: A change in Sonos® (as of at least version 6.4.2) means that
       the devices no longer accepts ordinary `http:` and `https:` URIs for
       radio stations. This method has the option to replaces these
       prefixes with the one that Sonos® expects: `x-rincon-mp3radio:` by
       using the "force_radio=True" parameter.
       A few streams may fail if not forced to to Radio format.
    """
    if meta == '' and title != '':
        # Build minimal DIDL-Lite metadata containing just the title,
        # tagged with the TuneIn service descriptor.
        meta_template = '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements'\
            '/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" '\
            'xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/" '\
            'xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">'\
            '<item id="R:0/0/0" parentID="R:0/0" restricted="true">'\
            '<dc:title>{title}</dc:title><upnp:class>'\
            'object.item.audioItem.audioBroadcast</upnp:class><desc '\
            'id="cdudn" nameSpace="urn:schemas-rinconnetworks-com:'\
            'metadata-1-0/">{service}</desc></item></DIDL-Lite>'
        tunein_service = 'SA_RINCON65031_'
        # Radio stations need to have at least a title to play
        meta = meta_template.format(
            title=escape(title),
            service=tunein_service)
    # change uri prefix to force radio style display and commands
    if force_radio:
        colon = uri.find(':')
        if colon > 0:
            uri = 'x-rincon-mp3radio{0}'.format(uri[colon:])
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', uri),
        ('CurrentURIMetaData', meta)
    ])
    # The track is enqueued, now play it if needed
    if start:
        return self.play()
    return False
@only_on_master
def pause(self):
    """Pause the currently playing track."""
    args = [('InstanceID', 0), ('Speed', 1)]
    self.avTransport.Pause(args)
@only_on_master
def stop(self):
    """Stop the currently playing track."""
    args = [('InstanceID', 0), ('Speed', 1)]
    self.avTransport.Stop(args)
@only_on_master
def seek(self, timestamp):
    """Seek to a given timestamp in the current track, specified in the
    format of HH:MM:SS or H:MM:SS.

    Raises:
        ValueError: if the given timestamp is invalid.
    """
    # Validate locally before hitting the device.
    if re.match(r'^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$', timestamp) is None:
        raise ValueError('invalid timestamp, use HH:MM:SS format')
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'REL_TIME'),
        ('Target', timestamp)
    ])
@only_on_master
def next(self):
    """Go to the next track.

    Keep in mind that next() can return errors
    for a variety of reasons. For example, if the Sonos is streaming
    Pandora and you call next() several times in quick succession an error
    code will likely be returned (since Pandora has limits on how many
    songs can be skipped).
    """
    args = [('InstanceID', 0), ('Speed', 1)]
    self.avTransport.Next(args)
@only_on_master
def previous(self):
    """Go back to the previously played track.

    Keep in mind that previous() can return errors
    for a variety of reasons. For example, previous() will return an error
    code (error code 701) if the Sonos is streaming Pandora since you can't
    go back on tracks.
    """
    args = [('InstanceID', 0), ('Speed', 1)]
    self.avTransport.Previous(args)
@property
def mute(self):
    """bool: The speaker's mute state.

    True if muted, False otherwise.
    """
    reply = self.renderingControl.GetMute([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    return bool(int(reply['CurrentMute']))
@mute.setter
def mute(self, mute):
    """Mute (or unmute) the speaker."""
    self.renderingControl.SetMute([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredMute', '1' if mute else '0')
    ])
@property
def volume(self):
    """int: The speaker's volume.

    An integer between 0 and 100.
    """
    reply = self.renderingControl.GetVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(reply['CurrentVolume'])
@volume.setter
def volume(self, volume):
    """Set the speaker's volume."""
    # Coerce into the valid range [0, 100].
    clamped = min(max(int(volume), 0), 100)
    self.renderingControl.SetVolume([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredVolume', clamped)
    ])
@property
def bass(self):
    """int: The speaker's bass EQ.

    An integer between -10 and 10.
    """
    reply = self.renderingControl.GetBass([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(reply['CurrentBass'])
@bass.setter
def bass(self, bass):
    """Set the speaker's bass."""
    # Coerce into the valid range [-10, 10].
    clamped = min(max(int(bass), -10), 10)
    self.renderingControl.SetBass([
        ('InstanceID', 0),
        ('DesiredBass', clamped)
    ])
@property
def treble(self):
    """int: The speaker's treble EQ.

    An integer between -10 and 10.
    """
    reply = self.renderingControl.GetTreble([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(reply['CurrentTreble'])
@treble.setter
def treble(self, treble):
    """Set the speaker's treble."""
    # Coerce into the valid range [-10, 10].
    clamped = min(max(int(treble), -10), 10)
    self.renderingControl.SetTreble([
        ('InstanceID', 0),
        ('DesiredTreble', clamped)
    ])
@property
def loudness(self):
    """bool: The Sonos speaker's loudness compensation.

    True if on, False otherwise.

    Loudness is a complicated topic. You can find a nice summary about this
    feature here: http://forums.sonos.com/showthread.php?p=4698#post4698
    """
    reply = self.renderingControl.GetLoudness([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return bool(int(reply["CurrentLoudness"]))
@loudness.setter
def loudness(self, loudness):
    """Switch on/off the speaker's loudness compensation."""
    self.renderingControl.SetLoudness([
        ('InstanceID', 0),
        ('Channel', 'Master'),
        ('DesiredLoudness', '1' if loudness else '0')
    ])
@property
def night_mode(self):
    """bool: The speaker's night mode.

    True if on, False if off, None if not supported.
    """
    # Night mode only exists on soundbar models.
    if not self.is_soundbar:
        return None
    result = self.renderingControl.GetEQ([
        ('InstanceID', 0),
        ('EQType', 'NightMode')
    ])
    return bool(int(result['CurrentValue']))
@night_mode.setter
def night_mode(self, night_mode):
    """Switch on/off the speaker's night mode.

    :param night_mode: Enable or disable night mode
    :type night_mode: bool
    :raises NotSupportedException: If the device does not support
        night mode.
    """
    if not self.is_soundbar:
        raise NotSupportedException(
            'This device does not support night mode')
    self.renderingControl.SetEQ([
        ('InstanceID', 0),
        ('EQType', 'NightMode'),
        ('DesiredValue', int(night_mode))
    ])
@property
def dialog_mode(self):
    """bool: Get the Sonos speaker's dialog mode.

    True if on, False if off, None if not supported.
    """
    # Dialog mode only exists on soundbar models.
    if not self.is_soundbar:
        return None
    result = self.renderingControl.GetEQ([
        ('InstanceID', 0),
        ('EQType', 'DialogLevel')
    ])
    return bool(int(result['CurrentValue']))
@dialog_mode.setter
def dialog_mode(self, dialog_mode):
    """Switch on/off the speaker's dialog mode.

    :param dialog_mode: Enable or disable dialog mode
    :type dialog_mode: bool
    :raises NotSupportedException: If the device does not support
        dialog mode.
    """
    if not self.is_soundbar:
        raise NotSupportedException(
            'This device does not support dialog mode')
    self.renderingControl.SetEQ([
        ('InstanceID', 0),
        ('EQType', 'DialogLevel'),
        ('DesiredValue', int(dialog_mode))
    ])
def _parse_zone_group_state(self):
    """The Zone Group State contains a lot of useful information.

    Retrieve and parse it, and populate the relevant properties:
    ``_groups``, ``_all_zones`` and ``_visible_zones``.
    """
    # zoneGroupTopology.GetZoneGroupState()['ZoneGroupState'] returns XML like
    # this:
    #
    # <ZoneGroups>
    #   <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXXX1400:0">
    #     <ZoneGroupMember
    #         BootSeq="33"
    #         Configuration="1"
    #         Icon="x-rincon-roomicon:zoneextender"
    #         Invisible="1"
    #         IsZoneBridge="1"
    #         Location="http://192.168.1.100:1400/xml/device_description.xml"
    #         MinCompatibleVersion="22.0-00000"
    #         SoftwareVersion="24.1-74200"
    #         UUID="RINCON_000ZZZ1400"
    #         ZoneName="BRIDGE"/>
    #   </ZoneGroup>
    #   <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXX1400:46">
    #     <ZoneGroupMember
    #         BootSeq="44"
    #         Configuration="1"
    #         Icon="x-rincon-roomicon:living"
    #         Location="http://192.168.1.101:1400/xml/device_description.xml"
    #         MinCompatibleVersion="22.0-00000"
    #         SoftwareVersion="24.1-74200"
    #         UUID="RINCON_000XXX1400"
    #         ZoneName="Living Room"/>
    #     <ZoneGroupMember
    #         BootSeq="52"
    #         Configuration="1"
    #         Icon="x-rincon-roomicon:kitchen"
    #         Location="http://192.168.1.102:1400/xml/device_description.xml"
    #         MinCompatibleVersion="22.0-00000"
    #         SoftwareVersion="24.1-74200"
    #         UUID="RINCON_000YYY1400"
    #         ZoneName="Kitchen"/>
    #   </ZoneGroup>
    # </ZoneGroups>
    #
    def parse_zone_group_member(member_element):
        """Parse a ZoneGroupMember or Satellite element from Zone Group
        State, create a SoCo instance for the member, set basic attributes
        and return it."""
        # Create a SoCo instance for each member. Because SoCo
        # instances are singletons, this is cheap if they have already
        # been created, and useful if they haven't. We can then
        # update various properties for that instance.
        member_attribs = member_element.attrib
        # The IP address is embedded in the member's device-description
        # URL, e.g. "http://192.168.1.101:1400/...".
        ip_addr = member_attribs['Location'].\
            split('//')[1].split(':')[0]
        zone = config.SOCO_CLASS(ip_addr)
        # share our cache
        zone._zgs_cache = self._zgs_cache
        # uid doesn't change, but it's not harmful to (re)set it, in case
        # the zone is as yet unseen.
        zone._uid = member_attribs['UUID']
        zone._player_name = member_attribs['ZoneName']
        # add the zone to the set of all members, and to the set
        # of visible members if appropriate
        is_visible = (member_attribs.get('Invisible') != '1')
        if is_visible:
            self._visible_zones.add(zone)
        self._all_zones.add(zone)
        return zone

    # This is called quite frequently, so it is worth optimising it.
    # Maintain a private cache. If the zgt has not changed, there is no
    # need to repeat all the XML parsing. In addition, switch on network
    # caching for a short interval (5 secs).
    zgs = self.zoneGroupTopology.GetZoneGroupState(
        cache=self._zgs_cache)['ZoneGroupState']
    if zgs == self._zgs_result:
        return
    self._zgs_result = zgs
    tree = XML.fromstring(zgs.encode('utf-8'))
    # Empty the set of all zone_groups
    self._groups.clear()
    # and the set of all members
    self._all_zones.clear()
    self._visible_zones.clear()
    # With some versions, the response is wrapped in ZoneGroupState
    # NOTE(review): an Element with no children is falsy in Python, so an
    # empty <ZoneGroups/> would fall through to the wrapper here; harmless,
    # since the loop below would then simply find no ZoneGroup children.
    tree = tree.find('ZoneGroups') or tree
    # Loop over each ZoneGroup Element
    for group_element in tree.findall('ZoneGroup'):
        coordinator_uid = group_element.attrib['Coordinator']
        group_uid = group_element.attrib['ID']
        group_coordinator = None
        members = set()
        for member_element in group_element.findall('ZoneGroupMember'):
            zone = parse_zone_group_member(member_element)
            # Perform extra processing relevant to direct zone group
            # members
            #
            # If this element has the same UUID as the coordinator, it is
            # the coordinator
            if zone._uid == coordinator_uid:
                group_coordinator = zone
                zone._is_coordinator = True
            else:
                zone._is_coordinator = False
            # is_bridge doesn't change, but it does no real harm to
            # set/reset it here, just in case the zone has not been seen
            # before
            zone._is_bridge = (
                member_element.attrib.get('IsZoneBridge') == '1')
            # add the zone to the members for this group
            members.add(zone)
            # Loop over Satellite elements if present, and process as for
            # ZoneGroup elements
            for satellite_element in member_element.findall('Satellite'):
                zone = parse_zone_group_member(satellite_element)
                # Assume a satellite can't be a bridge or coordinator, so
                # no need to check.
                #
                # Add the zone to the members for this group.
                members.add(zone)
        # Now create a ZoneGroup with this info and add it to the list
        # of groups
        self._groups.add(ZoneGroup(group_uid, group_coordinator, members))
@property
def all_groups(self):
    """set of :class:`soco.groups.ZoneGroup`: All available groups."""
    self._parse_zone_group_state()
    # Hand back a copy so callers cannot mutate our internal state.
    return self._groups.copy()
@property
def group(self):
    """:class:`soco.groups.ZoneGroup`: The Zone Group of which this device
    is a member.

    None if this zone is a slave in a stereo pair.
    """
    # Scan all groups for the one that contains this device; default to
    # None when no group claims us. (The group could also be fetched
    # directly from the network via GetZoneGroupAttributes, but that is
    # probably slower than scanning the parsed topology.)
    return next(
        (candidate for candidate in self.all_groups if self in candidate),
        None)
@property
def all_zones(self):
    """set of :class:`soco.groups.ZoneGroup`: All available zones."""
    self._parse_zone_group_state()
    # Hand back a copy so callers cannot mutate our internal state.
    return self._all_zones.copy()
@property
def visible_zones(self):
    """set of :class:`soco.groups.ZoneGroup`: All visible zones."""
    self._parse_zone_group_state()
    # Hand back a copy so callers cannot mutate our internal state.
    return self._visible_zones.copy()
def partymode(self):
    """Put all the speakers in the network in the same group, a.k.a Party
    Mode.

    This blog shows the initial research responsible for this:
    http://blog.travelmarx.com/2010/06/exploring-sonos-via-upnp.html

    The trick seems to be (only tested on a two-speaker setup) to tell each
    speaker which to join. There's probably a bit more to it if multiple
    groups have been defined.
    """
    # Tell every other visible zone to join this one. A plain for-loop is
    # used instead of a list comprehension executed only for its side
    # effects: join() is called for effect and its results are discarded.
    for zone in self.visible_zones:
        if zone is not self:
            zone.join(self)
def join(self, master):
    """Join this speaker to another "master" speaker."""
    master_uri = 'x-rincon:{0}'.format(master.uid)
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', master_uri),
        ('CurrentURIMetaData', '')
    ])
    # The topology changed, so drop the cached state and re-read it.
    self._zgs_cache.clear()
    self._parse_zone_group_state()
def switch_to_line_in(self, source=None):
    """ Switch the speaker's input to line-in.

    Args:
        source (SoCo): The speaker whose line-in should be played.
            Default is line-in from the speaker itself.
    """
    # Default to our own line-in when no source speaker is supplied.
    uid = source.uid if source else self.uid
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', 'x-rincon-stream:{0}'.format(uid)),
        ('CurrentURIMetaData', '')
    ])
@property
def is_playing_radio(self):
    """bool: Is the speaker playing radio?"""
    response = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    track_uri = response['TrackURI']
    # A literal prefix check replaces the anchored regex match: the
    # pattern contained no regex metacharacters, so startswith is the
    # idiomatic (and equivalent) test.
    return track_uri.startswith('x-rincon-mp3radio:')
@property
def is_playing_line_in(self):
    """bool: Is the speaker playing line-in?"""
    response = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    track_uri = response['TrackURI']
    # A literal prefix check replaces the anchored regex match (the
    # pattern contained no regex metacharacters).
    return track_uri.startswith('x-rincon-stream:')
@property
def is_playing_tv(self):
    """bool: Is the playbar speaker input from TV?"""
    response = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    track_uri = response['TrackURI']
    # A literal prefix check replaces the anchored regex match (the
    # pattern contained no regex metacharacters).
    return track_uri.startswith('x-sonos-htastream:')
def switch_to_tv(self):
    """Switch the playbar speaker's input to TV."""
    # The TV input is addressed via the speaker's own UID.
    tv_uri = 'x-sonos-htastream:{0}:spdif'.format(self.uid)
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', tv_uri),
        ('CurrentURIMetaData', '')
    ])
@property
def status_light(self):
    """bool: The white Sonos status light between the mute button and the
    volume up button on the speaker.

    True if on, otherwise False.
    """
    state = self.deviceProperties.GetLEDState()["CurrentLEDState"]
    return state == "On"
@status_light.setter
def status_light(self, led_on):
    """Switch on/off the speaker's status light."""
    # The service expects the literal strings 'On'/'Off'.
    self.deviceProperties.SetLEDState([
        ('DesiredLEDState', 'On' if led_on else 'Off'),
    ])
def get_current_track_info(self):
    """Get information about the currently playing track.

    Returns:
        dict: A dictionary containing information about the currently
        playing track: playlist_position, duration, title, artist, album,
        position and an album_art link.

    If we're unable to return data for a field, we'll return an empty
    string. This can happen for all kinds of reasons so be sure to check
    values. For example, a track may not have complete metadata and be
    missing an album name. In this case track['album'] will be an empty
    string.

    .. note:: Calling this method on a slave in a group will not
        return the track the group is playing, but the last track
        this speaker was playing.
    """
    response = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    # Defaults: empty strings for any field we cannot populate below.
    track = {'title': '', 'artist': '', 'album': '', 'album_art': '',
             'position': ''}
    track['playlist_position'] = response['Track']
    track['duration'] = response['TrackDuration']
    track['uri'] = response['TrackURI']
    track['position'] = response['RelTime']
    metadata = response['TrackMetaData']
    # Store the entire Metadata entry in the track, this can then be
    # used if needed by the client to restart a given URI
    track['metadata'] = metadata
    # Duration seems to be '0:00:00' when listening to radio
    if metadata != '' and track['duration'] == '0:00:00':
        metadata = XML.fromstring(really_utf8(metadata))
        # Try parse trackinfo
        trackinfo = metadata.findtext('.//{urn:schemas-rinconnetworks-com:'
                                      'metadata-1-0/}streamContent') or ''
        # Radio streams usually encode "artist - title" in streamContent.
        index = trackinfo.find(' - ')
        if index > -1:
            track['artist'] = trackinfo[:index]
            track['title'] = trackinfo[index + 3:]
        else:
            # Might find some kind of title anyway in metadata
            track['title'] = metadata.findtext('.//{http://purl.org/dc/'
                                               'elements/1.1/}title')
            if not track['title']:
                track['title'] = trackinfo
    # If the speaker is playing from the line-in source, querying for track
    # metadata will return "NOT_IMPLEMENTED".
    elif metadata not in ('', 'NOT_IMPLEMENTED', None):
        # Track metadata is returned in DIDL-Lite format
        metadata = XML.fromstring(really_utf8(metadata))
        md_title = metadata.findtext(
            './/{http://purl.org/dc/elements/1.1/}title')
        md_artist = metadata.findtext(
            './/{http://purl.org/dc/elements/1.1/}creator')
        md_album = metadata.findtext(
            './/{urn:schemas-upnp-org:metadata-1-0/upnp/}album')
        track['title'] = ""
        if md_title:
            track['title'] = md_title
        track['artist'] = ""
        if md_artist:
            track['artist'] = md_artist
        track['album'] = ""
        if md_album:
            track['album'] = md_album
        album_art_url = metadata.findtext(
            './/{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')
        if album_art_url is not None:
            # Qualify the relative art URL with the speaker's address.
            track['album_art'] = \
                self.music_library.build_album_art_full_uri(album_art_url)
    return track
def get_speaker_info(self, refresh=False, timeout=None):
    """Get information about the Sonos speaker.

    Arguments:
        refresh(bool): Refresh the speaker info cache.
        timeout: How long to wait for the server to send
            data before giving up, as a float, or a
            `(connect timeout, read timeout)` tuple
            e.g. (3, 5). Default is no timeout.

    Returns:
        dict: Information about the Sonos speaker, such as the UID,
        MAC Address, and Zone Name. Returns None if the device
        description contains no recognisable device element.
    """
    # Serve from the cache unless an explicit refresh is requested.
    if self.speaker_info and refresh is False:
        return self.speaker_info
    else:
        # Fetch and parse the device description document directly from
        # the speaker's embedded web server.
        response = requests.get('http://' + self.ip_address +
                                ':1400/xml/device_description.xml',
                                timeout=timeout)
        dom = XML.fromstring(response.content)

    device = dom.find('{urn:schemas-upnp-org:device-1-0}device')
    if device is not None:
        self.speaker_info['zone_name'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}roomName')

        # no zone icon in device_description.xml -> player icon
        self.speaker_info['player_icon'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}iconList/'
            '{urn:schemas-upnp-org:device-1-0}icon/'
            '{urn:schemas-upnp-org:device-1-0}url'
        )

        self.speaker_info['uid'] = self.uid
        self.speaker_info['serial_number'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}serialNum')
        self.speaker_info['software_version'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}softwareVersion')
        self.speaker_info['hardware_version'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}hardwareVersion')
        self.speaker_info['model_number'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}modelNumber')
        self.speaker_info['model_name'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}modelName')
        self.speaker_info['display_version'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}displayVersion')

        # no mac address - extract from serial number
        mac = self.speaker_info['serial_number'].split(':')[0]
        self.speaker_info['mac_address'] = mac

        return self.speaker_info
    return None
def get_current_transport_info(self):
    """Get the current playback state.

    Returns:
        dict: The following information about the
        speaker's playing state:

        * current_transport_state (``PLAYING``, ``TRANSITIONING``,
          ``PAUSED_PLAYBACK``, ``STOPPED``)
        * current_transport_status (OK, ?)
        * current_speed(1, ?)

    This allows us to know if speaker is playing or not. Don't know other
    states of CurrentTransportStatus and CurrentSpeed.
    """
    response = self.avTransport.GetTransportInfo([
        ('InstanceID', 0),
    ])
    # Build the result dict directly from the UPnP response fields.
    return {
        'current_transport_state': response['CurrentTransportState'],
        'current_transport_status': response['CurrentTransportStatus'],
        'current_transport_speed': response['CurrentSpeed'],
    }
def get_queue(self, start=0, max_items=100, full_album_art_uri=False):
    """Get information about the queue.

    :param start: Starting number of returned matches
    :param max_items: Maximum number of returned matches
    :param full_album_art_uri: If the album art URI should include the
        IP address
    :returns: A :py:class:`~.soco.data_structures.Queue` object

    This method is heavly based on Sam Soffes (aka soffes) ruby
    implementation
    """
    queue = []
    response = self.contentDirectory.Browse([
        ('ObjectID', 'Q:0'),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    result = response['Result']

    # Carry the browse counters (number_returned, total_matches,
    # update_id) over onto the returned Queue object.
    metadata = {}
    for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
        metadata[camel_to_underscore(tag)] = int(response[tag])

    # I'm not sure this necessary (any more). Even with an empty queue,
    # there is still a result object. This shoud be investigated.
    if not result:
        # pylint: disable=star-args
        return Queue(queue, **metadata)

    items = from_didl_string(result)
    for item in items:
        # Check if the album art URI should be fully qualified
        if full_album_art_uri:
            self.music_library._update_album_art_to_full_uri(item)
        queue.append(item)

    # pylint: disable=star-args
    return Queue(queue, **metadata)
@property
def queue_size(self):
    """int: Size of the queue."""
    response = self.contentDirectory.Browse([
        ('ObjectID', 'Q:0'),
        ('BrowseFlag', 'BrowseMetadata'),
        ('Filter', '*'),
        ('StartingIndex', 0),
        ('RequestedCount', 1),
        ('SortCriteria', '')
    ])
    dom = XML.fromstring(really_utf8(response['Result']))
    container = dom.find(
        '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container')
    # The queue length is the container's childCount attribute; stay
    # None if the container or the attribute is missing.
    size = None
    if container is not None:
        count_attr = container.get('childCount')
        if count_attr is not None:
            size = int(count_attr)
    return size
def get_sonos_playlists(self, *args, **kwargs):
    """Convenience method for
    `get_music_library_information('sonos_playlists')`.

    Refer to the docstring for that method
    """
    # Prepend the search category and delegate to the music library.
    return self.music_library.get_music_library_information(
        'sonos_playlists', *args, **kwargs)
@only_on_master
def add_uri_to_queue(self, uri, position=0, as_next=False):
    """Add the URI to the queue.

    For arguments and return value see `add_to_queue`.
    """
    # Wrap the bare URI in a minimal DIDL item so add_to_queue can
    # handle it like any other queueable object.
    # FIXME: The res.protocol_info should probably represent the mime
    # type etc of the uri. But this seems OK.
    resources = [DidlResource(
        uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
    wrapper = DidlObject(
        resources=resources, title='', parent_id='', item_id='')
    return self.add_to_queue(wrapper, position, as_next)
@only_on_master
def add_to_queue(self, queueable_item, position=0, as_next=False):
    """Add a queueable item to the queue.

    Args:
        queueable_item (DidlObject or MusicServiceItem): The item to be
            added to the queue
        position (int): The index (1-based) at which the URI should be
            added. Default is 0 (add URI at the end of the queue).
        as_next (bool): Whether this URI should be played as the next
            track in shuffle mode. This only works if `play_mode=SHUFFLE`.

    Returns:
        int: The index of the new item in the queue.
    """
    response = self.avTransport.AddURIToQueue([
        ('InstanceID', 0),
        ('EnqueuedURI', queueable_item.resources[0].uri),
        # The item's DIDL-Lite representation accompanies the URI.
        ('EnqueuedURIMetaData', to_didl_string(queueable_item)),
        ('DesiredFirstTrackNumberEnqueued', position),
        ('EnqueueAsNext', int(as_next))
    ])
    return int(response['FirstTrackNumberEnqueued'])
def add_multiple_to_queue(self, items, container=None):
    """Add a sequence of items to the queue.

    Args:
        items (list): A sequence of items to the be added to the queue
        container (DidlObject, optional): A container object which
            includes the items.
    """
    if container is None:
        # Sonos accepts empty container information.
        container_uri = ''
        container_metadata = ''
    else:
        container_uri = container.resources[0].uri
        container_metadata = to_didl_string(container)

    # The service accepts at most 16 URIs per request, so send the
    # items in chunks of that size.
    chunk_size = 16
    item_list = list(items)
    for offset in range(0, len(item_list), chunk_size):
        chunk = item_list[offset:offset + chunk_size]
        self.avTransport.AddMultipleURIsToQueue([
            ('InstanceID', 0),
            ('UpdateID', 0),
            ('NumberOfURIs', len(chunk)),
            ('EnqueuedURIs',
             ' '.join(item.resources[0].uri for item in chunk)),
            ('EnqueuedURIsMetaData',
             ' '.join(to_didl_string(item) for item in chunk)),
            ('ContainerURI', container_uri),
            ('ContainerMetaData', container_metadata),
            ('DesiredFirstTrackNumberEnqueued', 0),
            ('EnqueueAsNext', 0)
        ])
@only_on_master
def remove_from_queue(self, index):
    """Remove a track from the queue by index. The index number is
    required as an argument, where the first index is 0.

    Args:
        index (int): The (0-based) index of the track to remove
    """
    # Queue object IDs are 1-based: 'Q:0/1' addresses the first track.
    # TODO: what do these parameters actually do?
    self.avTransport.RemoveTrackFromQueue([
        ('InstanceID', 0),
        ('ObjectID', 'Q:0/{0}'.format(index + 1)),
        ('UpdateID', '0'),
    ])
@only_on_master
def clear_queue(self):
    """Remove all tracks from the queue."""
    # A single UPnP call empties the entire queue.
    self.avTransport.RemoveAllTracksFromQueue([('InstanceID', 0)])
@deprecated('0.13', "soco.music_library.get_favorite_radio_shows", '0.15')
def get_favorite_radio_shows(self, start=0, max_items=100):
    """Get favorite radio shows from Sonos' Radio app.

    Returns:
        dict: A dictionary containing the total number of favorites, the
        number of favorites returned, and the actual list of favorite radio
        shows, represented as a dictionary with `title` and `uri` keys.

    Depending on what you're building, you'll want to check to see if the
    total number of favorites is greater than the amount you
    requested (`max_items`), if it is, use `start` to page through and
    get the entire list of favorites.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures',
        stacklevel=2)
    return self.__get_favorites(RADIO_SHOWS, start, max_items)
@deprecated('0.13', "soco.music_library.get_favorite_radio_stations",
            '0.15')
def get_favorite_radio_stations(self, start=0, max_items=100):
    """Get favorite radio stations from Sonos' Radio app.

    See :meth:`get_favorite_radio_shows` for return type and remarks.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures',
        stacklevel=2)
    return self.__get_favorites(RADIO_STATIONS, start, max_items)
@deprecated('0.13', "soco.music_library.get_sonos_favorites", '0.15')
def get_sonos_favorites(self, start=0, max_items=100):
    """Get Sonos favorites.

    See :meth:`get_favorite_radio_shows` for return type and remarks.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures',
        stacklevel=2)
    return self.__get_favorites(SONOS_FAVORITES, start, max_items)
def __get_favorites(self, favorite_type, start=0, max_items=100):
    """ Helper method for `get_favorite_radio_*` methods.

    Args:
        favorite_type (str): Specify either `RADIO_STATIONS` or
            `RADIO_SHOWS`.
        start (int): Which number to start the retrieval from. Used for
            paging.
        max_items (int): The total number of results to return.

    Returns:
        dict: with keys 'total', 'returned' and 'favorites' (a list of
        dicts with 'title' and 'uri', plus 'meta' for Sonos favorites).
    """
    # Anything other than the two radio categories is treated as a
    # request for Sonos favorites.
    if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
        favorite_type = SONOS_FAVORITES

    # Fix: compare with '==' rather than 'is'. Identity comparison of
    # string constants only works by accident of CPython interning and
    # can silently fail for equal-but-distinct string objects.
    if favorite_type == SONOS_FAVORITES:
        object_id = 'FV:2'
    else:
        object_id = 'R:0/{0}'.format(favorite_type)

    response = self.contentDirectory.Browse([
        ('ObjectID', object_id),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    result = {}
    favorites = []
    results_xml = response['Result']

    if results_xml != '':
        # Favorites are returned in DIDL-Lite format
        metadata = XML.fromstring(really_utf8(results_xml))

        # Radio shows come back as containers; everything else as items.
        for item in metadata.findall(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container'
                if favorite_type == RADIO_SHOWS else
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'):
            favorite = {}
            favorite['title'] = item.findtext(
                '{http://purl.org/dc/elements/1.1/}title')
            favorite['uri'] = item.findtext(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res')
            if favorite_type == SONOS_FAVORITES:
                favorite['meta'] = item.findtext(
                    '{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD')
            favorites.append(favorite)

    result['total'] = response['TotalMatches']
    result['returned'] = len(favorites)
    result['favorites'] = favorites

    return result
def create_sonos_playlist(self, title):
    """Create a new empty Sonos playlist.

    Args:
        title: Name of the playlist

    :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
    """
    result = self.avTransport.CreateSavedQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('EnqueuedURI', ''),
        ('EnqueuedURIMetaData', ''),
    ])
    item_id = result['AssignedObjectID']
    # The numeric part of the assigned id (e.g. 'SQ:11' -> '11') keys
    # the playlist inside the saved-queues file on the device.
    playlist_number = item_id.split(':', 2)[1]
    uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(
        playlist_number)
    resources = [DidlResource(
        uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
    return DidlPlaylistContainer(
        resources=resources, title=title, parent_id='SQ:',
        item_id=item_id)
@only_on_master
# pylint: disable=invalid-name
def create_sonos_playlist_from_queue(self, title):
    """Create a new Sonos playlist from the current queue.

    Args:
        title: Name of the playlist

    :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
    """
    # Note: probably same as Queue service method SaveAsSonosPlaylist
    # but this has not been tested. This method is what the
    # controller uses.
    result = self.avTransport.SaveQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('ObjectID', '')
    ])
    item_id = result['AssignedObjectID']
    # The numeric part of the assigned id keys the playlist inside the
    # saved-queues file on the device.
    playlist_number = item_id.split(':', 2)[1]
    uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(
        playlist_number)
    resources = [DidlResource(
        uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
    return DidlPlaylistContainer(
        resources=resources, title=title, parent_id='SQ:',
        item_id=item_id)
@only_on_master
def remove_sonos_playlist(self, sonos_playlist):
    """Remove a Sonos playlist.

    Args:
        sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove
            or the item_id (str).

    Returns:
        bool: True if succesful, False otherwise

    Raises:
        SoCoUPnPException: If sonos_playlist does not point to a valid
            object.
    """
    # Accept either a playlist object (use its item_id) or a raw id str.
    object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
    return self.contentDirectory.DestroyObject([('ObjectID', object_id)])
def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
    """Adds a queueable item to a Sonos' playlist.

    Args:
        queueable_item (DidlObject): the item to add to the Sonos' playlist
        sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to
            which the item should be added
    """
    # The saved queue's current update counter must accompany the edit.
    response, _ = self.music_library._music_lib_search(
        sonos_playlist.item_id, 0, 1)
    update_id = response['UpdateID']

    self.avTransport.AddURIToSavedQueue([
        ('InstanceID', 0),
        ('UpdateID', update_id),
        ('ObjectID', sonos_playlist.item_id),
        ('EnqueuedURI', queueable_item.resources[0].uri),
        ('EnqueuedURIMetaData', to_didl_string(queueable_item)),
        # 2 ** 32 - 1 = 4294967295, this field has always this value. Most
        # likely, playlist positions are represented as a 32 bit uint and
        # this is therefore the largest index possible. Asking to add at
        # this index therefore probably amounts to adding it "at the end"
        ('AddAtIndex', 4294967295)
    ])
@only_on_master
def set_sleep_timer(self, sleep_time_seconds):
    """Sets the sleep timer.

    Args:
        sleep_time_seconds (int or NoneType): How long to wait before
            turning off speaker in seconds, None to cancel a sleep timer.
            Maximum value of 86399

    Raises:
        SoCoException: Upon errors interacting with Sonos controller
        ValueError: Argument/Syntax errors
    """
    # Note: A value of None for sleep_time_seconds is valid, and needs to
    # be preserved distinctly separate from 0. 0 means go to sleep now,
    # which will immediately start the sound tappering, and could be a
    # useful feature, while None means cancel the current timer
    try:
        if sleep_time_seconds is None:
            sleep_time = ''
        else:
            # The service wants a 'H:MM:SS' duration string;
            # timedelta's default str() format supplies exactly that.
            sleep_time = format(
                datetime.timedelta(seconds=int(sleep_time_seconds))
            )
        self.avTransport.ConfigureSleepTimer([
            ('InstanceID', 0),
            ('NewSleepTimerDuration', sleep_time),
        ])
    except SoCoUPnPException as err:
        # UPnP error 402 signals an out-of-range/invalid duration;
        # surface it as a ValueError for the caller.
        if 'Error 402 received' in str(err):
            raise ValueError('invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None')
        raise
    except ValueError:
        # int() conversion failed on a non-numeric argument.
        raise ValueError('invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None')
@only_on_master
def get_sleep_timer(self):
    """Retrieves remaining sleep time, if any

    Returns:
        int or NoneType: Number of seconds left in timer. If there is no
        sleep timer currently set it will return None.
    """
    resp = self.avTransport.GetRemainingSleepTimerDuration([
        ('InstanceID', 0),
    ])
    remaining = resp['RemainingSleepTimerDuration']
    if not remaining:
        # An empty duration means no timer is set.
        return None
    # The duration comes back as 'H:MM:SS'; convert to whole seconds.
    parts = remaining.split(':')
    return (int(parts[0]) * 3600 +
            int(parts[1]) * 60 +
            int(parts[2]))
@only_on_master
def reorder_sonos_playlist(self, sonos_playlist, tracks, new_pos,
                           update_id=0):
    """Reorder and/or Remove tracks in a Sonos playlist.

    The underlying call is quite complex as it can both move a track
    within the list or delete a track from the playlist.  All of this
    depends on what tracks and new_pos specify.

    If a list is specified for tracks, then a list must be used for
    new_pos. Each list element is a discrete modification and the next
    list operation must anticipate the new state of the playlist.

    If a comma formatted string to tracks is specified, then use
    a similiar string to specify new_pos. Those operations should be
    ordered from the end of the list to the beginning

    See the helper methods
    :py:meth:`clear_sonos_playlist`, :py:meth:`move_in_sonos_playlist`,
    :py:meth:`remove_from_sonos_playlist` for simplified usage.

    update_id - If you have a series of operations, tracking the update_id
    and setting it, will save a lookup operation.

    Examples:
      To reorder the first two tracks::

        # sonos_playlist specified by the DidlPlaylistContainer object
        sonos_playlist = device.get_sonos_playlists()[0]
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=[0, ], new_pos=[1, ])
        # OR specified by the item_id
        device.reorder_sonos_playlist('SQ:0', tracks=[0, ], new_pos=[1, ])

      To delete the second track::

        # tracks/new_pos are a list of int
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=[1, ], new_pos=[None, ])
        # OR tracks/new_pos are a list of int-like
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=['1', ], new_pos=['', ])
        # OR tracks/new_pos are strings - no transform is done
        device.reorder_sonos_playlist(sonos_playlist, tracks='1',
                                      new_pos='')

      To reverse the order of a playlist with 4 items::

        device.reorder_sonos_playlist(sonos_playlist, tracks='3,2,1,0',
                                      new_pos='0,1,2,3')

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`): The
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        tracks: (list): list of track indices(int) to reorder. May also be
            a list of int like things. i.e. ``['0', '1',]`` OR it may be a
            str of comma separated int like things. ``"0,1"``. Tracks are
            **0**-based. Meaning the first track is track 0, just like
            indexing into a Python list.
        new_pos (list): list of new positions (int|None)
            corresponding to track_list. MUST be the same type as
            ``tracks``. **0**-based, see tracks above. ``None`` is the
            indicator to remove the track. If using a list of strings,
            then a remove is indicated by an empty string.
        update_id (int): operation id (default: 0) If set to 0, a lookup
            is done to find the correct value.

    Returns:
        dict: Which contains 3 elements: change, length and update_id.
            Change in size between original playlist and the resulting
            playlist, the length of resulting playlist, and the new
            update_id.

    Raises:
        SoCoUPnPException: If playlist does not exist or if your tracks
            and/or new_pos arguments are invalid.
    """
    # allow either a string 'SQ:10' or an object with item_id attribute.
    object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)

    # Normalise tracks/new_pos into two parallel lists of strings, one
    # entry per discrete reorder/remove operation.
    if isinstance(tracks, UnicodeType):
        track_list = [tracks, ]
        position_list = [new_pos, ]
    elif isinstance(tracks, int):
        track_list = [tracks, ]
        if new_pos is None:
            new_pos = ''
        position_list = [new_pos, ]
    else:
        track_list = [str(x) for x in tracks]
        position_list = [str(x) if x is not None else '' for x in new_pos]
    # track_list = ','.join(track_list)
    # position_list = ','.join(position_list)
    if update_id == 0:  # retrieve the update id for the object
        response, _ = self.music_library._music_lib_search(object_id, 0, 1)
        update_id = response['UpdateID']
    change = 0

    # Each call returns a fresh update_id which must be threaded into
    # the next call.
    # NOTE(review): if every (track, position) pair is a no-op, `length`
    # below is never assigned and the final dict build raises NameError
    # -- confirm whether callers can ever trigger this.
    for track, position in zip(track_list, position_list):
        if track == position:  # there is no move, a no-op
            continue
        response = self.avTransport.ReorderTracksInSavedQueue([
            ("InstanceID", 0),
            ("ObjectID", object_id),
            ("UpdateID", update_id),
            ("TrackList", track),
            ("NewPositionList", position),
        ])
        change += int(response['QueueLengthChange'])
        update_id = int(response['NewUpdateID'])
        length = int(response['NewQueueLength'])
    response = {'change': change,
                'update_id': update_id,
                'length': length}
    return response
@only_on_master
def clear_sonos_playlist(self, sonos_playlist, update_id=0):
    """Remove every track from a Sonos playlist.

    Convenience wrapper around :py:meth:`reorder_sonos_playlist`.

    Example::

        device.clear_sonos_playlist(sonos_playlist)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object, or the item_id (str) of the Sonos
            playlist to clear.
        update_id (int): Optional update counter for the object; the
            default of 0 triggers a lookup.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        ValueError: If ``sonos_playlist`` is given as a string and no
            playlist with that item_id exists.
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # Resolve a bare item_id string into a playlist object first.
    if not isinstance(sonos_playlist, DidlPlaylistContainer):
        sonos_playlist = self.get_sonos_playlist_by_attr(
            'item_id', sonos_playlist)
    track_count = self.music_library.browse(
        ml_item=sonos_playlist).total_matches
    if track_count == 0:
        # Nothing to delete; report a no-op without touching the device.
        return {'change': 0, 'update_id': update_id, 'length': track_count}
    # Ask for every 0-based index to be removed in a single request.
    all_indexes = ','.join(str(index) for index in range(track_count))
    return self.reorder_sonos_playlist(sonos_playlist, tracks=all_indexes,
                                       new_pos='', update_id=update_id)
@only_on_master
def move_in_sonos_playlist(self, sonos_playlist, track, new_pos,
                           update_id=0):
    """Move a track to a new position within a Sonos Playlist.

    Convenience wrapper around :py:meth:`reorder_sonos_playlist`.

    Example::

        device.move_in_sonos_playlist(sonos_playlist, track=0, new_pos=1)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to move. The first
            track is track 0, just like indexing into a Python list.
        new_pos (int): **0**-based location to move the track to.
        update_id (int): Optional update counter for the object; the
            default of 0 triggers a lookup.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # Coerce positions up front so string arguments behave like ints.
    source_index = int(track)
    target_index = int(new_pos)
    return self.reorder_sonos_playlist(sonos_playlist, source_index,
                                       target_index, update_id)
@only_on_master
def remove_from_sonos_playlist(self, sonos_playlist, track, update_id=0):
    """Remove a track from a Sonos Playlist.

    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.remove_from_sonos_playlist(sonos_playlist, track=0)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to remove. The
            first track is track 0, just like indexing into a Python
            list.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # A new position of None is reorder_sonos_playlist's removal marker.
    return self.reorder_sonos_playlist(sonos_playlist, int(track), None,
                                       update_id)
@only_on_master
def get_sonos_playlist_by_attr(self, attr_name, match):
    """Return the first Sonos playlist whose *attr_name* equals *match*.

    Args:
        attr_name (str): DidlPlaylistContainer attribute to compare. The
            most useful being: 'title' and 'item_id'.
        match (str): Value to match.

    Returns:
        (:class:`~.soco.data_structures.DidlPlaylistContainer`): The
        first matching playlist object.

    Raises:
        (AttributeError): If indicated attribute name does not exist.
        (ValueError): If a match can not be found.

    Example::

        device.get_sonos_playlist_by_attr('title', 'Foo')
        device.get_sonos_playlist_by_attr('item_id', 'SQ:3')
    """
    # Lazily filter the playlists; getattr raises AttributeError for a
    # bad attribute name, exactly as an explicit loop would.
    candidates = (playlist for playlist in self.get_sonos_playlists()
                  if getattr(playlist, attr_name) == match)
    found = next(candidates, None)
    if found is None:
        raise ValueError('No match on "{0}" for value "{1}"'.format(
            attr_name, match))
    return found
|
amelchio/pysonos
|
pysonos/core.py
|
SoCo.set_sleep_timer
|
python
|
def set_sleep_timer(self, sleep_time_seconds):
    """Set the sleep timer.

    Args:
        sleep_time_seconds (int or NoneType): How long to wait before
            turning off the speaker, in seconds; ``None`` cancels any
            running sleep timer. Maximum value is 86399 (23:59:59).

    Raises:
        SoCoException: Upon errors interacting with Sonos controller
        ValueError: If ``sleep_time_seconds`` is not ``None`` and cannot
            be interpreted as an integer in the range 0..86399.
    """
    # Note: None is distinct from 0 and must be preserved as such.
    # 0 means "go to sleep now" (the sound taper starts immediately),
    # while None cancels the current timer.
    if sleep_time_seconds is None:
        sleep_time = ''
    else:
        try:
            seconds = int(sleep_time_seconds)
        except (TypeError, ValueError):
            raise ValueError('invalid sleep_time_seconds, must be integer '
                             'value between 0 and 86399 inclusive or None')
        # Validate locally instead of round-tripping to the device only
        # for it to reject the request with UPnP error 402.
        if not 0 <= seconds <= 86399:
            raise ValueError('invalid sleep_time_seconds, must be integer '
                             'value between 0 and 86399 inclusive or None')
        sleep_time = format(datetime.timedelta(seconds=seconds))
    try:
        self.avTransport.ConfigureSleepTimer([
            ('InstanceID', 0),
            ('NewSleepTimerDuration', sleep_time),
        ])
    except SoCoUPnPException as err:
        # Keep mapping the device's 402 (bad argument) error onto
        # ValueError for backwards compatibility with existing callers.
        if 'Error 402 received' in str(err):
            raise ValueError('invalid sleep_time_seconds, must be integer '
                             'value between 0 and 86399 inclusive or None')
        raise
|
Sets the sleep timer.
Args:
sleep_time_seconds (int or NoneType): How long to wait before
turning off speaker in seconds, None to cancel a sleep timer.
Maximum value of 86399
Raises:
SoCoException: Upon errors interacting with Sonos controller
ValueError: Argument/Syntax errors
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/core.py#L1714-L1749
| null |
class SoCo(_SocoSingletonBase):
"""A simple class for controlling a Sonos speaker.
For any given set of arguments to __init__, only one instance of this class
may be created. Subsequent attempts to create an instance with the same
arguments will return the previously created instance. This means that all
SoCo instances created with the same ip address are in fact the *same* SoCo
instance, reflecting the real world position.
.. rubric:: Basic Methods
.. autosummary::
play_from_queue
play
play_uri
pause
stop
seek
next
previous
mute
volume
play_mode
cross_fade
ramp_to_volume
get_current_track_info
get_speaker_info
get_current_transport_info
.. rubric:: Queue Management
.. autosummary::
get_queue
queue_size
add_to_queue
add_uri_to_queue
add_multiple_to_queue
remove_from_queue
clear_queue
.. rubric:: Group Management
.. autosummary::
group
partymode
join
unjoin
all_groups
all_zones
visible_zones
.. rubric:: Player Identity and Settings
.. autosummary::
player_name
uid
household_id
is_visible
is_bridge
is_coordinator
is_soundbar
bass
treble
loudness
night_mode
dialog_mode
status_light
.. rubric:: Playlists and Favorites
.. autosummary::
get_sonos_playlists
create_sonos_playlist
create_sonos_playlist_from_queue
remove_sonos_playlist
add_item_to_sonos_playlist
reorder_sonos_playlist
clear_sonos_playlist
move_in_sonos_playlist
remove_from_sonos_playlist
get_sonos_playlist_by_attr
get_favorite_radio_shows
get_favorite_radio_stations
get_sonos_favorites
.. rubric:: Miscellaneous
.. autosummary::
switch_to_line_in
is_playing_radio
is_playing_line_in
is_playing_tv
switch_to_tv
set_sleep_timer
get_sleep_timer
.. warning::
Properties on this object are not generally cached and may obtain
information over the network, so may take longer than expected to set
or return a value. It may be a good idea for you to cache the value in
your own code.
.. note::
Since all methods/properties on this object will result in an UPnP
request, they might result in an exception without it being mentioned
in the Raises section.
In most cases, the exception will be a
:class:`soco.exceptions.SoCoUPnPException`
(if the player returns an UPnP error code), but in special cases
it might also be another :class:`soco.exceptions.SoCoException`
or even a `requests` exception.
"""
_class_group = 'SoCo'
# pylint: disable=super-on-old-class
def __init__(self, ip_address):
    """Initialise a SoCo controller for the speaker at *ip_address*.

    Args:
        ip_address (str): The IPv4 address of the speaker. Sonos does
            not (yet) support IPv6, so only IPv4 strings are accepted.

    Raises:
        ValueError: If *ip_address* is not a valid IPv4 address string.
    """
    # Note: Creation of a SoCo instance should be as cheap and quick as
    # possible. Do not make any network calls here
    super(SoCo, self).__init__()
    # Check if ip_address is a valid IPv4 representation.
    # Sonos does not (yet) support IPv6
    try:
        socket.inet_aton(ip_address)
    except socket.error:
        raise ValueError("Not a valid IP address string")
    #: The speaker's ip address
    self.ip_address = ip_address
    self.speaker_info = {}  # Stores information about the current speaker
    # The UPnP service proxies which we use to talk to the device
    # pylint: disable=invalid-name
    self.avTransport = AVTransport(self)
    self.contentDirectory = ContentDirectory(self)
    self.deviceProperties = DeviceProperties(self)
    self.renderingControl = RenderingControl(self)
    self.zoneGroupTopology = ZoneGroupTopology(self)
    self.alarmClock = AlarmClock(self)
    self.systemProperties = SystemProperties(self)
    self.musicServices = MusicServices(self)
    self.music_library = MusicLibrary(self)
    # Some private attributes, lazily populated from the zone group
    # topology by _parse_zone_group_state()
    self._all_zones = set()
    self._groups = set()
    self._is_bridge = None  # None means "not yet determined"
    self._is_coordinator = False
    self._is_soundbar = None  # None means "not yet determined"
    self._player_name = None
    self._uid = None
    self._household_id = None
    self._visible_zones = set()
    # Short-lived network cache for GetZoneGroupState responses
    self._zgs_cache = Cache(default_timeout=5)
    self._zgs_result = None
    _LOG.debug("Created SoCo instance for ip: %s", ip_address)
def __str__(self):
return "<{0} object at ip {1}>".format(
self.__class__.__name__, self.ip_address)
def __repr__(self):
return '{0}("{1}")'.format(self.__class__.__name__, self.ip_address)
@property
def player_name(self):
"""str: The speaker's name."""
# We could get the name like this:
# result = self.deviceProperties.GetZoneAttributes()
# return result["CurrentZoneName"]
# but it is probably quicker to get it from the group topology
# and take advantage of any caching
self._parse_zone_group_state()
return self._player_name
@player_name.setter
def player_name(self, playername):
"""Set the speaker's name."""
self.deviceProperties.SetZoneAttributes([
('DesiredZoneName', playername),
('DesiredIcon', ''),
('DesiredConfiguration', '')
])
@property
def uid(self):
"""str: A unique identifier.
Looks like: ``'RINCON_000XXXXXXXXXX1400'``
"""
# Since this does not change over time (?) check whether we already
# know the answer. If so, there is no need to go further
if self._uid is not None:
return self._uid
# if not, we have to get it from the zone topology, which
# is probably quicker than any alternative, since the zgt is probably
# cached. This will set self._uid for us for next time, so we won't
# have to do this again
self._parse_zone_group_state()
return self._uid
# An alternative way of getting the uid is as follows:
# self.device_description_url = \
# 'http://{0}:1400/xml/device_description.xml'.format(
# self.ip_address)
# response = requests.get(self.device_description_url).text
# tree = XML.fromstring(response.encode('utf-8'))
# udn = tree.findtext('.//{urn:schemas-upnp-org:device-1-0}UDN')
# # the udn has a "uuid:" prefix before the uid, so we need to strip it
# self._uid = uid = udn[5:]
# return uid
@property
def household_id(self):
"""str: A unique identifier for all players in a household.
Looks like: ``'Sonos_asahHKgjgJGjgjGjggjJgjJG34'``
"""
# Since this does not change over time (?) check whether we already
# know the answer. If so, return the cached version
if self._household_id is None:
self._household_id = self.deviceProperties.GetHouseholdID()[
'CurrentHouseholdID']
return self._household_id
@property
def is_visible(self):
"""bool: Is this zone visible?
A zone might be invisible if, for example, it is a bridge, or the slave
part of stereo pair.
"""
# We could do this:
# invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
# but it is better to do it in the following way, which uses the
# zone group topology, to capitalise on any caching.
return self in self.visible_zones
@property
def is_bridge(self):
"""bool: Is this zone a bridge?"""
# Since this does not change over time (?) check whether we already
# know the answer. If so, there is no need to go further
if self._is_bridge is not None:
return self._is_bridge
# if not, we have to get it from the zone topology. This will set
# self._is_bridge for us for next time, so we won't have to do this
# again
self._parse_zone_group_state()
return self._is_bridge
@property
def is_coordinator(self):
"""bool: Is this zone a group coordinator?"""
# We could do this:
# invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
# but it is better to do it in the following way, which uses the
# zone group topology, to capitalise on any caching.
self._parse_zone_group_state()
return self._is_coordinator
@property
def is_soundbar(self):
"""bool: Is this zone a soundbar (i.e. has night mode etc.)?"""
if self._is_soundbar is None:
if not self.speaker_info:
self.get_speaker_info()
model_name = self.speaker_info['model_name'].lower()
self._is_soundbar = any(model_name.endswith(s) for s in SOUNDBARS)
return self._is_soundbar
@property
def play_mode(self):
"""str: The queue's play mode.
Case-insensitive options are:
* ``'NORMAL'`` -- Turns off shuffle and repeat.
* ``'REPEAT_ALL'`` -- Turns on repeat and turns off shuffle.
* ``'SHUFFLE'`` -- Turns on shuffle *and* repeat. (It's
strange, I know.)
* ``'SHUFFLE_NOREPEAT'`` -- Turns on shuffle and turns off
repeat.
"""
result = self.avTransport.GetTransportSettings([
('InstanceID', 0),
])
return result['PlayMode']
@play_mode.setter
def play_mode(self, playmode):
"""Set the speaker's mode."""
playmode = playmode.upper()
if playmode not in PLAY_MODES.keys():
raise KeyError("'%s' is not a valid play mode" % playmode)
self.avTransport.SetPlayMode([
('InstanceID', 0),
('NewPlayMode', playmode)
])
@property
def shuffle(self):
"""bool: The queue's shuffle option.
True if enabled, False otherwise.
"""
return PLAY_MODES[self.play_mode][0]
@shuffle.setter
def shuffle(self, shuffle):
"""Set the queue's shuffle option."""
repeat = self.repeat
self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, repeat)]
@property
def repeat(self):
"""bool: The queue's repeat option.
True if enabled, False otherwise.
Might also be ``'ONE'`` if repeating the same title is enabled
(not supported by the official controller).
"""
return PLAY_MODES[self.play_mode][1]
@repeat.setter
def repeat(self, repeat):
"""Set the queue's repeat option"""
shuffle = self.shuffle
self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, repeat)]
@property
@only_on_master # Only for symmetry with the setter
def cross_fade(self):
"""bool: The speaker's cross fade state.
True if enabled, False otherwise
"""
response = self.avTransport.GetCrossfadeMode([
('InstanceID', 0),
])
cross_fade_state = response['CrossfadeMode']
return bool(int(cross_fade_state))
@cross_fade.setter
@only_on_master
def cross_fade(self, crossfade):
"""Set the speaker's cross fade state."""
crossfade_value = '1' if crossfade else '0'
self.avTransport.SetCrossfadeMode([
('InstanceID', 0),
('CrossfadeMode', crossfade_value)
])
def ramp_to_volume(self, volume, ramp_type='SLEEP_TIMER_RAMP_TYPE'):
"""Smoothly change the volume.
There are three ramp types available:
* ``'SLEEP_TIMER_RAMP_TYPE'`` (default): Linear ramp from the
current volume up or down to the new volume. The ramp rate is
1.25 steps per second. For example: To change from volume 50 to
volume 30 would take 16 seconds.
* ``'ALARM_RAMP_TYPE'``: Resets the volume to zero, waits for about
30 seconds, and then ramps the volume up to the desired value at
a rate of 2.5 steps per second. For example: Volume 30 would take
12 seconds for the ramp up (not considering the wait time).
* ``'AUTOPLAY_RAMP_TYPE'``: Resets the volume to zero and then
quickly ramps up at a rate of 50 steps per second. For example:
Volume 30 will take only 0.6 seconds.
The ramp rate is selected by Sonos based on the chosen ramp type and
the resulting transition time returned.
This method is non blocking and has no network overhead once sent.
Args:
volume (int): The new volume.
ramp_type (str, optional): The desired ramp type, as described
above.
Returns:
int: The ramp time in seconds, rounded down. Note that this does
not include the wait time.
"""
response = self.renderingControl.RampToVolume([
('InstanceID', 0),
('Channel', 'Master'),
('RampType', ramp_type),
('DesiredVolume', volume),
('ResetVolumeAfter', False),
('ProgramURI', '')
])
return int(response['RampTime'])
@only_on_master
def play_from_queue(self, index, start=True):
"""Play a track from the queue by index.
The index number is required as an argument, where the first index
is 0.
Args:
index (int): 0-based index of the track to play
start (bool): If the item that has been set should start playing
"""
# Grab the speaker's information if we haven't already since we'll need
# it in the next step.
if not self.speaker_info:
self.get_speaker_info()
# first, set the queue itself as the source URI
uri = 'x-rincon-queue:{0}#0'.format(self.uid)
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', uri),
('CurrentURIMetaData', '')
])
# second, set the track number with a seek command
self.avTransport.Seek([
('InstanceID', 0),
('Unit', 'TRACK_NR'),
('Target', index + 1)
])
# finally, just play what's set if needed
if start:
self.play()
@only_on_master
def play(self):
"""Play the currently selected track."""
self.avTransport.Play([
('InstanceID', 0),
('Speed', 1)
])
@only_on_master
# pylint: disable=too-many-arguments
def play_uri(self, uri='', meta='', title='', start=True,
force_radio=False):
"""Play a URI.
Playing a URI will replace what was playing with the stream given by
the URI. For some streams at least a title is required as metadata.
This can be provided using the `meta` argument or the `title` argument.
If the `title` argument is provided minimal metadata will be generated.
If `meta` argument is provided the `title` argument is ignored.
Args:
uri (str): URI of the stream to be played.
meta (str): The metadata to show in the player, DIDL format.
title (str): The title to show in the player (if no meta).
start (bool): If the URI that has been set should start playing.
force_radio (bool): forces a uri to play as a radio stream.
On a Sonos controller music is shown with one of the following display
formats and controls:
* Radio format: Shows the name of the radio station and other available
data. No seek, next, previous, or voting capability.
Examples: TuneIn, radioPup
* Smart Radio: Shows track name, artist, and album. Limited seek, next
and sometimes voting capability depending on the Music Service.
Examples: Amazon Prime Stations, Pandora Radio Stations.
* Track format: Shows track name, artist, and album the same as when
playing from a queue. Full seek, next and previous capabilities.
Examples: Spotify, Napster, Rhapsody.
How it is displayed is determined by the URI prefix:
`x-sonosapi-stream:`, `x-sonosapi-radio:`, `x-rincon-mp3radio:`,
`hls-radio:` default to radio or smart radio format depending on the
stream. Others default to track format: `x-file-cifs:`, `aac:`,
`http:`, `https:`, `x-sonos-spotify:` (used by Spotify),
`x-sonosapi-hls-static:` (Amazon Prime),
`x-sonos-http:` (Google Play & Napster).
Some URIs that default to track format could be radio streams,
typically `http:`, `https:` or `aac:`.
To force display and controls to Radio format set `force_radio=True`
.. note:: Other URI prefixes exist but are less common.
If you have information on these please add to this doc string.
.. note:: A change in Sonos® (as of at least version 6.4.2) means that
the devices no longer accepts ordinary `http:` and `https:` URIs for
radio stations. This method has the option to replaces these
prefixes with the one that Sonos® expects: `x-rincon-mp3radio:` by
using the "force_radio=True" parameter.
A few streams may fail if not forced to to Radio format.
"""
if meta == '' and title != '':
meta_template = '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements'\
'/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" '\
'xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/" '\
'xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">'\
'<item id="R:0/0/0" parentID="R:0/0" restricted="true">'\
'<dc:title>{title}</dc:title><upnp:class>'\
'object.item.audioItem.audioBroadcast</upnp:class><desc '\
'id="cdudn" nameSpace="urn:schemas-rinconnetworks-com:'\
'metadata-1-0/">{service}</desc></item></DIDL-Lite>'
tunein_service = 'SA_RINCON65031_'
# Radio stations need to have at least a title to play
meta = meta_template.format(
title=escape(title),
service=tunein_service)
# change uri prefix to force radio style display and commands
if force_radio:
colon = uri.find(':')
if colon > 0:
uri = 'x-rincon-mp3radio{0}'.format(uri[colon:])
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', uri),
('CurrentURIMetaData', meta)
])
# The track is enqueued, now play it if needed
if start:
return self.play()
return False
@only_on_master
def pause(self):
"""Pause the currently playing track."""
self.avTransport.Pause([
('InstanceID', 0),
('Speed', 1)
])
@only_on_master
def stop(self):
"""Stop the currently playing track."""
self.avTransport.Stop([
('InstanceID', 0),
('Speed', 1)
])
@only_on_master
def seek(self, timestamp):
    """Seek to a given timestamp in the current track.

    Args:
        timestamp (str): The desired position, in ``HH:MM:SS`` or
            ``H:MM:SS`` format.

    Raises:
        ValueError: if the given timestamp is invalid (wrong shape, or
            minutes/seconds outside 0-59).
    """
    # The previous pattern accepted minutes/seconds up to 99, which
    # contradicts the documented HH:MM:SS contract; constrain both
    # fields to 00-59 so malformed values fail fast locally instead of
    # at the device.
    if not re.match(r'^[0-9][0-9]?:[0-5][0-9]:[0-5][0-9]$', timestamp):
        raise ValueError('invalid timestamp, use HH:MM:SS format')
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'REL_TIME'),
        ('Target', timestamp)
    ])
@only_on_master
def next(self):
"""Go to the next track.
Keep in mind that next() can return errors
for a variety of reasons. For example, if the Sonos is streaming
Pandora and you call next() several times in quick succession an error
code will likely be returned (since Pandora has limits on how many
songs can be skipped).
"""
self.avTransport.Next([
('InstanceID', 0),
('Speed', 1)
])
@only_on_master
def previous(self):
"""Go back to the previously played track.
Keep in mind that previous() can return errors
for a variety of reasons. For example, previous() will return an error
code (error code 701) if the Sonos is streaming Pandora since you can't
go back on tracks.
"""
self.avTransport.Previous([
('InstanceID', 0),
('Speed', 1)
])
@property
def mute(self):
"""bool: The speaker's mute state.
True if muted, False otherwise.
"""
response = self.renderingControl.GetMute([
('InstanceID', 0),
('Channel', 'Master')
])
mute_state = response['CurrentMute']
return bool(int(mute_state))
@mute.setter
def mute(self, mute):
"""Mute (or unmute) the speaker."""
mute_value = '1' if mute else '0'
self.renderingControl.SetMute([
('InstanceID', 0),
('Channel', 'Master'),
('DesiredMute', mute_value)
])
@property
def volume(self):
"""int: The speaker's volume.
An integer between 0 and 100.
"""
response = self.renderingControl.GetVolume([
('InstanceID', 0),
('Channel', 'Master'),
])
volume = response['CurrentVolume']
return int(volume)
@volume.setter
def volume(self, volume):
"""Set the speaker's volume."""
volume = int(volume)
volume = max(0, min(volume, 100)) # Coerce in range
self.renderingControl.SetVolume([
('InstanceID', 0),
('Channel', 'Master'),
('DesiredVolume', volume)
])
@property
def bass(self):
"""int: The speaker's bass EQ.
An integer between -10 and 10.
"""
response = self.renderingControl.GetBass([
('InstanceID', 0),
('Channel', 'Master'),
])
bass = response['CurrentBass']
return int(bass)
@bass.setter
def bass(self, bass):
"""Set the speaker's bass."""
bass = int(bass)
bass = max(-10, min(bass, 10)) # Coerce in range
self.renderingControl.SetBass([
('InstanceID', 0),
('DesiredBass', bass)
])
@property
def treble(self):
"""int: The speaker's treble EQ.
An integer between -10 and 10.
"""
response = self.renderingControl.GetTreble([
('InstanceID', 0),
('Channel', 'Master'),
])
treble = response['CurrentTreble']
return int(treble)
@treble.setter
def treble(self, treble):
"""Set the speaker's treble."""
treble = int(treble)
treble = max(-10, min(treble, 10)) # Coerce in range
self.renderingControl.SetTreble([
('InstanceID', 0),
('DesiredTreble', treble)
])
@property
def loudness(self):
"""bool: The Sonos speaker's loudness compensation.
True if on, False otherwise.
Loudness is a complicated topic. You can find a nice summary about this
feature here: http://forums.sonos.com/showthread.php?p=4698#post4698
"""
response = self.renderingControl.GetLoudness([
('InstanceID', 0),
('Channel', 'Master'),
])
loudness = response["CurrentLoudness"]
return bool(int(loudness))
@loudness.setter
def loudness(self, loudness):
"""Switch on/off the speaker's loudness compensation."""
loudness_value = '1' if loudness else '0'
self.renderingControl.SetLoudness([
('InstanceID', 0),
('Channel', 'Master'),
('DesiredLoudness', loudness_value)
])
@property
def night_mode(self):
"""bool: The speaker's night mode.
True if on, False if off, None if not supported.
"""
if not self.is_soundbar:
return None
response = self.renderingControl.GetEQ([
('InstanceID', 0),
('EQType', 'NightMode')
])
return bool(int(response['CurrentValue']))
@night_mode.setter
def night_mode(self, night_mode):
"""Switch on/off the speaker's night mode.
:param night_mode: Enable or disable night mode
:type night_mode: bool
:raises NotSupportedException: If the device does not support
night mode.
"""
if not self.is_soundbar:
message = 'This device does not support night mode'
raise NotSupportedException(message)
self.renderingControl.SetEQ([
('InstanceID', 0),
('EQType', 'NightMode'),
('DesiredValue', int(night_mode))
])
@property
def dialog_mode(self):
"""bool: Get the Sonos speaker's dialog mode.
True if on, False if off, None if not supported.
"""
if not self.is_soundbar:
return None
response = self.renderingControl.GetEQ([
('InstanceID', 0),
('EQType', 'DialogLevel')
])
return bool(int(response['CurrentValue']))
@dialog_mode.setter
def dialog_mode(self, dialog_mode):
"""Switch on/off the speaker's dialog mode.
:param dialog_mode: Enable or disable dialog mode
:type dialog_mode: bool
:raises NotSupportedException: If the device does not support
dialog mode.
"""
if not self.is_soundbar:
message = 'This device does not support dialog mode'
raise NotSupportedException(message)
self.renderingControl.SetEQ([
('InstanceID', 0),
('EQType', 'DialogLevel'),
('DesiredValue', int(dialog_mode))
])
def _parse_zone_group_state(self):
"""The Zone Group State contains a lot of useful information.
Retrieve and parse it, and populate the relevant properties.
"""
# zoneGroupTopology.GetZoneGroupState()['ZoneGroupState'] returns XML like
# this:
#
# <ZoneGroups>
# <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXXX1400:0">
# <ZoneGroupMember
# BootSeq="33"
# Configuration="1"
# Icon="x-rincon-roomicon:zoneextender"
# Invisible="1"
# IsZoneBridge="1"
# Location="http://192.168.1.100:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000ZZZ1400"
# ZoneName="BRIDGE"/>
# </ZoneGroup>
# <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXX1400:46">
# <ZoneGroupMember
# BootSeq="44"
# Configuration="1"
# Icon="x-rincon-roomicon:living"
# Location="http://192.168.1.101:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000XXX1400"
# ZoneName="Living Room"/>
# <ZoneGroupMember
# BootSeq="52"
# Configuration="1"
# Icon="x-rincon-roomicon:kitchen"
# Location="http://192.168.1.102:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000YYY1400"
# ZoneName="Kitchen"/>
# </ZoneGroup>
# </ZoneGroups>
#
def parse_zone_group_member(member_element):
"""Parse a ZoneGroupMember or Satellite element from Zone Group
State, create a SoCo instance for the member, set basic attributes
and return it."""
# Create a SoCo instance for each member. Because SoCo
# instances are singletons, this is cheap if they have already
# been created, and useful if they haven't. We can then
# update various properties for that instance.
member_attribs = member_element.attrib
ip_addr = member_attribs['Location'].\
split('//')[1].split(':')[0]
zone = config.SOCO_CLASS(ip_addr)
# share our cache
zone._zgs_cache = self._zgs_cache
# uid doesn't change, but it's not harmful to (re)set it, in case
# the zone is as yet unseen.
zone._uid = member_attribs['UUID']
zone._player_name = member_attribs['ZoneName']
# add the zone to the set of all members, and to the set
# of visible members if appropriate
is_visible = (member_attribs.get('Invisible') != '1')
if is_visible:
self._visible_zones.add(zone)
self._all_zones.add(zone)
return zone
# This is called quite frequently, so it is worth optimising it.
# Maintain a private cache. If the zgt has not changed, there is no
# need to repeat all the XML parsing. In addition, switch on network
# caching for a short interval (5 secs).
zgs = self.zoneGroupTopology.GetZoneGroupState(
cache=self._zgs_cache)['ZoneGroupState']
if zgs == self._zgs_result:
return
self._zgs_result = zgs
tree = XML.fromstring(zgs.encode('utf-8'))
# Empty the set of all zone_groups
self._groups.clear()
# and the set of all members
self._all_zones.clear()
self._visible_zones.clear()
# With some versions, the response is wrapped in ZoneGroupState
tree = tree.find('ZoneGroups') or tree
# Loop over each ZoneGroup Element
for group_element in tree.findall('ZoneGroup'):
coordinator_uid = group_element.attrib['Coordinator']
group_uid = group_element.attrib['ID']
group_coordinator = None
members = set()
for member_element in group_element.findall('ZoneGroupMember'):
zone = parse_zone_group_member(member_element)
# Perform extra processing relevant to direct zone group
# members
#
# If this element has the same UUID as the coordinator, it is
# the coordinator
if zone._uid == coordinator_uid:
group_coordinator = zone
zone._is_coordinator = True
else:
zone._is_coordinator = False
# is_bridge doesn't change, but it does no real harm to
# set/reset it here, just in case the zone has not been seen
# before
zone._is_bridge = (
member_element.attrib.get('IsZoneBridge') == '1')
# add the zone to the members for this group
members.add(zone)
# Loop over Satellite elements if present, and process as for
# ZoneGroup elements
for satellite_element in member_element.findall('Satellite'):
zone = parse_zone_group_member(satellite_element)
# Assume a satellite can't be a bridge or coordinator, so
# no need to check.
#
# Add the zone to the members for this group.
members.add(zone)
# Now create a ZoneGroup with this info and add it to the list
# of groups
self._groups.add(ZoneGroup(group_uid, group_coordinator, members))
@property
def all_groups(self):
"""set of :class:`soco.groups.ZoneGroup`: All available groups."""
self._parse_zone_group_state()
return self._groups.copy()
@property
def group(self):
    """:class:`soco.groups.ZoneGroup`: The Zone Group of which this device
    is a member.

    None if this zone is a slave in a stereo pair.
    """
    # Scan the cached topology (via all_groups) for the group containing
    # this zone. This avoids the extra network round trip that querying
    # GetZoneGroupAttributes directly would incur.
    return next((candidate for candidate in self.all_groups
                 if self in candidate), None)
@property
def all_zones(self):
"""set of :class:`soco.groups.ZoneGroup`: All available zones."""
self._parse_zone_group_state()
return self._all_zones.copy()
@property
def visible_zones(self):
"""set of :class:`soco.groups.ZoneGroup`: All visible zones."""
self._parse_zone_group_state()
return self._visible_zones.copy()
def partymode(self):
    """Put all the speakers in the network in the same group, a.k.a Party
    Mode.

    This blog shows the initial research responsible for this:
    http://blog.travelmarx.com/2010/06/exploring-sonos-via-upnp.html

    The trick seems to be (only tested on a two-speaker setup) to tell each
    speaker which to join. There's probably a bit more to it if multiple
    groups have been defined.
    """
    # Tell every other visible zone to join this one. A plain loop is
    # used instead of a list comprehension executed purely for its side
    # effects (the previous form, which needed a pylint suppression).
    for zone in self.visible_zones:
        if zone is not self:
            zone.join(self)
def join(self, master):
"""Join this speaker to another "master" speaker."""
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon:{0}'.format(master.uid)),
('CurrentURIMetaData', '')
])
self._zgs_cache.clear()
self._parse_zone_group_state()
def unjoin(self):
    """Remove this speaker from a group.

    Seems to work ok even if you remove what was previously the group
    master from it's own group. If the speaker was not in a group also
    returns ok.
    """
    self.avTransport.BecomeCoordinatorOfStandaloneGroup(
        [('InstanceID', 0)])
    # The topology just changed: drop the cache and re-read it.
    self._zgs_cache.clear()
    self._parse_zone_group_state()
def switch_to_line_in(self, source=None):
    """Switch the speaker's input to line-in.

    Args:
        source (SoCo): The speaker whose line-in should be played.
            Default is line-in from the speaker itself.
    """
    # Use the given speaker's line-in, falling back to our own.
    uid = source.uid if source else self.uid
    self.avTransport.SetAVTransportURI(
        [('InstanceID', 0),
         ('CurrentURI', 'x-rincon-stream:{0}'.format(uid)),
         ('CurrentURIMetaData', '')])
@property
def is_playing_radio(self):
    """bool: Is the speaker playing radio?"""
    position = self.avTransport.GetPositionInfo(
        [('InstanceID', 0), ('Channel', 'Master')])
    # Radio streams are identified by their URI scheme.
    return position['TrackURI'].startswith('x-rincon-mp3radio:')
@property
def is_playing_line_in(self):
    """bool: Is the speaker playing line-in?"""
    position = self.avTransport.GetPositionInfo(
        [('InstanceID', 0), ('Channel', 'Master')])
    # Line-in playback is identified by its URI scheme.
    return position['TrackURI'].startswith('x-rincon-stream:')
@property
def is_playing_tv(self):
    """bool: Is the playbar speaker input from TV?"""
    position = self.avTransport.GetPositionInfo(
        [('InstanceID', 0), ('Channel', 'Master')])
    # TV (home theater) input is identified by its URI scheme.
    return position['TrackURI'].startswith('x-sonos-htastream:')
def switch_to_tv(self):
    """Switch the playbar speaker's input to TV."""
    tv_uri = 'x-sonos-htastream:{0}:spdif'.format(self.uid)
    self.avTransport.SetAVTransportURI(
        [('InstanceID', 0),
         ('CurrentURI', tv_uri),
         ('CurrentURIMetaData', '')])
@property
def status_light(self):
    """bool: The white Sonos status light between the mute button and the
    volume up button on the speaker.

    True if on, otherwise False.
    """
    state = self.deviceProperties.GetLEDState()['CurrentLEDState']
    return state == "On"
@status_light.setter
def status_light(self, led_on):
    """Switch on/off the speaker's status light."""
    # The service expects the literal strings 'On' / 'Off'.
    self.deviceProperties.SetLEDState(
        [('DesiredLEDState', 'On' if led_on else 'Off')])
def get_current_track_info(self):
    """Get information about the currently playing track.

    Returns:
        dict: A dictionary containing information about the currently
        playing track: playlist_position, duration, title, artist, album,
        position and an album_art link.

    If we're unable to return data for a field, we'll return an empty
    string. This can happen for all kinds of reasons so be sure to check
    values. For example, a track may not have complete metadata and be
    missing an album name. In this case track['album'] will be an empty
    string.

    .. note:: Calling this method on a slave in a group will not
        return the track the group is playing, but the last track
        this speaker was playing.
    """
    response = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    # Start with empty-string defaults so callers can rely on the keys
    # being present even when metadata is missing.
    track = {'title': '', 'artist': '', 'album': '', 'album_art': '',
             'position': ''}
    track['playlist_position'] = response['Track']
    track['duration'] = response['TrackDuration']
    track['uri'] = response['TrackURI']
    track['position'] = response['RelTime']
    metadata = response['TrackMetaData']
    # Store the entire Metadata entry in the track, this can then be
    # used if needed by the client to restart a given URI
    track['metadata'] = metadata
    # Duration seems to be '0:00:00' when listening to radio
    if metadata != '' and track['duration'] == '0:00:00':
        metadata = XML.fromstring(really_utf8(metadata))
        # Try parse trackinfo
        trackinfo = metadata.findtext('.//{urn:schemas-rinconnetworks-com:'
                                      'metadata-1-0/}streamContent') or ''
        # Radio streamContent appears to carry 'Artist - Title'; split on
        # the first ' - ' separator.
        index = trackinfo.find(' - ')
        if index > -1:
            track['artist'] = trackinfo[:index]
            track['title'] = trackinfo[index + 3:]
        else:
            # Might find some kind of title anyway in metadata
            track['title'] = metadata.findtext('.//{http://purl.org/dc/'
                                               'elements/1.1/}title')
            if not track['title']:
                track['title'] = trackinfo
    # If the speaker is playing from the line-in source, querying for track
    # metadata will return "NOT_IMPLEMENTED".
    elif metadata not in ('', 'NOT_IMPLEMENTED', None):
        # Track metadata is returned in DIDL-Lite format
        metadata = XML.fromstring(really_utf8(metadata))
        md_title = metadata.findtext(
            './/{http://purl.org/dc/elements/1.1/}title')
        md_artist = metadata.findtext(
            './/{http://purl.org/dc/elements/1.1/}creator')
        md_album = metadata.findtext(
            './/{urn:schemas-upnp-org:metadata-1-0/upnp/}album')
        # Overwrite the defaults only when the DIDL-Lite element exists.
        track['title'] = ""
        if md_title:
            track['title'] = md_title
        track['artist'] = ""
        if md_artist:
            track['artist'] = md_artist
        track['album'] = ""
        if md_album:
            track['album'] = md_album
        album_art_url = metadata.findtext(
            './/{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')
        if album_art_url is not None:
            # Album art URIs are relative; expand to a full http URI.
            track['album_art'] = \
                self.music_library.build_album_art_full_uri(album_art_url)
    return track
def get_speaker_info(self, refresh=False, timeout=None):
    """Get information about the Sonos speaker.

    Arguments:
        refresh(bool): Refresh the speaker info cache.
        timeout: How long to wait for the server to send
            data before giving up, as a float, or a
            `(connect timeout, read timeout)` tuple
            e.g. (3, 5). Default is no timeout.

    Returns:
        dict: Information about the Sonos speaker, such as the UID,
        MAC Address, and Zone Name.
    """
    # Serve from the cache unless a refresh was explicitly requested.
    if self.speaker_info and refresh is False:
        return self.speaker_info
    else:
        # The device description document is served by the speaker
        # itself on port 1400.
        response = requests.get('http://' + self.ip_address +
                                ':1400/xml/device_description.xml',
                                timeout=timeout)
        dom = XML.fromstring(response.content)
    device = dom.find('{urn:schemas-upnp-org:device-1-0}device')
    if device is not None:
        self.speaker_info['zone_name'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}roomName')
        # no zone icon in device_description.xml -> player icon
        self.speaker_info['player_icon'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}iconList/'
            '{urn:schemas-upnp-org:device-1-0}icon/'
            '{urn:schemas-upnp-org:device-1-0}url'
        )
        self.speaker_info['uid'] = self.uid
        self.speaker_info['serial_number'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}serialNum')
        self.speaker_info['software_version'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}softwareVersion')
        self.speaker_info['hardware_version'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}hardwareVersion')
        self.speaker_info['model_number'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}modelNumber')
        self.speaker_info['model_name'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}modelName')
        self.speaker_info['display_version'] = device.findtext(
            '{urn:schemas-upnp-org:device-1-0}displayVersion')
        # no mac address - extract from serial number
        # NOTE(review): assumes the serial number is formatted as
        # 'MAC:...' -- confirm against actual device descriptions.
        mac = self.speaker_info['serial_number'].split(':')[0]
        self.speaker_info['mac_address'] = mac
        return self.speaker_info
    # Malformed / unexpected description document: nothing to return.
    return None
def get_current_transport_info(self):
    """Get the current playback state.

    Returns:
        dict: The following information about the
        speaker's playing state:

        * current_transport_state (``PLAYING``, ``TRANSITIONING``,
          ``PAUSED_PLAYBACK``, ``STOPPED``)
        * current_transport_status (OK, ?)
        * current_speed(1, ?)

    This allows us to know if speaker is playing or not. Don't know other
    states of CurrentTransportStatus and CurrentSpeed.
    """
    response = self.avTransport.GetTransportInfo([
        ('InstanceID', 0),
    ])
    # Map the service's CamelCase keys onto our snake_case result keys.
    return {
        'current_transport_state': response['CurrentTransportState'],
        'current_transport_status': response['CurrentTransportStatus'],
        'current_transport_speed': response['CurrentSpeed'],
    }
def get_queue(self, start=0, max_items=100, full_album_art_uri=False):
    """Get information about the queue.

    :param start: Starting number of returned matches
    :param max_items: Maximum number of returned matches
    :param full_album_art_uri: If the album art URI should include the
        IP address
    :returns: A :py:class:`~.soco.data_structures.Queue` object

    This method is heavily based on Sam Soffes (aka soffes) ruby
    implementation
    """
    queue = []
    # 'Q:0' is the ObjectID of the current queue.
    response = self.contentDirectory.Browse([
        ('ObjectID', 'Q:0'),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    result = response['Result']
    # Collect the paging counters as ints, renamed to snake_case.
    metadata = {}
    for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
        metadata[camel_to_underscore(tag)] = int(response[tag])
    # I'm not sure this is necessary (any more). Even with an empty queue,
    # there is still a result object. This should be investigated.
    if not result:
        # pylint: disable=star-args
        return Queue(queue, **metadata)
    items = from_didl_string(result)
    for item in items:
        # Check if the album art URI should be fully qualified
        if full_album_art_uri:
            self.music_library._update_album_art_to_full_uri(item)
        queue.append(item)
    # pylint: disable=star-args
    return Queue(queue, **metadata)
@property
def queue_size(self):
    """int: Size of the queue."""
    # Browsing the queue's own metadata ('Q:0') yields a DIDL-Lite
    # container whose childCount attribute holds the queue length.
    response = self.contentDirectory.Browse([
        ('ObjectID', 'Q:0'),
        ('BrowseFlag', 'BrowseMetadata'),
        ('Filter', '*'),
        ('StartingIndex', 0),
        ('RequestedCount', 1),
        ('SortCriteria', '')
    ])
    dom = XML.fromstring(really_utf8(response['Result']))
    container = dom.find(
        '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container')
    if container is None:
        return None
    child_count = container.get('childCount')
    if child_count is None:
        return None
    return int(child_count)
def get_sonos_playlists(self, *args, **kwargs):
    """Convenience method for
    `get_music_library_information('sonos_playlists')`.

    Refer to the docstring for that method
    """
    # Prepend the search type and delegate to the music library.
    search_args = ('sonos_playlists',) + tuple(args)
    return self.music_library.get_music_library_information(
        *search_args, **kwargs)
@only_on_master
def add_uri_to_queue(self, uri, position=0, as_next=False):
    """Add the URI to the queue.

    For arguments and return value see `add_to_queue`.
    """
    # FIXME: The res.protocol_info should probably represent the mime type
    # etc of the uri. But this seems OK.
    resources = [
        DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
    # Wrap the bare URI in a minimal DIDL object so add_to_queue can
    # serialise it.
    wrapper = DidlObject(
        resources=resources, title='', parent_id='', item_id='')
    return self.add_to_queue(wrapper, position, as_next)
@only_on_master
def add_to_queue(self, queueable_item, position=0, as_next=False):
    """Add a queueable item to the queue.

    Args:
        queueable_item (DidlObject or MusicServiceItem): The item to be
            added to the queue
        position (int): The index (1-based) at which the URI should be
            added. Default is 0 (add URI at the end of the queue).
        as_next (bool): Whether this URI should be played as the next
            track in shuffle mode. This only works if `play_mode=SHUFFLE`.

    Returns:
        int: The index of the new item in the queue.
    """
    response = self.avTransport.AddURIToQueue([
        ('InstanceID', 0),
        ('EnqueuedURI', queueable_item.resources[0].uri),
        ('EnqueuedURIMetaData', to_didl_string(queueable_item)),
        ('DesiredFirstTrackNumberEnqueued', position),
        ('EnqueueAsNext', int(as_next))
    ])
    # The service reports where the item actually landed.
    return int(response['FirstTrackNumberEnqueued'])
def add_multiple_to_queue(self, items, container=None):
    """Add a sequence of items to the queue.

    Args:
        items (list): A sequence of items to the be added to the queue
        container (DidlObject, optional): A container object which
            includes the items.
    """
    if container is None:
        container_uri = ''  # Sonos seems to accept this as well
        container_metadata = ''
    else:
        container_uri = container.resources[0].uri
        container_metadata = to_didl_string(container)

    chunk_size = 16  # the service accepts at most 16 URIs per request
    pending = list(items)
    while pending:
        chunk, pending = pending[:chunk_size], pending[chunk_size:]
        uris = ' '.join(entry.resources[0].uri for entry in chunk)
        uri_metadata = ' '.join(to_didl_string(entry) for entry in chunk)
        self.avTransport.AddMultipleURIsToQueue([
            ('InstanceID', 0),
            ('UpdateID', 0),
            ('NumberOfURIs', len(chunk)),
            ('EnqueuedURIs', uris),
            ('EnqueuedURIsMetaData', uri_metadata),
            ('ContainerURI', container_uri),
            ('ContainerMetaData', container_metadata),
            ('DesiredFirstTrackNumberEnqueued', 0),
            ('EnqueueAsNext', 0)
        ])
@only_on_master
def remove_from_queue(self, index):
    """Remove a track from the queue by index. The index number is
    required as an argument, where the first index is 0.

    Args:
        index (int): The (0-based) index of the track to remove
    """
    # TODO: what do these parameters actually do?
    # Queue object ids are 1-based, so shift the caller's 0-based index.
    object_id = 'Q:0/' + str(index + 1)
    self.avTransport.RemoveTrackFromQueue([
        ('InstanceID', 0),
        ('ObjectID', object_id),
        ('UpdateID', '0'),
    ])
@only_on_master
def clear_queue(self):
    """Remove all tracks from the queue."""
    self.avTransport.RemoveAllTracksFromQueue([('InstanceID', 0)])
@deprecated('0.13', "soco.music_library.get_favorite_radio_shows", '0.15')
def get_favorite_radio_shows(self, start=0, max_items=100):
    """Get favorite radio shows from Sonos' Radio app.

    Returns:
        dict: A dictionary containing the total number of favorites, the
        number of favorites returned, and the actual list of favorite radio
        shows, represented as a dictionary with `title` and `uri` keys.

    Depending on what you're building, you'll want to check to see if the
    total number of favorites is greater than the amount you
    requested (`max_items`), if it is, use `start` to page through and
    get the entire list of favorites.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures', stacklevel=2)
    return self.__get_favorites(RADIO_SHOWS, start, max_items)
@deprecated('0.13', "soco.music_library.get_favorite_radio_stations",
            '0.15')
def get_favorite_radio_stations(self, start=0, max_items=100):
    """Get favorite radio stations from Sonos' Radio app.

    See :meth:`get_favorite_radio_shows` for return type and remarks.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures', stacklevel=2)
    return self.__get_favorites(RADIO_STATIONS, start, max_items)
@deprecated('0.13', "soco.music_library.get_sonos_favorites", '0.15')
def get_sonos_favorites(self, start=0, max_items=100):
    """Get Sonos favorites.

    See :meth:`get_favorite_radio_shows` for return type and remarks.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures', stacklevel=2)
    return self.__get_favorites(SONOS_FAVORITES, start, max_items)
def __get_favorites(self, favorite_type, start=0, max_items=100):
    """Helper method for `get_favorite_radio_*` methods.

    Args:
        favorite_type (str): Specify either `RADIO_STATIONS` or
            `RADIO_SHOWS`.
        start (int): Which number to start the retrieval from. Used for
            paging.
        max_items (int): The total number of results to return.

    Returns:
        dict: With keys 'total', 'returned' and 'favorites' (a list of
        dicts with 'title' and 'uri' keys, plus 'meta' for Sonos
        favorites).
    """
    # Anything other than the two radio types falls back to the general
    # Sonos favorites list.
    if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
        favorite_type = SONOS_FAVORITES
    # FIX: compare string constants with '==' rather than 'is'. The old
    # identity test only worked because callers happened to pass the
    # module-level constant objects themselves.
    if favorite_type == SONOS_FAVORITES:
        object_id = 'FV:2'  # Sonos favorites live under 'FV:2'
    else:
        object_id = 'R:0/{0}'.format(favorite_type)
    response = self.contentDirectory.Browse([
        ('ObjectID', object_id),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    result = {}
    favorites = []
    results_xml = response['Result']
    if results_xml != '':
        # Favorites are returned in DIDL-Lite format; radio shows are
        # containers while stations and Sonos favorites are items.
        metadata = XML.fromstring(really_utf8(results_xml))
        tag = ('{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container'
               if favorite_type == RADIO_SHOWS else
               '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item')
        for item in metadata.findall(tag):
            favorite = {}
            favorite['title'] = item.findtext(
                '{http://purl.org/dc/elements/1.1/}title')
            favorite['uri'] = item.findtext(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res')
            if favorite_type == SONOS_FAVORITES:
                favorite['meta'] = item.findtext(
                    '{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD')
            favorites.append(favorite)
    result['total'] = response['TotalMatches']
    result['returned'] = len(favorites)
    result['favorites'] = favorites
    return result
def create_sonos_playlist(self, title):
    """Create a new empty Sonos playlist.

    Args:
        title: Name of the playlist

    :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
    """
    response = self.avTransport.CreateSavedQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('EnqueuedURI', ''),
        ('EnqueuedURIMetaData', ''),
    ])
    assigned_id = response['AssignedObjectID']
    # The numeric part after 'SQ:' names the saved-queue file fragment.
    number = assigned_id.split(':', 2)[1]
    playlist_uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(
        number)
    resources = [DidlResource(uri=playlist_uri,
                              protocol_info="x-rincon-playlist:*:*:*")]
    return DidlPlaylistContainer(resources=resources, title=title,
                                 parent_id='SQ:', item_id=assigned_id)
@only_on_master
# pylint: disable=invalid-name
def create_sonos_playlist_from_queue(self, title):
    """Create a new Sonos playlist from the current queue.

    Args:
        title: Name of the playlist

    :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
    """
    # Note: probably same as Queue service method SaveAsSonosPlaylist
    # but this has not been tested. This method is what the
    # controller uses.
    response = self.avTransport.SaveQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('ObjectID', '')
    ])
    assigned_id = response['AssignedObjectID']
    # The numeric part after 'SQ:' names the saved-queue file fragment.
    number = assigned_id.split(':', 2)[1]
    playlist_uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(
        number)
    resources = [DidlResource(uri=playlist_uri,
                              protocol_info="x-rincon-playlist:*:*:*")]
    return DidlPlaylistContainer(resources=resources, title=title,
                                 parent_id='SQ:', item_id=assigned_id)
@only_on_master
def remove_sonos_playlist(self, sonos_playlist):
    """Remove a Sonos playlist.

    Args:
        sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove
            or the item_id (str).

    Returns:
        bool: True if succesful, False otherwise

    Raises:
        SoCoUPnPException: If sonos_playlist does not point to a valid
            object.
    """
    # Accept either a playlist object or a bare item_id string.
    item_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
    return self.contentDirectory.DestroyObject([('ObjectID', item_id)])
def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
    """Adds a queueable item to a Sonos' playlist.

    Args:
        queueable_item (DidlObject): the item to add to the Sonos' playlist
        sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to
            which the item should be added
    """
    # Look up the playlist's current update counter; the service rejects
    # requests carrying a stale UpdateID.
    response, _ = self.music_library._music_lib_search(
        sonos_playlist.item_id, 0, 1)
    update_id = response['UpdateID']
    self.avTransport.AddURIToSavedQueue([
        ('InstanceID', 0),
        ('UpdateID', update_id),
        ('ObjectID', sonos_playlist.item_id),
        ('EnqueuedURI', queueable_item.resources[0].uri),
        ('EnqueuedURIMetaData', to_didl_string(queueable_item)),
        # 2 ** 32 - 1 = 4294967295, this field has always this value. Most
        # likely, playlist positions are represented as a 32 bit uint and
        # this is therefore the largest index possible. Asking to add at
        # this index therefore probably amounts to adding it "at the end"
        ('AddAtIndex', 4294967295)
    ])
@only_on_master
def get_sleep_timer(self):
    """Retrieves remaining sleep time, if any.

    Returns:
        int or NoneType: Number of seconds left in timer. If there is no
        sleep timer currently set it will return None.
    """
    # FIX: the @only_on_master decorator was previously applied twice;
    # applying it once is sufficient.
    resp = self.avTransport.GetRemainingSleepTimerDuration([
        ('InstanceID', 0),
    ])
    duration = resp['RemainingSleepTimerDuration']
    if not duration:
        # Empty string means no timer is active.
        return None
    # The service reports 'H:MM:SS'; convert to total seconds.
    hours, minutes, seconds = (int(part) for part in duration.split(':'))
    return hours * 3600 + minutes * 60 + seconds
@only_on_master
def reorder_sonos_playlist(self, sonos_playlist, tracks, new_pos,
                           update_id=0):
    """Reorder and/or Remove tracks in a Sonos playlist.

    The underlying call is quite complex as it can both move a track
    within the list or delete a track from the playlist.  All of this
    depends on what tracks and new_pos specify.

    If a list is specified for tracks, then a list must be used for
    new_pos. Each list element is a discrete modification and the next
    list operation must anticipate the new state of the playlist.

    If a comma formatted string to tracks is specified, then use
    a similar string to specify new_pos. Those operations should be
    ordered from the end of the list to the beginning

    See the helper methods
    :py:meth:`clear_sonos_playlist`, :py:meth:`move_in_sonos_playlist`,
    :py:meth:`remove_from_sonos_playlist` for simplified usage.

    update_id - If you have a series of operations, tracking the update_id
    and setting it, will save a lookup operation.

    Examples:
      To reorder the first two tracks::

        # sonos_playlist specified by the DidlPlaylistContainer object
        sonos_playlist = device.get_sonos_playlists()[0]
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=[0, ], new_pos=[1, ])
        # OR specified by the item_id
        device.reorder_sonos_playlist('SQ:0', tracks=[0, ], new_pos=[1, ])

      To delete the second track::

        # tracks/new_pos are a list of int
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=[1, ], new_pos=[None, ])
        # OR tracks/new_pos are a list of int-like
        device.reorder_sonos_playlist(sonos_playlist,
                                      tracks=['1', ], new_pos=['', ])
        # OR tracks/new_pos are strings - no transform is done
        device.reorder_sonos_playlist(sonos_playlist, tracks='1',
                                      new_pos='')

      To reverse the order of a playlist with 4 items::

        device.reorder_sonos_playlist(sonos_playlist, tracks='3,2,1,0',
                                      new_pos='0,1,2,3')

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`): The
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        tracks: (list): list of track indices(int) to reorder. May also be
            a list of int like things. i.e. ``['0', '1',]`` OR it may be a
            str of comma separated int like things. ``"0,1"``. Tracks are
            **0**-based. Meaning the first track is track 0, just like
            indexing into a Python list.
        new_pos (list): list of new positions (int|None)
            corresponding to track_list. MUST be the same type as
            ``tracks``. **0**-based, see tracks above. ``None`` is the
            indicator to remove the track. If using a list of strings,
            then a remove is indicated by an empty string.
        update_id (int): operation id (default: 0) If set to 0, a lookup
            is done to find the correct value.

    Returns:
        dict: Which contains 3 elements: change, length and update_id.
        Change in size between original playlist and the resulting
        playlist, the length of resulting playlist, and the new
        update_id.

    Raises:
        SoCoUPnPException: If playlist does not exist or if your tracks
            and/or new_pos arguments are invalid.
    """
    # allow either a string 'SQ:10' or an object with item_id attribute.
    object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)

    # Normalise tracks/new_pos into parallel lists of strings; an empty
    # string in position_list marks the track for removal.
    if isinstance(tracks, UnicodeType):
        track_list = [tracks, ]
        position_list = [new_pos, ]
    elif isinstance(tracks, int):
        track_list = [tracks, ]
        if new_pos is None:
            new_pos = ''
        position_list = [new_pos, ]
    else:
        track_list = [str(x) for x in tracks]
        position_list = [str(x) if x is not None else '' for x in new_pos]
    # track_list = ','.join(track_list)
    # position_list = ','.join(position_list)
    if update_id == 0:  # retrieve the update id for the object
        response, _ = self.music_library._music_lib_search(object_id, 0, 1)
        update_id = response['UpdateID']
    change = 0

    for track, position in zip(track_list, position_list):
        if track == position:  # there is no move, a no-op
            continue
        response = self.avTransport.ReorderTracksInSavedQueue([
            ("InstanceID", 0),
            ("ObjectID", object_id),
            ("UpdateID", update_id),
            ("TrackList", track),
            ("NewPositionList", position),
        ])
        change += int(response['QueueLengthChange'])
        # Each successful call invalidates the old counter; carry the
        # fresh one into the next iteration.
        update_id = int(response['NewUpdateID'])
    # NOTE(review): if every track/position pair is a no-op (or the
    # iterable is empty), 'response' here is either unbound or the
    # search response, which has no 'NewQueueLength' key -- verify that
    # callers never hit this path.
    length = int(response['NewQueueLength'])
    response = {'change': change,
                'update_id': update_id,
                'length': length}
    return response
@only_on_master
def clear_sonos_playlist(self, sonos_playlist, update_id=0):
    """Clear all tracks from a Sonos playlist.
    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.clear_sonos_playlist(sonos_playlist)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        ValueError: If sonos_playlist specified by string and is not found.
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # Resolve an item_id string to the actual playlist object.
    if not isinstance(sonos_playlist, DidlPlaylistContainer):
        sonos_playlist = self.get_sonos_playlist_by_attr('item_id',
                                                         sonos_playlist)
    count = self.music_library.browse(ml_item=sonos_playlist).total_matches
    if count == 0:
        # Nothing to delete.
        return {'change': 0, 'update_id': update_id, 'length': count}
    # Ask reorder_sonos_playlist to remove every index in one call.
    track_csv = ','.join(str(index) for index in range(count))
    return self.reorder_sonos_playlist(sonos_playlist, tracks=track_csv,
                                       new_pos='', update_id=update_id)
@only_on_master
def move_in_sonos_playlist(self, sonos_playlist, track, new_pos,
                           update_id=0):
    """Move a track to a new position within a Sonos Playlist.
    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.move_in_sonos_playlist(sonos_playlist, track=0, new_pos=1)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to move. The first
            track is track 0, just like indexing into a Python list.
        new_pos (int): **0**-based location to move the track.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # Coerce to int so int-like strings are accepted, then delegate.
    return self.reorder_sonos_playlist(
        sonos_playlist, int(track), int(new_pos), update_id)
@only_on_master
def remove_from_sonos_playlist(self, sonos_playlist, track, update_id=0):
    """Remove a track from a Sonos Playlist.
    This is a convenience method for :py:meth:`reorder_sonos_playlist`.

    Example::

        device.remove_from_sonos_playlist(sonos_playlist, track=0)

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        track (int): **0**-based position of the track to remove. The
            first track is track 0, just like indexing into a Python list.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    # A new position of None tells reorder_sonos_playlist to delete.
    return self.reorder_sonos_playlist(
        sonos_playlist, int(track), None, update_id)
@only_on_master
def get_sonos_playlist_by_attr(self, attr_name, match):
    """Return the first Sonos Playlist DidlPlaylistContainer that
    matches the attribute specified.

    Args:
        attr_name (str): DidlPlaylistContainer attribute to compare. The
            most useful being: 'title' and 'item_id'.
        match (str): Value to match.

    Returns:
        (:class:`~.soco.data_structures.DidlPlaylistContainer`): The
            first matching playlist object.

    Raises:
        (AttributeError): If indicated attribute name does not exist.
        (ValueError): If a match can not be found.

    Example::

        device.get_sonos_playlist_by_attr('title', 'Foo')
        device.get_sonos_playlist_by_attr('item_id', 'SQ:3')
    """
    candidates = (playlist for playlist in self.get_sonos_playlists()
                  if getattr(playlist, attr_name) == match)
    found = next(candidates, None)
    if found is None:
        raise ValueError('No match on "{0}" for value "{1}"'.format(
            attr_name, match))
    return found
|
amelchio/pysonos
|
pysonos/snapshot.py
|
Snapshot.restore
|
python
|
def restore(self, fade=False):
try:
if self.is_coordinator:
self._restore_coordinator()
finally:
self._restore_volume(fade)
# Now everything is set, see if we need to be playing, stopped
# or paused ( only for coordinators)
if self.is_coordinator:
if self.transport_state == 'PLAYING':
self.device.play()
elif self.transport_state == 'STOPPED':
self.device.stop()
|
Restore the state of a device to that which was previously saved.
For coordinator devices restore everything. For slave devices
only restore volume etc., not transport info (transport info
comes from the slave's coordinator).
Args:
fade (bool): Whether volume should be faded up on restore.
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/snapshot.py#L161-L184
|
[
"def _restore_coordinator(self):\n \"\"\"Do the coordinator-only part of the restore.\"\"\"\n # Start by ensuring that the speaker is paused as we don't want\n # things all rolling back when we are changing them, as this could\n # include things like audio\n transport_info = self.device.get_current_transport_info()\n if transport_info is not None:\n if transport_info['current_transport_state'] == 'PLAYING':\n self.device.pause()\n\n # Check if the queue should be restored\n self._restore_queue()\n\n # Reinstate what was playing\n\n if self.is_playing_queue and self.playlist_position > 0:\n # was playing from playlist\n\n if self.playlist_position is not None:\n # The position in the playlist returned by\n # get_current_track_info starts at 1, but when\n # playing from playlist, the index starts at 0\n # if position > 0:\n self.playlist_position -= 1\n self.device.play_from_queue(self.playlist_position, False)\n\n if self.track_position is not None:\n if self.track_position != \"\":\n self.device.seek(self.track_position)\n\n # reinstate track, position, play mode, cross fade\n # Need to make sure there is a proper track selected first\n self.device.play_mode = self.play_mode\n self.device.cross_fade = self.cross_fade\n\n elif self.is_playing_cloud_queue:\n # was playing a cloud queue started by Alexa\n # No way yet to re-start this so prevent it throwing an error!\n pass\n\n else:\n # was playing a stream (radio station, file, or nothing)\n # reinstate uri and meta data\n if self.media_uri != \"\":\n self.device.play_uri(\n self.media_uri, self.media_metadata, start=False)\n",
"def _restore_volume(self, fade):\n \"\"\"Reinstate volume.\n\n Args:\n fade (bool): Whether volume should be faded up on restore.\n \"\"\"\n self.device.mute = self.mute\n\n # Can only change volume on device with fixed volume set to False\n # otherwise get uPnP error, so check first. Before issuing a network\n # command to check, fixed volume always has volume set to 100.\n # So only checked fixed volume if volume is 100.\n if self.volume == 100:\n fixed_vol = self.device.renderingControl.GetOutputFixed(\n [('InstanceID', 0)])['CurrentFixed']\n else:\n fixed_vol = False\n\n # now set volume if not fixed\n if not fixed_vol:\n self.device.bass = self.bass\n self.device.treble = self.treble\n self.device.loudness = self.loudness\n\n if fade:\n # if fade requested in restore\n # set volume to 0 then fade up to saved volume (non blocking)\n self.device.volume = 0\n self.device.ramp_to_volume(self.volume)\n else:\n # set volume\n self.device.volume = self.volume\n"
] |
class Snapshot(object):
"""A snapshot of the current state.
Note:
This does not change anything to do with the configuration
such as which group the speaker is in, just settings that impact
what is playing, or how it is played.
List of sources that may be playing using root of media_uri:
| ``x-rincon-queue``: playing from Queue
| ``x-sonosapi-stream``: playing a stream (eg radio)
| ``x-file-cifs``: playing file
| ``x-rincon``: slave zone (only change volume etc. rest from
coordinator)
"""
def __init__(self, device, snapshot_queue=False):
    """
    Args:
        device (SoCo): The device to snapshot
        snapshot_queue (bool): Whether the queue should be snapshotted.
            Defaults to `False`.

    Warning:
        It is strongly advised that you do not snapshot the queue unless
        you really need to as it takes a very long time to restore large
        queues as it is done one track at a time.
    """
    # The device that will be snapshotted
    self.device = device

    # The values that will be stored

    # For all zones:
    self.media_uri = None           # CurrentURI at snapshot time
    self.is_coordinator = False
    self.is_playing_queue = False   # playing the local queue
    self.is_playing_cloud_queue = False  # queue started by e.g. Alexa
    self.volume = None
    self.mute = None
    self.bass = None
    self.treble = None
    self.loudness = None

    # For coordinator zone playing from Queue:
    self.play_mode = None
    self.cross_fade = None
    self.playlist_position = 0      # 1-based position reported by device
    self.track_position = None

    # For coordinator zone playing a Stream:
    self.media_metadata = None

    # For all coordinator zones
    self.transport_state = None
    self.queue = None
    # Only set the queue as a list if we are going to save it; a value
    # of None presumably also signals "queue not snapshotted" to the
    # restore logic -- verify against _restore_queue.
    if snapshot_queue:
        self.queue = []
def snapshot(self):
"""Record and store the current state of a device.
Returns:
bool: `True` if the device is a coordinator, `False` otherwise.
Useful for determining whether playing an alert on a device
will ungroup it.
"""
# get if device coordinator (or slave) True (or False)
self.is_coordinator = self.device.is_coordinator
# Get information about the currently playing media
media_info = self.device.avTransport.GetMediaInfo([('InstanceID', 0)])
self.media_uri = media_info['CurrentURI']
# Extract source from media uri - below some media URI value examples:
# 'x-rincon-queue:RINCON_000E5859E49601400#0'
# - playing a local queue always #0 for local queue)
#
# 'x-rincon-queue:RINCON_000E5859E49601400#6'
# - playing a cloud queue where #x changes with each queue)
#
# -'x-rincon:RINCON_000E5859E49601400'
# - a slave player pointing to coordinator player
if self.media_uri.split(':')[0] == 'x-rincon-queue':
# The pylint error below is a false positive, see about removing it
# in the future
# pylint: disable=simplifiable-if-statement
if self.media_uri.split('#')[1] == '0':
# playing local queue
self.is_playing_queue = True
else:
# playing cloud queue - started from Alexa
self.is_playing_cloud_queue = True
# Save the volume, mute and other sound settings
self.volume = self.device.volume
self.mute = self.device.mute
self.bass = self.device.bass
self.treble = self.device.treble
self.loudness = self.device.loudness
# get details required for what's playing:
if self.is_playing_queue:
# playing from queue - save repeat, random, cross fade, track, etc.
self.play_mode = self.device.play_mode
self.cross_fade = self.device.cross_fade
# Get information about the currently playing track
track_info = self.device.get_current_track_info()
if track_info is not None:
position = track_info['playlist_position']
if position != "":
# save as integer
self.playlist_position = int(position)
self.track_position = track_info['position']
else:
# playing from a stream - save media metadata
self.media_metadata = media_info['CurrentURIMetaData']
# Work out what the playing state is - if a coordinator
if self.is_coordinator:
transport_info = self.device.get_current_transport_info()
if transport_info is not None:
self.transport_state = transport_info[
'current_transport_state']
# Save of the current queue if we need to
self._save_queue()
# return if device is a coordinator (helps usage)
return self.is_coordinator
# pylint: disable=too-many-branches
def _restore_coordinator(self):
"""Do the coordinator-only part of the restore."""
# Start by ensuring that the speaker is paused as we don't want
# things all rolling back when we are changing them, as this could
# include things like audio
transport_info = self.device.get_current_transport_info()
if transport_info is not None:
if transport_info['current_transport_state'] == 'PLAYING':
self.device.pause()
# Check if the queue should be restored
self._restore_queue()
# Reinstate what was playing
if self.is_playing_queue and self.playlist_position > 0:
# was playing from playlist
if self.playlist_position is not None:
# The position in the playlist returned by
# get_current_track_info starts at 1, but when
# playing from playlist, the index starts at 0
# if position > 0:
self.playlist_position -= 1
self.device.play_from_queue(self.playlist_position, False)
if self.track_position is not None:
if self.track_position != "":
self.device.seek(self.track_position)
# reinstate track, position, play mode, cross fade
# Need to make sure there is a proper track selected first
self.device.play_mode = self.play_mode
self.device.cross_fade = self.cross_fade
elif self.is_playing_cloud_queue:
# was playing a cloud queue started by Alexa
# No way yet to re-start this so prevent it throwing an error!
pass
else:
# was playing a stream (radio station, file, or nothing)
# reinstate uri and meta data
if self.media_uri != "":
self.device.play_uri(
self.media_uri, self.media_metadata, start=False)
def _restore_volume(self, fade):
"""Reinstate volume.
Args:
fade (bool): Whether volume should be faded up on restore.
"""
self.device.mute = self.mute
# Can only change volume on device with fixed volume set to False
# otherwise get uPnP error, so check first. Before issuing a network
# command to check, fixed volume always has volume set to 100.
# So only checked fixed volume if volume is 100.
if self.volume == 100:
fixed_vol = self.device.renderingControl.GetOutputFixed(
[('InstanceID', 0)])['CurrentFixed']
else:
fixed_vol = False
# now set volume if not fixed
if not fixed_vol:
self.device.bass = self.bass
self.device.treble = self.treble
self.device.loudness = self.loudness
if fade:
# if fade requested in restore
# set volume to 0 then fade up to saved volume (non blocking)
self.device.volume = 0
self.device.ramp_to_volume(self.volume)
else:
# set volume
self.device.volume = self.volume
def _save_queue(self):
"""Save the current state of the queue."""
if self.queue is not None:
# Maximum batch is 486, anything larger will still only
# return 486
batch_size = 400
total = 0
num_return = batch_size
# Need to get all the tracks in batches, but Only get the next
# batch if all the items requested were in the last batch
while num_return == batch_size:
queue_items = self.device.get_queue(total, batch_size)
# Check how many entries were returned
num_return = len(queue_items)
# Make sure the queue is not empty
if num_return > 0:
self.queue.append(queue_items)
# Update the total that have been processed
total = total + num_return
def _restore_queue(self):
"""Restore the previous state of the queue.
Note:
The restore currently adds the items back into the queue
using the URI, for items the Sonos system already knows about
this is OK, but for other items, they may be missing some of
their metadata as it will not be automatically picked up.
"""
if self.queue is not None:
# Clear the queue so that it can be reset
self.device.clear_queue()
# Now loop around all the queue entries adding them
for queue_group in self.queue:
for queue_item in queue_group:
self.device.add_uri_to_queue(queue_item.uri)
def __enter__(self):
self.snapshot()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.restore()
|
amelchio/pysonos
|
pysonos/snapshot.py
|
Snapshot._restore_coordinator
|
python
|
def _restore_coordinator(self):
# Start by ensuring that the speaker is paused as we don't want
# things all rolling back when we are changing them, as this could
# include things like audio
transport_info = self.device.get_current_transport_info()
if transport_info is not None:
if transport_info['current_transport_state'] == 'PLAYING':
self.device.pause()
# Check if the queue should be restored
self._restore_queue()
# Reinstate what was playing
if self.is_playing_queue and self.playlist_position > 0:
# was playing from playlist
if self.playlist_position is not None:
# The position in the playlist returned by
# get_current_track_info starts at 1, but when
# playing from playlist, the index starts at 0
# if position > 0:
self.playlist_position -= 1
self.device.play_from_queue(self.playlist_position, False)
if self.track_position is not None:
if self.track_position != "":
self.device.seek(self.track_position)
# reinstate track, position, play mode, cross fade
# Need to make sure there is a proper track selected first
self.device.play_mode = self.play_mode
self.device.cross_fade = self.cross_fade
elif self.is_playing_cloud_queue:
# was playing a cloud queue started by Alexa
# No way yet to re-start this so prevent it throwing an error!
pass
else:
# was playing a stream (radio station, file, or nothing)
# reinstate uri and meta data
if self.media_uri != "":
self.device.play_uri(
self.media_uri, self.media_metadata, start=False)
|
Do the coordinator-only part of the restore.
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/snapshot.py#L186-L231
|
[
"def _restore_queue(self):\n \"\"\"Restore the previous state of the queue.\n\n Note:\n The restore currently adds the items back into the queue\n using the URI, for items the Sonos system already knows about\n this is OK, but for other items, they may be missing some of\n their metadata as it will not be automatically picked up.\n \"\"\"\n if self.queue is not None:\n # Clear the queue so that it can be reset\n self.device.clear_queue()\n # Now loop around all the queue entries adding them\n for queue_group in self.queue:\n for queue_item in queue_group:\n self.device.add_uri_to_queue(queue_item.uri)\n"
] |
class Snapshot(object):
"""A snapshot of the current state.
Note:
This does not change anything to do with the configuration
such as which group the speaker is in, just settings that impact
what is playing, or how it is played.
List of sources that may be playing using root of media_uri:
| ``x-rincon-queue``: playing from Queue
| ``x-sonosapi-stream``: playing a stream (eg radio)
| ``x-file-cifs``: playing file
| ``x-rincon``: slave zone (only change volume etc. rest from
coordinator)
"""
def __init__(self, device, snapshot_queue=False):
"""
Args:
device (SoCo): The device to snapshot
snapshot_queue (bool): Whether the queue should be snapshotted.
Defaults to `False`.
Warning:
It is strongly advised that you do not snapshot the queue unless
you really need to as it takes a very long time to restore large
queues as it is done one track at a time.
"""
# The device that will be snapshotted
self.device = device
# The values that will be stored
# For all zones:
self.media_uri = None
self.is_coordinator = False
self.is_playing_queue = False
self.is_playing_cloud_queue = False
self.volume = None
self.mute = None
self.bass = None
self.treble = None
self.loudness = None
# For coordinator zone playing from Queue:
self.play_mode = None
self.cross_fade = None
self.playlist_position = 0
self.track_position = None
# For coordinator zone playing a Stream:
self.media_metadata = None
# For all coordinator zones
self.transport_state = None
self.queue = None
# Only set the queue as a list if we are going to save it
if snapshot_queue:
self.queue = []
def snapshot(self):
"""Record and store the current state of a device.
Returns:
bool: `True` if the device is a coordinator, `False` otherwise.
Useful for determining whether playing an alert on a device
will ungroup it.
"""
# get if device coordinator (or slave) True (or False)
self.is_coordinator = self.device.is_coordinator
# Get information about the currently playing media
media_info = self.device.avTransport.GetMediaInfo([('InstanceID', 0)])
self.media_uri = media_info['CurrentURI']
# Extract source from media uri - below some media URI value examples:
# 'x-rincon-queue:RINCON_000E5859E49601400#0'
# - playing a local queue always #0 for local queue)
#
# 'x-rincon-queue:RINCON_000E5859E49601400#6'
# - playing a cloud queue where #x changes with each queue)
#
# -'x-rincon:RINCON_000E5859E49601400'
# - a slave player pointing to coordinator player
if self.media_uri.split(':')[0] == 'x-rincon-queue':
# The pylint error below is a false positive, see about removing it
# in the future
# pylint: disable=simplifiable-if-statement
if self.media_uri.split('#')[1] == '0':
# playing local queue
self.is_playing_queue = True
else:
# playing cloud queue - started from Alexa
self.is_playing_cloud_queue = True
# Save the volume, mute and other sound settings
self.volume = self.device.volume
self.mute = self.device.mute
self.bass = self.device.bass
self.treble = self.device.treble
self.loudness = self.device.loudness
# get details required for what's playing:
if self.is_playing_queue:
# playing from queue - save repeat, random, cross fade, track, etc.
self.play_mode = self.device.play_mode
self.cross_fade = self.device.cross_fade
# Get information about the currently playing track
track_info = self.device.get_current_track_info()
if track_info is not None:
position = track_info['playlist_position']
if position != "":
# save as integer
self.playlist_position = int(position)
self.track_position = track_info['position']
else:
# playing from a stream - save media metadata
self.media_metadata = media_info['CurrentURIMetaData']
# Work out what the playing state is - if a coordinator
if self.is_coordinator:
transport_info = self.device.get_current_transport_info()
if transport_info is not None:
self.transport_state = transport_info[
'current_transport_state']
# Save of the current queue if we need to
self._save_queue()
# return if device is a coordinator (helps usage)
return self.is_coordinator
# pylint: disable=too-many-branches
def restore(self, fade=False):
"""Restore the state of a device to that which was previously saved.
For coordinator devices restore everything. For slave devices
only restore volume etc., not transport info (transport info
comes from the slave's coordinator).
Args:
fade (bool): Whether volume should be faded up on restore.
"""
try:
if self.is_coordinator:
self._restore_coordinator()
finally:
self._restore_volume(fade)
# Now everything is set, see if we need to be playing, stopped
# or paused ( only for coordinators)
if self.is_coordinator:
if self.transport_state == 'PLAYING':
self.device.play()
elif self.transport_state == 'STOPPED':
self.device.stop()
def _restore_volume(self, fade):
"""Reinstate volume.
Args:
fade (bool): Whether volume should be faded up on restore.
"""
self.device.mute = self.mute
# Can only change volume on device with fixed volume set to False
# otherwise get uPnP error, so check first. Before issuing a network
# command to check, fixed volume always has volume set to 100.
# So only checked fixed volume if volume is 100.
if self.volume == 100:
fixed_vol = self.device.renderingControl.GetOutputFixed(
[('InstanceID', 0)])['CurrentFixed']
else:
fixed_vol = False
# now set volume if not fixed
if not fixed_vol:
self.device.bass = self.bass
self.device.treble = self.treble
self.device.loudness = self.loudness
if fade:
# if fade requested in restore
# set volume to 0 then fade up to saved volume (non blocking)
self.device.volume = 0
self.device.ramp_to_volume(self.volume)
else:
# set volume
self.device.volume = self.volume
def _save_queue(self):
"""Save the current state of the queue."""
if self.queue is not None:
# Maximum batch is 486, anything larger will still only
# return 486
batch_size = 400
total = 0
num_return = batch_size
# Need to get all the tracks in batches, but Only get the next
# batch if all the items requested were in the last batch
while num_return == batch_size:
queue_items = self.device.get_queue(total, batch_size)
# Check how many entries were returned
num_return = len(queue_items)
# Make sure the queue is not empty
if num_return > 0:
self.queue.append(queue_items)
# Update the total that have been processed
total = total + num_return
def _restore_queue(self):
"""Restore the previous state of the queue.
Note:
The restore currently adds the items back into the queue
using the URI, for items the Sonos system already knows about
this is OK, but for other items, they may be missing some of
their metadata as it will not be automatically picked up.
"""
if self.queue is not None:
# Clear the queue so that it can be reset
self.device.clear_queue()
# Now loop around all the queue entries adding them
for queue_group in self.queue:
for queue_item in queue_group:
self.device.add_uri_to_queue(queue_item.uri)
def __enter__(self):
self.snapshot()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.restore()
|
amelchio/pysonos
|
pysonos/snapshot.py
|
Snapshot._restore_volume
|
python
|
def _restore_volume(self, fade):
self.device.mute = self.mute
# Can only change volume on device with fixed volume set to False
# otherwise get uPnP error, so check first. Before issuing a network
# command to check, fixed volume always has volume set to 100.
# So only checked fixed volume if volume is 100.
if self.volume == 100:
fixed_vol = self.device.renderingControl.GetOutputFixed(
[('InstanceID', 0)])['CurrentFixed']
else:
fixed_vol = False
# now set volume if not fixed
if not fixed_vol:
self.device.bass = self.bass
self.device.treble = self.treble
self.device.loudness = self.loudness
if fade:
# if fade requested in restore
# set volume to 0 then fade up to saved volume (non blocking)
self.device.volume = 0
self.device.ramp_to_volume(self.volume)
else:
# set volume
self.device.volume = self.volume
|
Reinstate volume.
Args:
fade (bool): Whether volume should be faded up on restore.
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/snapshot.py#L233-L264
| null |
class Snapshot(object):
"""A snapshot of the current state.
Note:
This does not change anything to do with the configuration
such as which group the speaker is in, just settings that impact
what is playing, or how it is played.
List of sources that may be playing using root of media_uri:
| ``x-rincon-queue``: playing from Queue
| ``x-sonosapi-stream``: playing a stream (eg radio)
| ``x-file-cifs``: playing file
| ``x-rincon``: slave zone (only change volume etc. rest from
coordinator)
"""
def __init__(self, device, snapshot_queue=False):
"""
Args:
device (SoCo): The device to snapshot
snapshot_queue (bool): Whether the queue should be snapshotted.
Defaults to `False`.
Warning:
It is strongly advised that you do not snapshot the queue unless
you really need to as it takes a very long time to restore large
queues as it is done one track at a time.
"""
# The device that will be snapshotted
self.device = device
# The values that will be stored
# For all zones:
self.media_uri = None
self.is_coordinator = False
self.is_playing_queue = False
self.is_playing_cloud_queue = False
self.volume = None
self.mute = None
self.bass = None
self.treble = None
self.loudness = None
# For coordinator zone playing from Queue:
self.play_mode = None
self.cross_fade = None
self.playlist_position = 0
self.track_position = None
# For coordinator zone playing a Stream:
self.media_metadata = None
# For all coordinator zones
self.transport_state = None
self.queue = None
# Only set the queue as a list if we are going to save it
if snapshot_queue:
self.queue = []
def snapshot(self):
"""Record and store the current state of a device.
Returns:
bool: `True` if the device is a coordinator, `False` otherwise.
Useful for determining whether playing an alert on a device
will ungroup it.
"""
# get if device coordinator (or slave) True (or False)
self.is_coordinator = self.device.is_coordinator
# Get information about the currently playing media
media_info = self.device.avTransport.GetMediaInfo([('InstanceID', 0)])
self.media_uri = media_info['CurrentURI']
# Extract source from media uri - below some media URI value examples:
# 'x-rincon-queue:RINCON_000E5859E49601400#0'
# - playing a local queue always #0 for local queue)
#
# 'x-rincon-queue:RINCON_000E5859E49601400#6'
# - playing a cloud queue where #x changes with each queue)
#
# -'x-rincon:RINCON_000E5859E49601400'
# - a slave player pointing to coordinator player
if self.media_uri.split(':')[0] == 'x-rincon-queue':
# The pylint error below is a false positive, see about removing it
# in the future
# pylint: disable=simplifiable-if-statement
if self.media_uri.split('#')[1] == '0':
# playing local queue
self.is_playing_queue = True
else:
# playing cloud queue - started from Alexa
self.is_playing_cloud_queue = True
# Save the volume, mute and other sound settings
self.volume = self.device.volume
self.mute = self.device.mute
self.bass = self.device.bass
self.treble = self.device.treble
self.loudness = self.device.loudness
# get details required for what's playing:
if self.is_playing_queue:
# playing from queue - save repeat, random, cross fade, track, etc.
self.play_mode = self.device.play_mode
self.cross_fade = self.device.cross_fade
# Get information about the currently playing track
track_info = self.device.get_current_track_info()
if track_info is not None:
position = track_info['playlist_position']
if position != "":
# save as integer
self.playlist_position = int(position)
self.track_position = track_info['position']
else:
# playing from a stream - save media metadata
self.media_metadata = media_info['CurrentURIMetaData']
# Work out what the playing state is - if a coordinator
if self.is_coordinator:
transport_info = self.device.get_current_transport_info()
if transport_info is not None:
self.transport_state = transport_info[
'current_transport_state']
# Save of the current queue if we need to
self._save_queue()
# return if device is a coordinator (helps usage)
return self.is_coordinator
# pylint: disable=too-many-branches
def restore(self, fade=False):
"""Restore the state of a device to that which was previously saved.
For coordinator devices restore everything. For slave devices
only restore volume etc., not transport info (transport info
comes from the slave's coordinator).
Args:
fade (bool): Whether volume should be faded up on restore.
"""
try:
if self.is_coordinator:
self._restore_coordinator()
finally:
self._restore_volume(fade)
# Now everything is set, see if we need to be playing, stopped
# or paused ( only for coordinators)
if self.is_coordinator:
if self.transport_state == 'PLAYING':
self.device.play()
elif self.transport_state == 'STOPPED':
self.device.stop()
def _restore_coordinator(self):
"""Do the coordinator-only part of the restore."""
# Start by ensuring that the speaker is paused as we don't want
# things all rolling back when we are changing them, as this could
# include things like audio
transport_info = self.device.get_current_transport_info()
if transport_info is not None:
if transport_info['current_transport_state'] == 'PLAYING':
self.device.pause()
# Check if the queue should be restored
self._restore_queue()
# Reinstate what was playing
if self.is_playing_queue and self.playlist_position > 0:
# was playing from playlist
if self.playlist_position is not None:
# The position in the playlist returned by
# get_current_track_info starts at 1, but when
# playing from playlist, the index starts at 0
# if position > 0:
self.playlist_position -= 1
self.device.play_from_queue(self.playlist_position, False)
if self.track_position is not None:
if self.track_position != "":
self.device.seek(self.track_position)
# reinstate track, position, play mode, cross fade
# Need to make sure there is a proper track selected first
self.device.play_mode = self.play_mode
self.device.cross_fade = self.cross_fade
elif self.is_playing_cloud_queue:
# was playing a cloud queue started by Alexa
# No way yet to re-start this so prevent it throwing an error!
pass
else:
# was playing a stream (radio station, file, or nothing)
# reinstate uri and meta data
if self.media_uri != "":
self.device.play_uri(
self.media_uri, self.media_metadata, start=False)
def _save_queue(self):
"""Save the current state of the queue."""
if self.queue is not None:
# Maximum batch is 486, anything larger will still only
# return 486
batch_size = 400
total = 0
num_return = batch_size
# Need to get all the tracks in batches, but Only get the next
# batch if all the items requested were in the last batch
while num_return == batch_size:
queue_items = self.device.get_queue(total, batch_size)
# Check how many entries were returned
num_return = len(queue_items)
# Make sure the queue is not empty
if num_return > 0:
self.queue.append(queue_items)
# Update the total that have been processed
total = total + num_return
def _restore_queue(self):
"""Restore the previous state of the queue.
Note:
The restore currently adds the items back into the queue
using the URI, for items the Sonos system already knows about
this is OK, but for other items, they may be missing some of
their metadata as it will not be automatically picked up.
"""
if self.queue is not None:
# Clear the queue so that it can be reset
self.device.clear_queue()
# Now loop around all the queue entries adding them
for queue_group in self.queue:
for queue_item in queue_group:
self.device.add_uri_to_queue(queue_item.uri)
def __enter__(self):
self.snapshot()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.restore()
|
amelchio/pysonos
|
pysonos/discovery.py
|
_discover_thread
|
python
|
def _discover_thread(callback,
timeout,
include_invisible,
interface_addr):
def create_socket(interface_addr=None):
""" A helper function for creating a socket for discover purposes.
Create and return a socket with appropriate options set for multicast.
"""
_sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# UPnP v1.0 requires a TTL of 4
_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL,
struct.pack("B", 4))
_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
if interface_addr is not None:
_sock.setsockopt(
socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
socket.inet_aton(interface_addr))
return _sock
# pylint: disable=invalid-name
PLAYER_SEARCH = dedent("""\
M-SEARCH * HTTP/1.1
HOST: 239.255.255.250:1900
MAN: "ssdp:discover"
MX: 1
ST: urn:schemas-upnp-org:device:ZonePlayer:1
""").encode('utf-8')
BCAST_ADDR = "255.255.255.255"
MCAST_GRP = "239.255.255.250"
MCAST_PORT = 1900
_sockets = {}
# Use the specified interface, if any
if interface_addr is not None:
try:
address = socket.inet_aton(interface_addr)
except socket.error:
raise ValueError("{0} is not a valid IP address string".format(
interface_addr))
_sockets[interface_addr] = create_socket(interface_addr)
_LOG.info("Sending discovery packets on default interface")
else:
# Find the local network addresses using ifaddr.
addresses = [
ip.ip
for adapter in ifaddr.get_adapters()
for ip in adapter.ips
if ip.is_IPv4
if ip.ip != "127.0.0.1"
]
# Create a socket for each unique address found, and one for the
# default multicast address
for address in addresses:
try:
_sockets[address] = create_socket(address)
except socket.error as e:
_LOG.warning("Can't make a discovery socket for %s: %s: %s",
address, e.__class__.__name__, e)
found_zones = set()
deadline = time.monotonic() + timeout
last_response = None
while not threading.current_thread().stopped():
time_left = deadline - time.monotonic()
if time_left < 0:
break
# Repeated sending, UDP is unreliable
if last_response is None or last_response < time.monotonic() - 1:
for _addr, _sock in _sockets.items():
try:
_LOG.info("Sending discovery packets on %s", _addr)
_sock.sendto(
really_utf8(PLAYER_SEARCH), (MCAST_GRP, MCAST_PORT))
_sock.sendto(
really_utf8(PLAYER_SEARCH), (BCAST_ADDR, MCAST_PORT))
except OSError:
_LOG.info("Discovery failed on %s", _addr)
response, _, _ = select.select(
list(_sockets.values()), [], [], min(1, time_left))
# Only Zone Players should respond, given the value of ST in the
# PLAYER_SEARCH message. However, to prevent misbehaved devices
# on the network disrupting the discovery process, we check that
# the response contains the "Sonos" string; otherwise we keep
# waiting for a correct response.
#
# Here is a sample response from a real Sonos device (actual numbers
# have been redacted):
# HTTP/1.1 200 OK
# CACHE-CONTROL: max-age = 1800
# EXT:
# LOCATION: http://***.***.***.***:1400/xml/device_description.xml
# SERVER: Linux UPnP/1.0 Sonos/26.1-76230 (ZPS3)
# ST: urn:schemas-upnp-org:device:ZonePlayer:1
# USN: uuid:RINCON_B8*************00::urn:schemas-upnp-org:device:
# ZonePlayer:1
# X-RINCON-BOOTSEQ: 3
# X-RINCON-HOUSEHOLD: Sonos_7O********************R7eU
for _sock in response:
last_response = time.monotonic()
data, addr = _sock.recvfrom(1024)
_LOG.debug(
'Received discovery response from %s: "%s"', addr, data
)
if b"Sonos" in data:
# pylint: disable=not-callable
zone = config.SOCO_CLASS(addr[0])
if zone not in found_zones:
if zone.is_visible or include_invisible:
found_zones.add(zone)
callback(zone)
|
Discover Sonos zones on the local network.
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/discovery.py#L40-L159
| null |
# -*- coding: utf-8 -*-
"""This module contains methods for discovering Sonos devices on the
network."""
from __future__ import unicode_literals
import logging
import threading
import socket
import select
from textwrap import dedent
import time
import struct
import ifaddr
from . import config
from .utils import really_utf8
_LOG = logging.getLogger(__name__)
# pylint: disable=too-many-locals, too-many-branches
class StoppableThread(threading.Thread):
""" Thread class with a stop() method. """
def __init__(self, target, args):
super().__init__(target=target, args=args)
self._stop_event = threading.Event()
def stop(self):
"""Ask the thread to stop."""
self._stop_event.set()
def stopped(self):
"""Returns True if stop() has been called."""
return self._stop_event.is_set()
def discover_thread(callback,
timeout=5,
include_invisible=False,
interface_addr=None):
""" Return a started thread with a discovery callback. """
thread = StoppableThread(
target=_discover_thread,
args=(callback, timeout, include_invisible, interface_addr))
thread.start()
return thread
def discover(timeout=5,
include_invisible=False,
interface_addr=None,
all_households=False):
""" Discover Sonos zones on the local network.
Return a set of `SoCo` instances for each zone found.
Include invisible zones (bridges and slave zones in stereo pairs if
``include_invisible`` is `True`. Will block for up to ``timeout`` seconds,
after which return `None` if no zones found.
Args:
timeout (int, optional): block for this many seconds, at most.
Defaults to 5.
include_invisible (bool, optional): include invisible zones in the
return set. Defaults to `False`.
interface_addr (str or None): Discovery operates by sending UDP
multicast datagrams. ``interface_addr`` is a string (dotted
quad) representation of the network interface address to use as
the source of the datagrams (i.e. it is a value for
`socket.IP_MULTICAST_IF <socket>`). If `None` or not specified,
all system interfaces will be tried. Defaults to `None`.
all_households (bool, optional): wait for all replies to discover
multiple households. If `False` or not specified, return only
the first household found.
Returns:
set: a set of `SoCo` instances, one for each zone found, or else
`None`.
"""
found_zones = set()
first_response = None
def callback(zone):
nonlocal first_response
if first_response is None:
first_response = time.monotonic()
if include_invisible:
found_zones.update(zone.all_zones)
else:
found_zones.update(zone.visible_zones)
if not all_households:
thread.stop()
thread = discover_thread(
callback, timeout, include_invisible, interface_addr)
while thread.is_alive() and not thread.stopped():
if first_response is None:
thread.join(timeout=1)
else:
thread.join(timeout=first_response + 1 - time.monotonic())
thread.stop()
return found_zones or None
def any_soco():
"""Return any visible soco device, for when it doesn't matter which.
Try to obtain an existing instance, or use `discover` if necessary.
Note that this assumes that the existing instance has not left
the network.
Returns:
SoCo: A `SoCo` instance (or subclass if `config.SOCO_CLASS` is set,
or `None` if no instances are found
"""
cls = config.SOCO_CLASS
# pylint: disable=no-member, protected-access
try:
# Try to get the first pre-existing soco instance we know about,
# as long as it is visible (i.e. not a bridge etc). Otherwise,
# perform discovery (again, excluding invisibles) and return one of
# those
device = next(d for d in cls._instances[cls._class_group].values()
if d.is_visible)
except (KeyError, StopIteration):
devices = discover()
return None if devices is None else devices.pop()
return device
def by_name(name):
"""Return a device by name.
Args:
name (str): The name of the device to return.
Returns:
:class:`~.SoCo`: The first device encountered among all zone with the
given player name. If none are found `None` is returned.
"""
devices = discover(all_households=True)
for device in (devices or []):
if device.player_name == name:
return device
return None
|
amelchio/pysonos
|
pysonos/discovery.py
|
discover_thread
|
python
|
def discover_thread(callback,
timeout=5,
include_invisible=False,
interface_addr=None):
thread = StoppableThread(
target=_discover_thread,
args=(callback, timeout, include_invisible, interface_addr))
thread.start()
return thread
|
Return a started thread with a discovery callback.
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/discovery.py#L162-L171
| null |
# -*- coding: utf-8 -*-
"""This module contains methods for discovering Sonos devices on the
network."""
from __future__ import unicode_literals
import logging
import threading
import socket
import select
from textwrap import dedent
import time
import struct
import ifaddr
from . import config
from .utils import really_utf8
_LOG = logging.getLogger(__name__)
# pylint: disable=too-many-locals, too-many-branches
class StoppableThread(threading.Thread):
    """A `threading.Thread` that can be asked to stop cooperatively.

    The target is expected to poll ``stopped()`` (typically via
    ``threading.current_thread().stopped()``) and return once it is true.
    """

    def __init__(self, target, args):
        super().__init__(target=target, args=args)
        # Event shared between controller and worker; setting it
        # requests a cooperative shutdown.
        self._stop_requested = threading.Event()

    def stop(self):
        """Request that the thread stop at its next check."""
        self._stop_requested.set()

    def stopped(self):
        """Return True once `stop()` has been requested."""
        return self._stop_requested.is_set()
def _discover_thread(callback,
                     timeout,
                     include_invisible,
                     interface_addr):
    """ Discover Sonos zones on the local network. """
    # Worker body run inside a StoppableThread (see discover_thread()).
    # Sends SSDP M-SEARCH probes over multicast and broadcast, then invokes
    # `callback(zone)` once per newly seen zone until `timeout` elapses or
    # the enclosing thread's stop() is requested.

    def create_socket(interface_addr=None):
        """ A helper function for creating a socket for discover purposes.
        Create and return a socket with appropriate options set for multicast.
        """
        _sock = socket.socket(
            socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        # UPnP v1.0 requires a TTL of 4
        _sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL,
                         struct.pack("B", 4))
        _sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        if interface_addr is not None:
            # Pin outgoing multicast traffic to the requested interface.
            _sock.setsockopt(
                socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
                socket.inet_aton(interface_addr))
        return _sock

    # pylint: disable=invalid-name
    # Standard SSDP search message; ST restricts replies to Sonos
    # ZonePlayer devices.
    PLAYER_SEARCH = dedent("""\
        M-SEARCH * HTTP/1.1
        HOST: 239.255.255.250:1900
        MAN: "ssdp:discover"
        MX: 1
        ST: urn:schemas-upnp-org:device:ZonePlayer:1
        """).encode('utf-8')
    BCAST_ADDR = "255.255.255.255"
    MCAST_GRP = "239.255.255.250"
    MCAST_PORT = 1900

    # Map of local address -> probe socket.
    _sockets = {}
    # Use the specified interface, if any
    if interface_addr is not None:
        try:
            address = socket.inet_aton(interface_addr)
        except socket.error:
            raise ValueError("{0} is not a valid IP address string".format(
                interface_addr))
        _sockets[interface_addr] = create_socket(interface_addr)
        _LOG.info("Sending discovery packets on default interface")
    else:
        # Find the local network addresses using ifaddr.
        addresses = [
            ip.ip
            for adapter in ifaddr.get_adapters()
            for ip in adapter.ips
            if ip.is_IPv4
            if ip.ip != "127.0.0.1"
        ]
        # Create a socket for each unique address found, and one for the
        # default multicast address
        for address in addresses:
            try:
                _sockets[address] = create_socket(address)
            except socket.error as e:
                # Best effort: skip interfaces we cannot bind rather than
                # aborting discovery entirely.
                _LOG.warning("Can't make a discovery socket for %s: %s: %s",
                             address, e.__class__.__name__, e)

    found_zones = set()
    deadline = time.monotonic() + timeout
    # Monotonic timestamp of the most recent SSDP reply; None until the
    # first reply arrives. Also throttles re-sending to ~1s intervals.
    last_response = None
    while not threading.current_thread().stopped():
        time_left = deadline - time.monotonic()
        if time_left < 0:
            break
        # Repeated sending, UDP is unreliable
        if last_response is None or last_response < time.monotonic() - 1:
            for _addr, _sock in _sockets.items():
                try:
                    _LOG.info("Sending discovery packets on %s", _addr)
                    _sock.sendto(
                        really_utf8(PLAYER_SEARCH), (MCAST_GRP, MCAST_PORT))
                    _sock.sendto(
                        really_utf8(PLAYER_SEARCH), (BCAST_ADDR, MCAST_PORT))
                except OSError:
                    _LOG.info("Discovery failed on %s", _addr)
        # Wait up to 1s (or the remaining budget, if smaller) for replies
        # on any of the probe sockets.
        response, _, _ = select.select(
            list(_sockets.values()), [], [], min(1, time_left))
        # Only Zone Players should respond, given the value of ST in the
        # PLAYER_SEARCH message. However, to prevent misbehaved devices
        # on the network disrupting the discovery process, we check that
        # the response contains the "Sonos" string; otherwise we keep
        # waiting for a correct response.
        #
        # Here is a sample response from a real Sonos device (actual numbers
        # have been redacted):
        # HTTP/1.1 200 OK
        # CACHE-CONTROL: max-age = 1800
        # EXT:
        # LOCATION: http://***.***.***.***:1400/xml/device_description.xml
        # SERVER: Linux UPnP/1.0 Sonos/26.1-76230 (ZPS3)
        # ST: urn:schemas-upnp-org:device:ZonePlayer:1
        # USN: uuid:RINCON_B8*************00::urn:schemas-upnp-org:device:
        # ZonePlayer:1
        # X-RINCON-BOOTSEQ: 3
        # X-RINCON-HOUSEHOLD: Sonos_7O********************R7eU
        for _sock in response:
            last_response = time.monotonic()
            data, addr = _sock.recvfrom(1024)
            _LOG.debug(
                'Received discovery response from %s: "%s"', addr, data
            )
            if b"Sonos" in data:
                # pylint: disable=not-callable
                zone = config.SOCO_CLASS(addr[0])
                if zone not in found_zones:
                    if zone.is_visible or include_invisible:
                        # Report each qualifying zone exactly once.
                        found_zones.add(zone)
                        callback(zone)
def discover(timeout=5,
             include_invisible=False,
             interface_addr=None,
             all_households=False):
    """ Discover Sonos zones on the local network.

    Return a set of `SoCo` instances for each zone found.
    Include invisible zones (bridges and slave zones in stereo pairs) if
    ``include_invisible`` is `True`. Will block for up to ``timeout`` seconds,
    after which return `None` if no zones found.

    Args:
        timeout (int, optional): block for this many seconds, at most.
            Defaults to 5.
        include_invisible (bool, optional): include invisible zones in the
            return set. Defaults to `False`.
        interface_addr (str or None): Discovery operates by sending UDP
            multicast datagrams. ``interface_addr`` is a string (dotted
            quad) representation of the network interface address to use as
            the source of the datagrams (i.e. it is a value for
            `socket.IP_MULTICAST_IF <socket>`). If `None` or not specified,
            all system interfaces will be tried. Defaults to `None`.
        all_households (bool, optional): wait for all replies to discover
            multiple households. If `False` or not specified, return only
            the first household found.

    Returns:
        set: a set of `SoCo` instances, one for each zone found, or else
        `None`.
    """
    found_zones = set()
    # Monotonic timestamp of the first zone reported by the discovery
    # thread; None until something responds.
    first_response = None

    def callback(zone):
        # Runs on the discovery thread for each newly found zone.
        nonlocal first_response
        if first_response is None:
            first_response = time.monotonic()
        if include_invisible:
            found_zones.update(zone.all_zones)
        else:
            found_zones.update(zone.visible_zones)
        if not all_households:
            # One household is enough: ask the discovery thread to stop.
            thread.stop()

    thread = discover_thread(
        callback, timeout, include_invisible, interface_addr)
    while thread.is_alive() and not thread.stopped():
        if first_response is None:
            # Nothing heard yet: poll in 1s slices up to the thread's own
            # timeout.
            thread.join(timeout=1)
        else:
            # After the first response, wait at most ~1s more for
            # stragglers (a negative timeout makes join return at once).
            thread.join(timeout=first_response + 1 - time.monotonic())
    thread.stop()
    return found_zones or None
def any_soco():
"""Return any visible soco device, for when it doesn't matter which.
Try to obtain an existing instance, or use `discover` if necessary.
Note that this assumes that the existing instance has not left
the network.
Returns:
SoCo: A `SoCo` instance (or subclass if `config.SOCO_CLASS` is set,
or `None` if no instances are found
"""
cls = config.SOCO_CLASS
# pylint: disable=no-member, protected-access
try:
# Try to get the first pre-existing soco instance we know about,
# as long as it is visible (i.e. not a bridge etc). Otherwise,
# perform discovery (again, excluding invisibles) and return one of
# those
device = next(d for d in cls._instances[cls._class_group].values()
if d.is_visible)
except (KeyError, StopIteration):
devices = discover()
return None if devices is None else devices.pop()
return device
def by_name(name):
"""Return a device by name.
Args:
name (str): The name of the device to return.
Returns:
:class:`~.SoCo`: The first device encountered among all zone with the
given player name. If none are found `None` is returned.
"""
devices = discover(all_households=True)
for device in (devices or []):
if device.player_name == name:
return device
return None
|
amelchio/pysonos
|
pysonos/discovery.py
|
discover
|
python
|
def discover(timeout=5,
include_invisible=False,
interface_addr=None,
all_households=False):
found_zones = set()
first_response = None
def callback(zone):
nonlocal first_response
if first_response is None:
first_response = time.monotonic()
if include_invisible:
found_zones.update(zone.all_zones)
else:
found_zones.update(zone.visible_zones)
if not all_households:
thread.stop()
thread = discover_thread(
callback, timeout, include_invisible, interface_addr)
while thread.is_alive() and not thread.stopped():
if first_response is None:
thread.join(timeout=1)
else:
thread.join(timeout=first_response + 1 - time.monotonic())
thread.stop()
return found_zones or None
|
Discover Sonos zones on the local network.
Return a set of `SoCo` instances for each zone found.
Include invisible zones (bridges and slave zones in stereo pairs if
``include_invisible`` is `True`. Will block for up to ``timeout`` seconds,
after which return `None` if no zones found.
Args:
timeout (int, optional): block for this many seconds, at most.
Defaults to 5.
include_invisible (bool, optional): include invisible zones in the
return set. Defaults to `False`.
interface_addr (str or None): Discovery operates by sending UDP
multicast datagrams. ``interface_addr`` is a string (dotted
quad) representation of the network interface address to use as
the source of the datagrams (i.e. it is a value for
`socket.IP_MULTICAST_IF <socket>`). If `None` or not specified,
all system interfaces will be tried. Defaults to `None`.
all_households (bool, optional): wait for all replies to discover
multiple households. If `False` or not specified, return only
the first household found.
Returns:
set: a set of `SoCo` instances, one for each zone found, or else
`None`.
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/discovery.py#L174-L231
|
[
"def discover_thread(callback,\n timeout=5,\n include_invisible=False,\n interface_addr=None):\n \"\"\" Return a started thread with a discovery callback. \"\"\"\n thread = StoppableThread(\n target=_discover_thread,\n args=(callback, timeout, include_invisible, interface_addr))\n thread.start()\n return thread\n"
] |
# -*- coding: utf-8 -*-
"""This module contains methods for discovering Sonos devices on the
network."""
from __future__ import unicode_literals
import logging
import threading
import socket
import select
from textwrap import dedent
import time
import struct
import ifaddr
from . import config
from .utils import really_utf8
_LOG = logging.getLogger(__name__)
# pylint: disable=too-many-locals, too-many-branches
class StoppableThread(threading.Thread):
""" Thread class with a stop() method. """
def __init__(self, target, args):
super().__init__(target=target, args=args)
self._stop_event = threading.Event()
def stop(self):
"""Ask the thread to stop."""
self._stop_event.set()
def stopped(self):
"""Returns True if stop() has been called."""
return self._stop_event.is_set()
def _discover_thread(callback,
timeout,
include_invisible,
interface_addr):
""" Discover Sonos zones on the local network. """
def create_socket(interface_addr=None):
""" A helper function for creating a socket for discover purposes.
Create and return a socket with appropriate options set for multicast.
"""
_sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# UPnP v1.0 requires a TTL of 4
_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL,
struct.pack("B", 4))
_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
if interface_addr is not None:
_sock.setsockopt(
socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
socket.inet_aton(interface_addr))
return _sock
# pylint: disable=invalid-name
PLAYER_SEARCH = dedent("""\
M-SEARCH * HTTP/1.1
HOST: 239.255.255.250:1900
MAN: "ssdp:discover"
MX: 1
ST: urn:schemas-upnp-org:device:ZonePlayer:1
""").encode('utf-8')
BCAST_ADDR = "255.255.255.255"
MCAST_GRP = "239.255.255.250"
MCAST_PORT = 1900
_sockets = {}
# Use the specified interface, if any
if interface_addr is not None:
try:
address = socket.inet_aton(interface_addr)
except socket.error:
raise ValueError("{0} is not a valid IP address string".format(
interface_addr))
_sockets[interface_addr] = create_socket(interface_addr)
_LOG.info("Sending discovery packets on default interface")
else:
# Find the local network addresses using ifaddr.
addresses = [
ip.ip
for adapter in ifaddr.get_adapters()
for ip in adapter.ips
if ip.is_IPv4
if ip.ip != "127.0.0.1"
]
# Create a socket for each unique address found, and one for the
# default multicast address
for address in addresses:
try:
_sockets[address] = create_socket(address)
except socket.error as e:
_LOG.warning("Can't make a discovery socket for %s: %s: %s",
address, e.__class__.__name__, e)
found_zones = set()
deadline = time.monotonic() + timeout
last_response = None
while not threading.current_thread().stopped():
time_left = deadline - time.monotonic()
if time_left < 0:
break
# Repeated sending, UDP is unreliable
if last_response is None or last_response < time.monotonic() - 1:
for _addr, _sock in _sockets.items():
try:
_LOG.info("Sending discovery packets on %s", _addr)
_sock.sendto(
really_utf8(PLAYER_SEARCH), (MCAST_GRP, MCAST_PORT))
_sock.sendto(
really_utf8(PLAYER_SEARCH), (BCAST_ADDR, MCAST_PORT))
except OSError:
_LOG.info("Discovery failed on %s", _addr)
response, _, _ = select.select(
list(_sockets.values()), [], [], min(1, time_left))
# Only Zone Players should respond, given the value of ST in the
# PLAYER_SEARCH message. However, to prevent misbehaved devices
# on the network disrupting the discovery process, we check that
# the response contains the "Sonos" string; otherwise we keep
# waiting for a correct response.
#
# Here is a sample response from a real Sonos device (actual numbers
# have been redacted):
# HTTP/1.1 200 OK
# CACHE-CONTROL: max-age = 1800
# EXT:
# LOCATION: http://***.***.***.***:1400/xml/device_description.xml
# SERVER: Linux UPnP/1.0 Sonos/26.1-76230 (ZPS3)
# ST: urn:schemas-upnp-org:device:ZonePlayer:1
# USN: uuid:RINCON_B8*************00::urn:schemas-upnp-org:device:
# ZonePlayer:1
# X-RINCON-BOOTSEQ: 3
# X-RINCON-HOUSEHOLD: Sonos_7O********************R7eU
for _sock in response:
last_response = time.monotonic()
data, addr = _sock.recvfrom(1024)
_LOG.debug(
'Received discovery response from %s: "%s"', addr, data
)
if b"Sonos" in data:
# pylint: disable=not-callable
zone = config.SOCO_CLASS(addr[0])
if zone not in found_zones:
if zone.is_visible or include_invisible:
found_zones.add(zone)
callback(zone)
def discover_thread(callback,
                    timeout=5,
                    include_invisible=False,
                    interface_addr=None):
    """Start background zone discovery and return its thread.

    The returned `StoppableThread` runs `_discover_thread` with the given
    arguments; call its ``stop()`` method to end discovery early.

    Args:
        callback (callable): invoked with each discovered `SoCo` zone.
        timeout (int, optional): seconds to keep discovering. Defaults
            to 5.
        include_invisible (bool, optional): also report invisible zones.
            Defaults to `False`.
        interface_addr (str or None): source interface address, or `None`
            to probe every system interface. Defaults to `None`.

    Returns:
        StoppableThread: the already-started discovery thread.
    """
    worker_args = (callback, timeout, include_invisible, interface_addr)
    worker = StoppableThread(target=_discover_thread, args=worker_args)
    worker.start()
    return worker
def any_soco():
"""Return any visible soco device, for when it doesn't matter which.
Try to obtain an existing instance, or use `discover` if necessary.
Note that this assumes that the existing instance has not left
the network.
Returns:
SoCo: A `SoCo` instance (or subclass if `config.SOCO_CLASS` is set,
or `None` if no instances are found
"""
cls = config.SOCO_CLASS
# pylint: disable=no-member, protected-access
try:
# Try to get the first pre-existing soco instance we know about,
# as long as it is visible (i.e. not a bridge etc). Otherwise,
# perform discovery (again, excluding invisibles) and return one of
# those
device = next(d for d in cls._instances[cls._class_group].values()
if d.is_visible)
except (KeyError, StopIteration):
devices = discover()
return None if devices is None else devices.pop()
return device
def by_name(name):
"""Return a device by name.
Args:
name (str): The name of the device to return.
Returns:
:class:`~.SoCo`: The first device encountered among all zone with the
given player name. If none are found `None` is returned.
"""
devices = discover(all_households=True)
for device in (devices or []):
if device.player_name == name:
return device
return None
|
amelchio/pysonos
|
pysonos/discovery.py
|
by_name
|
python
|
def by_name(name):
devices = discover(all_households=True)
for device in (devices or []):
if device.player_name == name:
return device
return None
|
Return a device by name.
Args:
name (str): The name of the device to return.
Returns:
:class:`~.SoCo`: The first device encountered among all zone with the
given player name. If none are found `None` is returned.
|
train
|
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/discovery.py#L262-L276
|
[
"def discover(timeout=5,\n include_invisible=False,\n interface_addr=None,\n all_households=False):\n \"\"\" Discover Sonos zones on the local network.\n\n Return a set of `SoCo` instances for each zone found.\n Include invisible zones (bridges and slave zones in stereo pairs if\n ``include_invisible`` is `True`. Will block for up to ``timeout`` seconds,\n after which return `None` if no zones found.\n\n Args:\n timeout (int, optional): block for this many seconds, at most.\n Defaults to 5.\n include_invisible (bool, optional): include invisible zones in the\n return set. Defaults to `False`.\n interface_addr (str or None): Discovery operates by sending UDP\n multicast datagrams. ``interface_addr`` is a string (dotted\n quad) representation of the network interface address to use as\n the source of the datagrams (i.e. it is a value for\n `socket.IP_MULTICAST_IF <socket>`). If `None` or not specified,\n all system interfaces will be tried. Defaults to `None`.\n all_households (bool, optional): wait for all replies to discover\n multiple households. If `False` or not specified, return only\n the first household found.\n Returns:\n set: a set of `SoCo` instances, one for each zone found, or else\n `None`.\n\n \"\"\"\n\n found_zones = set()\n first_response = None\n\n def callback(zone):\n nonlocal first_response\n\n if first_response is None:\n first_response = time.monotonic()\n\n if include_invisible:\n found_zones.update(zone.all_zones)\n else:\n found_zones.update(zone.visible_zones)\n\n if not all_households:\n thread.stop()\n\n thread = discover_thread(\n callback, timeout, include_invisible, interface_addr)\n while thread.is_alive() and not thread.stopped():\n if first_response is None:\n thread.join(timeout=1)\n else:\n thread.join(timeout=first_response + 1 - time.monotonic())\n thread.stop()\n\n return found_zones or None\n"
] |
# -*- coding: utf-8 -*-
"""This module contains methods for discovering Sonos devices on the
network."""
from __future__ import unicode_literals
import logging
import threading
import socket
import select
from textwrap import dedent
import time
import struct
import ifaddr
from . import config
from .utils import really_utf8
_LOG = logging.getLogger(__name__)
# pylint: disable=too-many-locals, too-many-branches
class StoppableThread(threading.Thread):
""" Thread class with a stop() method. """
def __init__(self, target, args):
super().__init__(target=target, args=args)
self._stop_event = threading.Event()
def stop(self):
"""Ask the thread to stop."""
self._stop_event.set()
def stopped(self):
"""Returns True if stop() has been called."""
return self._stop_event.is_set()
def _discover_thread(callback,
timeout,
include_invisible,
interface_addr):
""" Discover Sonos zones on the local network. """
def create_socket(interface_addr=None):
""" A helper function for creating a socket for discover purposes.
Create and return a socket with appropriate options set for multicast.
"""
_sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# UPnP v1.0 requires a TTL of 4
_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL,
struct.pack("B", 4))
_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
if interface_addr is not None:
_sock.setsockopt(
socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
socket.inet_aton(interface_addr))
return _sock
# pylint: disable=invalid-name
PLAYER_SEARCH = dedent("""\
M-SEARCH * HTTP/1.1
HOST: 239.255.255.250:1900
MAN: "ssdp:discover"
MX: 1
ST: urn:schemas-upnp-org:device:ZonePlayer:1
""").encode('utf-8')
BCAST_ADDR = "255.255.255.255"
MCAST_GRP = "239.255.255.250"
MCAST_PORT = 1900
_sockets = {}
# Use the specified interface, if any
if interface_addr is not None:
try:
address = socket.inet_aton(interface_addr)
except socket.error:
raise ValueError("{0} is not a valid IP address string".format(
interface_addr))
_sockets[interface_addr] = create_socket(interface_addr)
_LOG.info("Sending discovery packets on default interface")
else:
# Find the local network addresses using ifaddr.
addresses = [
ip.ip
for adapter in ifaddr.get_adapters()
for ip in adapter.ips
if ip.is_IPv4
if ip.ip != "127.0.0.1"
]
# Create a socket for each unique address found, and one for the
# default multicast address
for address in addresses:
try:
_sockets[address] = create_socket(address)
except socket.error as e:
_LOG.warning("Can't make a discovery socket for %s: %s: %s",
address, e.__class__.__name__, e)
found_zones = set()
deadline = time.monotonic() + timeout
last_response = None
while not threading.current_thread().stopped():
time_left = deadline - time.monotonic()
if time_left < 0:
break
# Repeated sending, UDP is unreliable
if last_response is None or last_response < time.monotonic() - 1:
for _addr, _sock in _sockets.items():
try:
_LOG.info("Sending discovery packets on %s", _addr)
_sock.sendto(
really_utf8(PLAYER_SEARCH), (MCAST_GRP, MCAST_PORT))
_sock.sendto(
really_utf8(PLAYER_SEARCH), (BCAST_ADDR, MCAST_PORT))
except OSError:
_LOG.info("Discovery failed on %s", _addr)
response, _, _ = select.select(
list(_sockets.values()), [], [], min(1, time_left))
# Only Zone Players should respond, given the value of ST in the
# PLAYER_SEARCH message. However, to prevent misbehaved devices
# on the network disrupting the discovery process, we check that
# the response contains the "Sonos" string; otherwise we keep
# waiting for a correct response.
#
# Here is a sample response from a real Sonos device (actual numbers
# have been redacted):
# HTTP/1.1 200 OK
# CACHE-CONTROL: max-age = 1800
# EXT:
# LOCATION: http://***.***.***.***:1400/xml/device_description.xml
# SERVER: Linux UPnP/1.0 Sonos/26.1-76230 (ZPS3)
# ST: urn:schemas-upnp-org:device:ZonePlayer:1
# USN: uuid:RINCON_B8*************00::urn:schemas-upnp-org:device:
# ZonePlayer:1
# X-RINCON-BOOTSEQ: 3
# X-RINCON-HOUSEHOLD: Sonos_7O********************R7eU
for _sock in response:
last_response = time.monotonic()
data, addr = _sock.recvfrom(1024)
_LOG.debug(
'Received discovery response from %s: "%s"', addr, data
)
if b"Sonos" in data:
# pylint: disable=not-callable
zone = config.SOCO_CLASS(addr[0])
if zone not in found_zones:
if zone.is_visible or include_invisible:
found_zones.add(zone)
callback(zone)
def discover_thread(callback,
timeout=5,
include_invisible=False,
interface_addr=None):
""" Return a started thread with a discovery callback. """
thread = StoppableThread(
target=_discover_thread,
args=(callback, timeout, include_invisible, interface_addr))
thread.start()
return thread
def discover(timeout=5,
include_invisible=False,
interface_addr=None,
all_households=False):
""" Discover Sonos zones on the local network.
Return a set of `SoCo` instances for each zone found.
Include invisible zones (bridges and slave zones in stereo pairs if
``include_invisible`` is `True`. Will block for up to ``timeout`` seconds,
after which return `None` if no zones found.
Args:
timeout (int, optional): block for this many seconds, at most.
Defaults to 5.
include_invisible (bool, optional): include invisible zones in the
return set. Defaults to `False`.
interface_addr (str or None): Discovery operates by sending UDP
multicast datagrams. ``interface_addr`` is a string (dotted
quad) representation of the network interface address to use as
the source of the datagrams (i.e. it is a value for
`socket.IP_MULTICAST_IF <socket>`). If `None` or not specified,
all system interfaces will be tried. Defaults to `None`.
all_households (bool, optional): wait for all replies to discover
multiple households. If `False` or not specified, return only
the first household found.
Returns:
set: a set of `SoCo` instances, one for each zone found, or else
`None`.
"""
found_zones = set()
first_response = None
def callback(zone):
nonlocal first_response
if first_response is None:
first_response = time.monotonic()
if include_invisible:
found_zones.update(zone.all_zones)
else:
found_zones.update(zone.visible_zones)
if not all_households:
thread.stop()
thread = discover_thread(
callback, timeout, include_invisible, interface_addr)
while thread.is_alive() and not thread.stopped():
if first_response is None:
thread.join(timeout=1)
else:
thread.join(timeout=first_response + 1 - time.monotonic())
thread.stop()
return found_zones or None
def any_soco():
"""Return any visible soco device, for when it doesn't matter which.
Try to obtain an existing instance, or use `discover` if necessary.
Note that this assumes that the existing instance has not left
the network.
Returns:
SoCo: A `SoCo` instance (or subclass if `config.SOCO_CLASS` is set,
or `None` if no instances are found
"""
cls = config.SOCO_CLASS
# pylint: disable=no-member, protected-access
try:
# Try to get the first pre-existing soco instance we know about,
# as long as it is visible (i.e. not a bridge etc). Otherwise,
# perform discovery (again, excluding invisibles) and return one of
# those
device = next(d for d in cls._instances[cls._class_group].values()
if d.is_visible)
except (KeyError, StopIteration):
devices = discover()
return None if devices is None else devices.pop()
return device
|
dshean/demcoreg
|
demcoreg/glas_proc.py
|
main
|
python
|
def main():
parser = getparser()
args = parser.parse_args()
fn = args.fn
sitename = args.sitename
#User-specified output extent
#Note: not checked, untested
if args.extent is not None:
extent = (args.extent).split()
else:
extent = (geolib.site_dict[sitename]).extent
if args.refdem_fn is not None:
refdem_fn = args.refdem_fn
else:
refdem_fn = (geolib.site_dict[sitename]).refdem_fn
#Max elevation difference between shot and sampled DEM
max_z_DEM_diff = 200
#Max elevation std for sampled DEM values in padded window around shot
max_DEMhiresArElv_std = 50.0
f = h5py.File(fn)
t = f.get('Data_40HZ/Time/d_UTCTime_40')[:]
#pyt0 = datetime(1, 1, 1, 0, 0)
#utct0 = datetime(1970, 1, 1, 0, 0)
#t0 = datetime(2000, 1, 1, 12, 0, 0)
#offset_s = (t0 - utct0).total_seconds()
offset_s = 946728000.0
t += offset_s
dt = timelib.np_utc2dt(t)
dt_o = timelib.dt2o(dt)
#dts = timelib.np_print_dt(dt)
#dt_decyear = timelib.np_dt2decyear(dt)
dt_int = np.array([ts.strftime('%Y%m%d') for ts in dt], dtype=long)
lat = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lat')[:], 1.7976931348623157e+308)
lon = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lon')[:], 1.7976931348623157e+308)
lon = geolib.lon360to180(lon)
z = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Surfaces/d_elev')[:], 1.7976931348623157e+308)
print('Input: %i' % z.count())
#Now spatial filter - should do this up front
x = lon
y = lat
xmin, xmax, ymin, ymax = extent
#This is True if point is within extent
valid_idx = ((x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax))
#Prepare output array
#out = np.ma.vstack([dt_decyear, dt_o, dt_int, lat, lon, z]).T
out = np.ma.vstack([dt_o, dt_int, lat, lon, z]).T
#Create a mask to ensure all four values are valid for each point
mask = ~(np.any(np.ma.getmaskarray(out), axis=1))
mask *= valid_idx
out = out[mask]
valid_idx = ~(np.any(np.ma.getmaskarray(out), axis=1))
#Lon and lat indices
xcol = 3
ycol = 2
zcol = 4
if out.shape[0] == 0:
sys.exit("No points within specified extent\n")
else:
print("Spatial filter: %i" % out.shape[0])
#out_fmt = ['%0.8f', '%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f']
#out_hdr = ['dt_decyear, dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84']
out_fmt = ['%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f']
out_hdr = ['dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84']
#Saturation Correction Flag
#These are 0 to 5, not_saturated inconsequential applicable not_computed not_applicable
sat_corr_flg = f.get('Data_40HZ/Quality/sat_corr_flg')[mask]
#valid_idx *= (sat_corr_flg < 2)
#Correction to elevation for saturated waveforms
#Notes suggest this might not be desirable over land
satElevCorr = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Corrections/d_satElevCorr')[mask], 1.7976931348623157e+308)
#z[sat_corr_flg < 3] += satElevCorr.filled(0.0)[sat_corr_flg < 3]
out[:,zcol] += satElevCorr.filled(0.0)
#Correction to elevation based on post flight analysis for biases determined for each campaign
ElevBiasCorr = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Corrections/d_ElevBiasCorr')[mask], 1.7976931348623157e+308)
out[:,zcol] += ElevBiasCorr.filled(0.0)
#Surface elevation (T/P ellipsoid) minus surface elevation (WGS84 ellipsoid).
#Approximately 0.7 m, so WGS is lower; need to subtract from d_elev
deltaEllip = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_deltaEllip')[mask], 1.7976931348623157e+308)
out[:,zcol] -= deltaEllip
#These are 1 for valid, 0 for invalid
valid_idx *= ~(np.ma.getmaskarray(out[:,zcol]))
print("z corrections: %i" % valid_idx.nonzero()[0].size)
if False:
#Reflectivity, not corrected for atmospheric effects
reflctUC = np.ma.masked_equal(f.get('Data_40HZ/Reflectivity/d_reflctUC')[mask], 1.7976931348623157e+308)
#This was minimum used for ice sheets
min_reflctUC = 0.025
valid_idx *= (reflctUC > min_reflctUC).data
print("reflctUC: %i" % valid_idx.nonzero()[0].size)
if False:
#The Standard deviation of the difference between the functional fit and the received echo \
#using alternate parameters. It is directly taken from GLA05 parameter d_wfFitSDev_1
LandVar = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Surfaces/d_LandVar')[mask], 1.7976931348623157e+308)
#This was max used for ice sheets
max_LandVar = 0.04
valid_idx *= (LandVar < max_LandVar).data
print("LandVar: %i" % valid_idx.nonzero()[0].size)
if True:
#Flag indicating whether the elevations on this record should be used.
#0 = valid, 1 = not valid
elev_use_flg = f.get('Data_40HZ/Quality/elev_use_flg')[mask].astype('Bool')
valid_idx *= ~elev_use_flg
print("elev_use_flg: %i" % valid_idx.nonzero()[0].size)
if False:
#Cloud contamination; Indicates if Gain > flag value, indicating probable cloud contamination.
elv_cloud_flg = f.get('Data_40HZ/Elevation_Flags/elv_cloud_flg')[mask].astype('Bool')
valid_idx *= ~elv_cloud_flg
print("elv_cloud_flg: %i" % valid_idx.nonzero()[0].size)
if False:
#Full resolution 1064 Quality Flag; 0 - 12 indicate Cloud detected
FRir_qa_flg = f.get('Data_40HZ/Atmosphere/FRir_qa_flg')[mask]
valid_idx *= (FRir_qa_flg == 15).data
print("FRir_qa_flg: %i" % valid_idx.nonzero()[0].size)
if False:
#This is elevation extracted from SRTM30
DEM_elv = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_DEM_elv')[mask], 1.7976931348623157e+308)
z_DEM_diff = np.abs(out[:,zcol] - DEM_elv)
valid_idx *= (z_DEM_diff < max_z_DEM_diff).data
print("z_DEM_diff: %i" % valid_idx.nonzero()[0].size)
#d_DEMhiresArElv is a 9 element array of high resolution DEM values. The array index corresponds to the position of the DEM value relative to the spot. (5) is the footprint center.
DEMhiresArElv = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_DEMhiresArElv')[mask], 1.7976931348623157e+308)
DEMhiresArElv_std = np.ma.std(DEMhiresArElv, axis=1)
valid_idx *= (DEMhiresArElv_std < max_DEMhiresArElv_std).data
print("max_DEMhiresArElv_std: %i" % valid_idx.nonzero()[0].size)
#Compute slope
#Apply cumulative filter to output
out = out[valid_idx]
out_fn = os.path.splitext(fn)[0]+'_%s.csv' % sitename
print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
out_fmt_str = ', '.join(out_fmt)
out_hdr_str = ', '.join(out_hdr)
np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
iolib.writevrt(out_fn, x='lon', y='lat')
#Extract our own DEM values - should be better than default GLAS reference DEM stats
if True:
print("Loading reference DEM: %s" % refdem_fn)
dem_ds = gdal.Open(refdem_fn)
print("Converting coords for DEM")
dem_mX, dem_mY = geolib.ds_cT(dem_ds, out[:,xcol], out[:,ycol], geolib.wgs_srs)
print("Sampling")
dem_samp = geolib.sample(dem_ds, dem_mX, dem_mY, pad='glas')
abs_dem_z_diff = np.abs(out[:,zcol] - dem_samp[:,0])
valid_idx *= ~(np.ma.getmaskarray(abs_dem_z_diff))
print("Valid DEM extract: %i" % valid_idx.nonzero()[0].size)
valid_idx *= (abs_dem_z_diff < max_z_DEM_diff).data
print("Valid abs DEM diff: %i" % valid_idx.nonzero()[0].size)
valid_idx *= (dem_samp[:,1] < max_DEMhiresArElv_std).data
print("Valid DEM mad: %i" % valid_idx.nonzero()[0].size)
if valid_idx.nonzero()[0].size == 0:
sys.exit("No valid points remain")
out = np.ma.hstack([out, dem_samp])
out_fmt.extend(['%0.2f', '%0.2f'])
out_hdr.extend(['z_refdem_med_WGS84', 'z_refdem_nmad'])
#Apply cumulative filter to output
out = out[valid_idx]
out_fn = os.path.splitext(out_fn)[0]+'_refdemfilt.csv'
print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
out_fmt_str = ', '.join(out_fmt)
out_hdr_str = ', '.join(out_hdr)
np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
iolib.writevrt(out_fn, x='lon', y='lat')
#This will sample land-use/land-cover or percent bareground products
#Can be used to isolate points over exposed rock
#if args.rockfilter:
if True:
#This should automatically identify appropriate LULC source based on refdem extent
lulc_source = dem_mask.get_lulc_source(dem_ds)
#Looks like NED extends beyond NCLD, force use NLCD for conus
#if sitename == 'conus':
# lulc_source = 'nlcd'
lulc_ds = dem_mask.get_lulc_ds_full(dem_ds, lulc_source)
print("Converting coords for LULC")
lulc_mX, lulc_mY = geolib.ds_cT(lulc_ds, out[:,xcol], out[:,ycol], geolib.wgs_srs)
print("Sampling LULC: %s" % lulc_source)
#Note: want to make sure we're not interpolating integer values for NLCD
#Should be safe with pad=0, even with pad>0, should take median, not mean
lulc_samp = geolib.sample(lulc_ds, lulc_mX, lulc_mY, pad=0)
l = lulc_samp[:,0].data
if lulc_source == 'nlcd':
#This passes rock and ice pixels
valid_idx = np.logical_or((l==31),(l==12))
elif lulc_source == 'bareground':
#This preserves pixels with bareground percentation >85%
minperc = 85
valid_idx = (l >= minperc)
else:
print("Unknown LULC source")
print("LULC: %i" % valid_idx.nonzero()[0].size)
if l.ndim == 1:
l = l[:,np.newaxis]
out = np.ma.hstack([out, l])
out_fmt.append('%i')
out_hdr.append('lulc')
#Apply cumulative filter to output
out = out[valid_idx]
out_fn = os.path.splitext(out_fn)[0]+'_lulcfilt.csv'
print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
out_fmt_str = ', '.join(out_fmt)
out_hdr_str = ', '.join(out_hdr)
np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
iolib.writevrt(out_fn, x='lon', y='lat')
|
ICESat-1 filters
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/glas_proc.py#L59-L296
|
[
"def getparser():\n parser = argparse.ArgumentParser(description=\"Process and filter ICESat GLAS points\")\n parser.add_argument('fn', type=str, help='GLAH14 HDF5 filename')\n site_choices = geolib.site_dict.keys()\n parser.add_argument('sitename', type=str, choices=site_choices, help='Site name')\n #parser.add_argument('--rockfilter', action='store_true', help='Only output points over exposed rock using NLCD or bareground')\n parser.add_argument('-extent', type=str, default=None, help='Specify output spatial extent (\"xmin xmax ymin ymax\"). Otherwise, use default specified for sitename in pygeotools/lib/geolib')\n parser.add_argument('-refdem_fn', type=str, default=None, help='Specify alternative reference DEM for filtering. Otherwise use NED or SRTM')\n return parser\n"
] |
#! /usr/bin/env python
#David Shean
#dshean@gmail.com
#Utility to process ICESat-1 GLAS products, filter and clip to specified bounding box
#Input is HDF5 GLAH14
#https://nsidc.org/data/GLAH14/versions/34
#http://nsidc.org/data/docs/daac/glas_altimetry/data-dictionary-glah14.html
import os, sys
from datetime import datetime, timedelta
import argparse
import h5py
import numpy as np
from osgeo import gdal
from pygeotools.lib import timelib, geolib, iolib, malib, filtlib
#This is needed for LULC products
import dem_mask
#Before running, download all GLAH14 products
#lftp ftp://n5eil01u.ecs.nsidc.org/DP5/GLAS/
#mirror --parallel=16 GLAH14.034
"""
cd GLAH14.034
lfs setstripe -c 32 .
for site in conus hma
do
parallel --progress --delay 1 -j 32 "~/src/demcoreg/demcoreg/glas_proc.py {} $site" ::: */*.H5
#Combine output
for ext in ${site}.csv ${site}_refdemfilt.csv ${site}_refdemfilt_lulcfilt.csv
do
first=$(ls */*$ext | head -1)
head -1 $first > GLAH14_$ext
cat */*$ext | sort -n | grep -v lat >> GLAH14_$ext
done
done
"""
#Clip to glacier polygons
#clipsrc=/Volumes/d/hma/rgi/rgi_hma_aea_110kmbuffer_wgs84.shp
#vrt=GLAH14_tllz_hma_lulcfilt_demfilt.vrt
#ogr2ogr -progress -overwrite -clipsrc $clipsrc ${vrt%.*}_clip.shp $vrt
def getparser():
    """Build the command-line argument parser for GLAS point processing."""
    p = argparse.ArgumentParser(description="Process and filter ICESat GLAS points")
    p.add_argument('fn', type=str, help='GLAH14 HDF5 filename')
    #Valid site names come from the pygeotools site dictionary
    p.add_argument('sitename', type=str, choices=geolib.site_dict.keys(), help='Site name')
    #parser.add_argument('--rockfilter', action='store_true', help='Only output points over exposed rock using NLCD or bareground')
    p.add_argument('-extent', type=str, default=None,
            help='Specify output spatial extent ("xmin xmax ymin ymax"). Otherwise, use default specified for sitename in pygeotools/lib/geolib')
    p.add_argument('-refdem_fn', type=str, default=None,
            help='Specify alternative reference DEM for filtering. Otherwise use NED or SRTM')
    return p
def main():
    """Process one GLAH14 granule: read, correct, filter, and export points.

    Reads the 40 Hz elevation records from the input HDF5 file, applies
    elevation corrections (saturation, campaign bias, T/P->WGS84 ellipsoid
    offset), applies spatial and quality filters, then writes progressively
    filtered CSV files (plus VRT sidecars): site subset, reference-DEM
    filtered, and LULC (rock/bareground) filtered.
    """
    parser = getparser()
    args = parser.parse_args()
    fn = args.fn
    sitename = args.sitename
    #User-specified output extent
    #Note: not checked, untested
    if args.extent is not None:
        extent = (args.extent).split()
    else:
        extent = (geolib.site_dict[sitename]).extent
    if args.refdem_fn is not None:
        refdem_fn = args.refdem_fn
    else:
        refdem_fn = (geolib.site_dict[sitename]).refdem_fn
    #Max elevation difference between shot and sampled DEM
    max_z_DEM_diff = 200
    #Max elevation std for sampled DEM values in padded window around shot
    max_DEMhiresArElv_std = 50.0
    f = h5py.File(fn)
    #40 Hz UTC timestamps, seconds relative to the J2000 epoch (2000-01-01 12:00)
    t = f.get('Data_40HZ/Time/d_UTCTime_40')[:]
    #pyt0 = datetime(1, 1, 1, 0, 0)
    #utct0 = datetime(1970, 1, 1, 0, 0)
    #t0 = datetime(2000, 1, 1, 12, 0, 0)
    #offset_s = (t0 - utct0).total_seconds()
    #Precomputed (t0 - utct0).total_seconds(): shifts J2000 epoch to Unix epoch
    offset_s = 946728000.0
    t += offset_s
    dt = timelib.np_utc2dt(t)
    dt_o = timelib.dt2o(dt)
    #dts = timelib.np_print_dt(dt)
    #dt_decyear = timelib.np_dt2decyear(dt)
    #NOTE(review): `long` is Python 2 only; under Python 3 this raises NameError (should be int) - confirm target interpreter
    dt_int = np.array([ts.strftime('%Y%m%d') for ts in dt], dtype=long)
    #1.7976931348623157e+308 (float64 max) is the GLAS nodata value
    lat = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lat')[:], 1.7976931348623157e+308)
    lon = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lon')[:], 1.7976931348623157e+308)
    lon = geolib.lon360to180(lon)
    z = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Surfaces/d_elev')[:], 1.7976931348623157e+308)
    print('Input: %i' % z.count())
    #Now spatial filter - should do this up front
    x = lon
    y = lat
    xmin, xmax, ymin, ymax = extent
    #This is True if point is within extent
    valid_idx = ((x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax))
    #Prepare output array
    #out = np.ma.vstack([dt_decyear, dt_o, dt_int, lat, lon, z]).T
    out = np.ma.vstack([dt_o, dt_int, lat, lon, z]).T
    #Create a mask to ensure all five column values are valid for each point
    mask = ~(np.any(np.ma.getmaskarray(out), axis=1))
    mask *= valid_idx
    out = out[mask]
    valid_idx = ~(np.any(np.ma.getmaskarray(out), axis=1))
    #Lon and lat indices
    xcol = 3
    ycol = 2
    zcol = 4
    if out.shape[0] == 0:
        sys.exit("No points within specified extent\n")
    else:
        print("Spatial filter: %i" % out.shape[0])
    #out_fmt = ['%0.8f', '%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f']
    #out_hdr = ['dt_decyear, dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84']
    out_fmt = ['%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f']
    out_hdr = ['dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84']
    """
    ICESat-1 filters
    """
    #Saturation Correction Flag
    #These are 0 to 5, not_saturated inconsequential applicable not_computed not_applicable
    sat_corr_flg = f.get('Data_40HZ/Quality/sat_corr_flg')[mask]
    #valid_idx *= (sat_corr_flg < 2)
    #Correction to elevation for saturated waveforms
    #Notes suggest this might not be desirable over land
    satElevCorr = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Corrections/d_satElevCorr')[mask], 1.7976931348623157e+308)
    #z[sat_corr_flg < 3] += satElevCorr.filled(0.0)[sat_corr_flg < 3]
    out[:,zcol] += satElevCorr.filled(0.0)
    #Correction to elevation based on post flight analysis for biases determined for each campaign
    ElevBiasCorr = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Corrections/d_ElevBiasCorr')[mask], 1.7976931348623157e+308)
    out[:,zcol] += ElevBiasCorr.filled(0.0)
    #Surface elevation (T/P ellipsoid) minus surface elevation (WGS84 ellipsoid).
    #Approximately 0.7 m, so WGS is lower; need to subtract from d_elev
    deltaEllip = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_deltaEllip')[mask], 1.7976931348623157e+308)
    out[:,zcol] -= deltaEllip
    #These are 1 for valid, 0 for invalid
    valid_idx *= ~(np.ma.getmaskarray(out[:,zcol]))
    print("z corrections: %i" % valid_idx.nonzero()[0].size)
    #The following blocks are optional filters, hard-toggled with if True/False
    if False:
        #Reflectivity, not corrected for atmospheric effects
        reflctUC = np.ma.masked_equal(f.get('Data_40HZ/Reflectivity/d_reflctUC')[mask], 1.7976931348623157e+308)
        #This was minimum used for ice sheets
        min_reflctUC = 0.025
        valid_idx *= (reflctUC > min_reflctUC).data
        print("reflctUC: %i" % valid_idx.nonzero()[0].size)
    if False:
        #The Standard deviation of the difference between the functional fit and the received echo \
        #using alternate parameters. It is directly taken from GLA05 parameter d_wfFitSDev_1
        LandVar = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Surfaces/d_LandVar')[mask], 1.7976931348623157e+308)
        #This was max used for ice sheets
        max_LandVar = 0.04
        valid_idx *= (LandVar < max_LandVar).data
        print("LandVar: %i" % valid_idx.nonzero()[0].size)
    if True:
        #Flag indicating whether the elevations on this record should be used.
        #0 = valid, 1 = not valid
        #NOTE(review): dtype string 'Bool' may not be accepted by newer numpy ('bool' is canonical) - confirm
        elev_use_flg = f.get('Data_40HZ/Quality/elev_use_flg')[mask].astype('Bool')
        valid_idx *= ~elev_use_flg
        print("elev_use_flg: %i" % valid_idx.nonzero()[0].size)
    if False:
        #Cloud contamination; Indicates if Gain > flag value, indicating probable cloud contamination.
        elv_cloud_flg = f.get('Data_40HZ/Elevation_Flags/elv_cloud_flg')[mask].astype('Bool')
        valid_idx *= ~elv_cloud_flg
        print("elv_cloud_flg: %i" % valid_idx.nonzero()[0].size)
    if False:
        #Full resolution 1064 Quality Flag; 0 - 12 indicate Cloud detected
        FRir_qa_flg = f.get('Data_40HZ/Atmosphere/FRir_qa_flg')[mask]
        valid_idx *= (FRir_qa_flg == 15).data
        print("FRir_qa_flg: %i" % valid_idx.nonzero()[0].size)
    if False:
        #This is elevation extracted from SRTM30
        DEM_elv = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_DEM_elv')[mask], 1.7976931348623157e+308)
        z_DEM_diff = np.abs(out[:,zcol] - DEM_elv)
        valid_idx *= (z_DEM_diff < max_z_DEM_diff).data
        print("z_DEM_diff: %i" % valid_idx.nonzero()[0].size)
        #d_DEMhiresArElv is a 9 element array of high resolution DEM values. The array index corresponds to the position of the DEM value relative to the spot. (5) is the footprint center.
        DEMhiresArElv = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_DEMhiresArElv')[mask], 1.7976931348623157e+308)
        DEMhiresArElv_std = np.ma.std(DEMhiresArElv, axis=1)
        valid_idx *= (DEMhiresArElv_std < max_DEMhiresArElv_std).data
        print("max_DEMhiresArElv_std: %i" % valid_idx.nonzero()[0].size)
        #Compute slope
    #Apply cumulative filter to output
    out = out[valid_idx]
    out_fn = os.path.splitext(fn)[0]+'_%s.csv' % sitename
    print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
    out_fmt_str = ', '.join(out_fmt)
    out_hdr_str = ', '.join(out_hdr)
    np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
    iolib.writevrt(out_fn, x='lon', y='lat')
    #Extract our own DEM values - should be better than default GLAS reference DEM stats
    if True:
        print("Loading reference DEM: %s" % refdem_fn)
        dem_ds = gdal.Open(refdem_fn)
        print("Converting coords for DEM")
        dem_mX, dem_mY = geolib.ds_cT(dem_ds, out[:,xcol], out[:,ycol], geolib.wgs_srs)
        print("Sampling")
        dem_samp = geolib.sample(dem_ds, dem_mX, dem_mY, pad='glas')
        abs_dem_z_diff = np.abs(out[:,zcol] - dem_samp[:,0])
        #NOTE(review): valid_idx was used to subset `out` above, so its length
        #presumably no longer matches abs_dem_z_diff unless all entries were True;
        #a fresh assignment (valid_idx = ...) may be intended here - confirm
        valid_idx *= ~(np.ma.getmaskarray(abs_dem_z_diff))
        print("Valid DEM extract: %i" % valid_idx.nonzero()[0].size)
        valid_idx *= (abs_dem_z_diff < max_z_DEM_diff).data
        print("Valid abs DEM diff: %i" % valid_idx.nonzero()[0].size)
        valid_idx *= (dem_samp[:,1] < max_DEMhiresArElv_std).data
        print("Valid DEM mad: %i" % valid_idx.nonzero()[0].size)
        if valid_idx.nonzero()[0].size == 0:
            sys.exit("No valid points remain")
        out = np.ma.hstack([out, dem_samp])
        out_fmt.extend(['%0.2f', '%0.2f'])
        out_hdr.extend(['z_refdem_med_WGS84', 'z_refdem_nmad'])
        #Apply cumulative filter to output
        out = out[valid_idx]
        out_fn = os.path.splitext(out_fn)[0]+'_refdemfilt.csv'
        print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
        out_fmt_str = ', '.join(out_fmt)
        out_hdr_str = ', '.join(out_hdr)
        np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
        iolib.writevrt(out_fn, x='lon', y='lat')
        #This will sample land-use/land-cover or percent bareground products
        #Can be used to isolate points over exposed rock
        #if args.rockfilter:
        if True:
            #This should automatically identify appropriate LULC source based on refdem extent
            lulc_source = dem_mask.get_lulc_source(dem_ds)
            #Looks like NED extends beyond NCLD, force use NLCD for conus
            #if sitename == 'conus':
            #    lulc_source = 'nlcd'
            lulc_ds = dem_mask.get_lulc_ds_full(dem_ds, lulc_source)
            print("Converting coords for LULC")
            lulc_mX, lulc_mY = geolib.ds_cT(lulc_ds, out[:,xcol], out[:,ycol], geolib.wgs_srs)
            print("Sampling LULC: %s" % lulc_source)
            #Note: want to make sure we're not interpolating integer values for NLCD
            #Should be safe with pad=0, even with pad>0, should take median, not mean
            lulc_samp = geolib.sample(lulc_ds, lulc_mX, lulc_mY, pad=0)
            l = lulc_samp[:,0].data
            if lulc_source == 'nlcd':
                #This passes rock and ice pixels
                valid_idx = np.logical_or((l==31),(l==12))
            elif lulc_source == 'bareground':
                #This preserves pixels with bareground percentation >85%
                minperc = 85
                valid_idx = (l >= minperc)
            else:
                print("Unknown LULC source")
            print("LULC: %i" % valid_idx.nonzero()[0].size)
            if l.ndim == 1:
                l = l[:,np.newaxis]
            out = np.ma.hstack([out, l])
            out_fmt.append('%i')
            out_hdr.append('lulc')
            #Apply cumulative filter to output
            out = out[valid_idx]
            out_fn = os.path.splitext(out_fn)[0]+'_lulcfilt.csv'
            print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
            out_fmt_str = ', '.join(out_fmt)
            out_hdr_str = ', '.join(out_hdr)
            np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
            iolib.writevrt(out_fn, x='lon', y='lat')
#Standard CLI entry point guard (also allows import without side effects)
if __name__ == "__main__":
    main()
|
dshean/demcoreg
|
demcoreg/dem_mask.py
|
get_nlcd_fn
|
python
|
def get_nlcd_fn():
#This is original filename, which requires ~17 GB
#nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.img')
#get_nlcd.sh now creates a compressed GTiff, which is 1.1 GB
nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
if not os.path.exists(nlcd_fn):
cmd = ['get_nlcd.sh',]
#subprocess.call(cmd)
sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
return nlcd_fn
|
Calls external shell script `get_nlcd.sh` to fetch:
2011 Land Use Land Cover (nlcd) grids, 30 m
http://www.mrlc.gov/nlcd11_leg.php
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/dem_mask.py#L34-L49
| null |
#! /usr/bin/env python
"""
Utility to automate reference surface identification for raster co-registration
Note: Initial run may take a long time to download and process required data (NLCD, global bareground, glacier polygons)
Can control location of these data files with DATADIR environmental variable
export DATADIR=dir
Dependencies: gdal, wget, requests, bs4
"""
#To do:
#Integrate 1-km LULC data: http://www.landcover.org/data/landcover/
#TODO: need to clean up toa handling
import sys
import os
import subprocess
import glob
import argparse
from osgeo import gdal, ogr, osr
import numpy as np
from datetime import datetime, timedelta
from pygeotools.lib import iolib, warplib, geolib, timelib
datadir = iolib.get_datadir()
def get_nlcd_fn():
    """Return local path to the 2011 NLCD land-cover GTiff (30 m).

    The compressed GTiff (~1.1 GB) is produced by the external helper
    `get_nlcd.sh`; see http://www.mrlc.gov/nlcd11_leg.php
    """
    #get_nlcd.sh converts the original ~17 GB .img to a compressed GTiff
    fn = os.path.join(datadir,
            'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
    if os.path.exists(fn):
        return fn
    cmd = ['get_nlcd.sh',]
    #subprocess.call(cmd)
    sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
def get_bareground_fn():
    """Return local path to the ~2010 global bare ground VRT (30 m).

    Data are fetched/compressed by the external helper `get_bareground.sh`;
    see http://landcover.usgs.gov/glc/BareGroundDescriptionAndDownloads.php
    Note: the full unzipped archive is ~64 GB.
    """
    fn = os.path.join(datadir, 'bare2010/bare2010.vrt')
    if os.path.exists(fn):
        return fn
    cmd = ['get_bareground.sh',]
    #subprocess.call(cmd)
    sys.exit("Missing bareground data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#Download latest global RGI glacier db
def get_glacier_poly():
    """Return local path to the merged global RGI glacier outline shapefile.

    The external helper `get_rgi.sh` downloads the regional RGI shapefiles
    and merges them into one global shp; see http://www.glims.org/RGI/
    """
    #rgi_fn = os.path.join(datadir, 'rgi50/regions/rgi50_merge.shp')
    #Updated to rgi60; ideally this path would be returned by get_rgi.sh
    fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')
    if os.path.exists(fn):
        return fn
    cmd = ['get_rgi.sh',]
    #subprocess.call(cmd)
    sys.exit("Missing rgi glacier data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#Update glacier polygons
def get_icemask(ds, glac_shp_fn=None):
    """Generate glacier polygon raster mask for input Dataset res/extent.

    Parameters
    ----------
    ds : gdal.Dataset
        Dataset defining the output grid (projection, extent, resolution).
    glac_shp_fn : str, optional
        Glacier outline shapefile; defaults to the merged RGI shapefile
        returned by get_glacier_poly().

    Returns
    -------
    numpy.ndarray or None
        Rasterized glacier mask, or None when the shapefile cannot be found.
        (Previously a missing shapefile raised UnboundLocalError on return.)
    """
    print("Masking glaciers")
    if glac_shp_fn is None:
        glac_shp_fn = get_glacier_poly()
    #Initialize so a missing shapefile yields None instead of an unbound local
    icemask = None
    if not os.path.exists(glac_shp_fn):
        print("Unable to locate glacier shp: %s" % glac_shp_fn)
    else:
        print("Found glacier shp: %s" % glac_shp_fn)
        #All of the proj, extent, handling should now occur in shp2array
        icemask = geolib.shp2array(glac_shp_fn, ds)
    return icemask
#Create nlcd mask
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
    """Generate a boolean raster mask from NLCD land-cover classes.

    NLCD class codes used here: 11 open water, 12 ice, 31 rock,
    41/42/43 deciduous/evergreen/mixed forest, 52 shrub.
    'rock+ice+water' preserves the most pixels, though it can be
    problematic over areas with lakes.

    Returns None for an unrecognized filter name.
    """
    print("Loading NLCD LULC")
    l = nlcd_ds.GetRasterBand(1).ReadAsArray()
    print("Filtering NLCD LULC with: %s" % filter)
    #Positive filters: keep pixels belonging to these classes
    keep_classes = {
        'rock': (31,),
        'rock+ice': (31, 12),
        'rock+ice+water': (31, 12, 11),
    }
    forest_classes = (41, 42, 43)
    if filter in keep_classes:
        mask = np.isin(l, keep_classes[filter])
    elif filter == 'not_forest':
        mask = ~np.isin(l, forest_classes)
    elif filter == 'not_forest+not_water':
        mask = ~np.isin(l, forest_classes + (11,))
    else:
        print("Invalid mask type")
        mask = None
    #Optionally write out the raw class grid alongside the mask
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(l, out_fn, nlcd_ds)
    l = None
    return mask
def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):
    """Generate raster mask for exposed bare ground from global bareground data.

    Parameters
    ----------
    bareground_ds : gdal.Dataset
        Single-band dataset of percent bare ground (0-100).
    bareground_thresh : float, optional
        Pixels with percent bare ground > this value are True in the mask.
    out_fn : str, optional
        If provided, also write the raw percent-bareground grid to this GTiff.

    Returns
    -------
    numpy.ndarray
        Boolean mask, True where bare ground exceeds the threshold.
    """
    #Validate the threshold before the (potentially expensive) raster read
    if bareground_thresh < 0.0 or bareground_thresh > 100.0:
        sys.exit("Invalid bare ground percentage")
    print("Loading bareground")
    b = bareground_ds.GetRasterBand(1)
    l = b.ReadAsArray()
    print("Masking pixels with <%0.1f%% bare ground" % bareground_thresh)
    mask = (l>bareground_thresh)
    #Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(l, out_fn, bareground_ds)
    l = None
    return mask
def get_snodas_ds(dem_dt, code=1036):
    """Function to fetch and process SNODAS snow depth products for input datetime

    http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/index.html

    Product codes:
    1036 is snow depth
    1034 is SWE

    filename format: us_ssmv11036tS__T0001TTNATS2015042205HP001.Hdr

    Returns a GDAL dataset for the extracted grid, or None when no product
    is available for the input date.
    """
    import tarfile
    import gzip
    snodas_ds = None
    snodas_url_str = None
    outdir = os.path.join(datadir, 'snodas')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    #Note: unmasked products (beyond CONUS) are only available from 2010-present
    if dem_dt >= datetime(2003,9,30) and dem_dt < datetime(2010,1,1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/masked/%Y/%m_%b/SNODAS_%Y%m%d.tar'
        tar_subfn_str_fmt = 'us_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    elif dem_dt >= datetime(2010,1,1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/unmasked/%Y/%m_%b/SNODAS_unmasked_%Y%m%d.tar'
        tar_subfn_str_fmt = './zz_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    else:
        print("No SNODAS data available for input date")
    if snodas_url_str is not None:
        snodas_url = dem_dt.strftime(snodas_url_str)
        snodas_tar_fn = iolib.getfile(snodas_url, outdir=outdir)
        print("Unpacking")
        tar = tarfile.open(snodas_tar_fn)
        #gunzip to extract both dat and Hdr files, tar.gz
        for ext in ('dat', 'Hdr'):
            #First %i fills the product code; strftime then fills the date fields
            tar_subfn_str = tar_subfn_str_fmt % (code, ext)
            tar_subfn_gz = dem_dt.strftime(tar_subfn_str)
            tar_subfn = os.path.splitext(tar_subfn_gz)[0]
            print(tar_subfn)
            if outdir is not None:
                tar_subfn = os.path.join(outdir, tar_subfn)
            if not os.path.exists(tar_subfn):
                #Should be able to do this without writing intermediate gz to disk
                #NOTE: tar.extract writes the .gz into the current working directory
                tar.extract(tar_subfn_gz)
                with gzip.open(tar_subfn_gz, 'rb') as f:
                    outf = open(tar_subfn, 'wb')
                    outf.write(f.read())
                    outf.close()
                os.remove(tar_subfn_gz)
        #Need to delete 'Created by module comment' line from Hdr, can contain too many characters
        bad_str = 'Created by module comment'
        #tar_subfn now refers to the Hdr file (last iteration of the loop above)
        snodas_fn = tar_subfn
        f = open(snodas_fn)
        output = []
        for line in f:
            if not bad_str in line:
                output.append(line)
        f.close()
        f = open(snodas_fn, 'w')
        f.writelines(output)
        f.close()
        #Return GDAL dataset for extracted product
        snodas_ds = gdal.Open(snodas_fn)
    return snodas_ds
def get_modis_tile_list(ds):
    """Identify MODIS sinusoidal-grid tiles that intersect the dataset geometry.

    modis_grid.py contains a dictionary of tile boundaries (tile name ->
    WKT polygon ring from bbox).
    See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
    """
    from demcoreg import modis_grid
    #Materialize OGR geometries for every candidate tile
    tile_geoms = {name: ogr.CreateGeometryFromWkt(wkt)
            for name, wkt in modis_grid.modis_dict.items()}
    #Duplicate the dataset footprint and reproject it to WGS84
    geom = geolib.geom_dup(geolib.ds_geom(ds))
    ct = osr.CoordinateTransformation(geom.GetSpatialReference(), geolib.wgs_srs)
    geom.Transform(ct)
    return [name for name, tile_geom in tile_geoms.items() if geom.Intersects(tile_geom)]
def get_modscag_fn_list(dem_dt, tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05'), pad_days=7):
    """Function to fetch and process MODSCAG fractional snow cover products for input datetime

    Products are tiled in MODIS sinusoidal projection

    example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif

    Returns a list of daily mosaic VRT filenames covering
    dem_dt +/- pad_days (days with no available tiles are skipped).
    """
    #Could also use global MODIS 500 m snowcover grids, 8 day
    #http://nsidc.org/data/docs/daac/modis_v5/mod10a2_modis_terra_snow_8-day_global_500m_grid.gd.html
    #These are HDF4, sinusoidal
    #Should be able to load up with warplib without issue
    import re
    import requests
    from bs4 import BeautifulSoup
    auth = iolib.get_auth()
    pad_days = timedelta(days=pad_days)
    dt_list = timelib.dt_range(dem_dt-pad_days, dem_dt+pad_days+timedelta(1), timedelta(1))
    outdir = os.path.join(datadir, 'modscag')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    out_vrt_fn_list = []
    for dt in dt_list:
        out_vrt_fn = os.path.join(outdir, dt.strftime('%Y%m%d_snow_fraction.vrt'))
        #If we already have a vrt and it contains all of the necessary tiles
        if os.path.exists(out_vrt_fn):
            vrt_ds = gdal.Open(out_vrt_fn)
            if np.all([np.any([tile in sub_fn for sub_fn in vrt_ds.GetFileList()]) for tile in tile_list]):
                out_vrt_fn_list.append(out_vrt_fn)
                continue
        #Otherwise, download missing tiles and rebuild
        #Try to use historic products
        modscag_fn_list = []
        #Note: not all tiles are available for same date ranges in historic vs. real-time
        #Need to repeat search tile-by-tile
        for tile in tile_list:
            modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag-historic/%Y/%j/'
            modscag_url_base = dt.strftime(modscag_url_str)
            print("Trying: %s" % modscag_url_base)
            r = requests.get(modscag_url_base, auth=auth)
            modscag_url_fn = []
            if r.ok:
                #Scrape the directory listing for this tile's snow_fraction GTiff
                parsed_html = BeautifulSoup(r.content, "html.parser")
                modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
            if not modscag_url_fn:
                #Couldn't find historic, try to use real-time products
                modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag/%Y/%j/'
                modscag_url_base = dt.strftime(modscag_url_str)
                print("Trying: %s" % modscag_url_base)
                r = requests.get(modscag_url_base, auth=auth)
                if r.ok:
                    parsed_html = BeautifulSoup(r.content, "html.parser")
                    modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
            if not modscag_url_fn:
                print("Unable to fetch MODSCAG for %s" % dt)
            else:
                #OK, we got
                #Now extract actual tif filenames to fetch from html
                parsed_html = BeautifulSoup(r.content, "html.parser")
                #Fetch all tiles
                modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
                if modscag_url_fn:
                    modscag_url_fn = modscag_url_fn[0]
                    modscag_url = os.path.join(modscag_url_base, modscag_url_fn)
                    print(modscag_url)
                    modscag_fn = os.path.join(outdir, os.path.split(modscag_url_fn)[-1])
                    if not os.path.exists(modscag_fn):
                        iolib.getfile2(modscag_url, auth=auth, outdir=outdir)
                    modscag_fn_list.append(modscag_fn)
        #Mosaic tiles - currently a hack
        if modscag_fn_list:
            cmd = ['gdalbuildvrt', '-vrtnodata', '255', out_vrt_fn]
            cmd.extend(modscag_fn_list)
            print(cmd)
            subprocess.call(cmd, shell=False)
            out_vrt_fn_list.append(out_vrt_fn)
    return out_vrt_fn_list
def proc_modscag(fn_list, extent=None, t_srs=None):
    """Composite MODSCAG fractional snow cover grids over a date range.

    Warps all inputs to a common grid, masks unphysical values (>100%,
    e.g. clouds/bad pixels), writes count/max/min/median composites to
    GTiff, and returns a GDAL dataset for the median composite.
    """
    #Use cubic spline here for improve upsampling
    ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
    stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list)
    #3D masked stack; no need for most of malib stack machinery here
    ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)

    def _finalize(a):
        #Cast composite to uint8 with 0 as the output fill value
        a = a.astype(np.uint8)
        a.set_fill_value(0)
        return a

    composites = (
        ('count', _finalize(np.ma.masked_equal(ma_stack.count(axis=0), 0))),
        ('max', _finalize(ma_stack.max(axis=0))),
        ('min', _finalize(ma_stack.min(axis=0))),
        ('med', _finalize(np.ma.median(ma_stack, axis=0))),
    )
    for suffix, comp in composites:
        out_fn = stack_fn + '_%s.tif' % suffix
        iolib.writeGTiff(comp, out_fn, ds_list[0])
    #out_fn now names the median composite; reopen and return it
    ds = gdal.Open(out_fn)
    return ds
def get_toa_fn(dem_fn):
    """Locate (or generate via toa.sh) the top-of-atmosphere reflectance
    image for the pair directory containing the input DEM.

    Exits with an error message if no TOA dataset can be found or built.
    """
    import re
    dir_parts = os.path.split(os.path.realpath(dem_fn))[0].split(os.sep)
    #Index of the top-level pair directory containing toa
    #(e.g. WV02_20140514_1030010031114100_1030010030896000)
    #NOTE(review): pattern has a duplicated optional group; presumably just matches dirs ending in '00' - confirm
    r_idx = [i for i, item in enumerate(dir_parts) if re.search('(_10)*(_10)*00$', item)]
    toa_fn = None
    if r_idx:
        #Reconstruct the pair directory path and look for an existing toa.tif
        pair_dir = (os.sep).join(dir_parts[0:r_idx[0]+1])
        matches = glob.glob(os.path.join(pair_dir, '*toa.tif'))
        if not matches:
            #No TOA yet - attempt to generate one from an available ortho image
            if glob.glob(os.path.join(pair_dir, '*ortho*.tif')):
                cmd = ['toa.sh', pair_dir]
                print(cmd)
                subprocess.call(cmd)
                matches = glob.glob(os.path.join(pair_dir, '*toa.tif'))
        if matches:
            toa_fn = matches[0]
    if toa_fn is None:
        sys.exit("Unable to locate TOA dataset")
    return toa_fn
#TOA reflectance filter
def get_toa_mask(toa_ds, toa_thresh=0.4):
    """Return a boolean mask from TOA reflectance.

    True for valid surfaces at/below the reflectance threshold (likely
    snow-free); False for bright pixels above the threshold or nodata.
    """
    print("Applying TOA filter (masking values >= %0.2f)" % toa_thresh)
    toa = iolib.ds_getma(toa_ds)
    #Combined mask: original nodata plus reflectance above threshold
    bright_or_nodata = np.ma.getmaskarray(np.ma.masked_greater(toa, toa_thresh))
    #Invert so True marks usable (valid, dark) surfaces
    return ~bright_or_nodata
def check_mask_list(mask_list):
    """Return mask_list filtered to entries present in the module-level
    mask_choices, printing a warning for each invalid entry."""
    valid = []
    for choice in mask_list:
        if choice in mask_choices:
            valid.append(choice)
        else:
            print("Invalid mask choice: %s" % choice)
    return valid
def get_mask(dem_ds, mask_list, dem_fn=None, writeout=False, outdir=None, args=None):
    """Generate composite mask of control surfaces for the input DEM dataset.

    The requested mask sources are combined with logical AND (True = valid
    control surface), then inverted before return so the output follows the
    numpy masked-array convention (True = invalid pixel).

    Parameters
    ----------
    dem_ds : gdal.Dataset
        Input DEM dataset defining output grid, extent and projection
    mask_list : list of str
        Mask sources to apply; entries must come from mask_choices.
        If 'none' is present, no masking is performed and False is returned.
    dem_fn : str, optional
        DEM filename. NOTE(review): appears to be required for snodas/modscag
        (timestamp), toa, writeout basenames, and when outdir is None -
        confirm callers always supply it for those combinations.
    writeout : bool, optional
        Write intermediate mask products to disk
    outdir : str, optional
        Output directory (default: directory containing dem_fn)
    args : argparse.Namespace, optional
        Threshold/filter options; getparser() defaults are used when None

    Returns
    -------
    newmask : bool np.ndarray (or the scalar False when 'none' is requested)
        True for pixels that should be masked (invalid)
    """
    mask_list = check_mask_list(mask_list)
    if 'none' in mask_list:
        newmask = False
    else:
        #Basename for output files
        if outdir is not None:
            if not os.path.exists(outdir):
                os.makedirs(outdir)
        else:
            #NOTE(review): assumes dem_fn is not None when outdir is None
            outdir = os.path.split(dem_fn)[0]
        if dem_fn is not None:
            #Extract DEM timestamp
            dem_dt = timelib.fn_getdatetime(dem_fn)
            out_fn_base = os.path.join(outdir, os.path.splitext(dem_fn)[0])
        if args is None:
            #Get default values
            parser = getparser()
            args = parser.parse_args(['',])
        #Cascading mask: True = valid pixel at this stage; inverted at the end
        newmask = True
        if 'glaciers' in mask_list:
            icemask = get_icemask(dem_ds)
            if writeout:
                out_fn = out_fn_base+'_ice_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(icemask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(icemask, newmask)
        #Need to process NLCD separately, with nearest neighbor interpolation
        #Fix: compare strings with != rather than identity test `is not`
        if 'nlcd' in mask_list and args.nlcd_filter != 'none':
            rs = 'near'
            nlcd_ds = gdal.Open(get_nlcd_fn())
            nlcd_ds_warp = warplib.memwarp_multi([nlcd_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r=rs)[0]
            out_fn = None
            if writeout:
                out_fn = out_fn_base+'_nlcd.tif'
            nlcdmask = get_nlcd_mask(nlcd_ds_warp, filter=args.nlcd_filter, out_fn=out_fn)
            if writeout:
                out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(nlcdmask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(nlcdmask, newmask)
        if 'bareground' in mask_list and args.bareground_thresh > 0:
            bareground_ds = gdal.Open(get_bareground_fn())
            bareground_ds_warp = warplib.memwarp_multi([bareground_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
            out_fn = None
            if writeout:
                out_fn = out_fn_base+'_bareground.tif'
            baregroundmask = get_bareground_mask(bareground_ds_warp, bareground_thresh=args.bareground_thresh, out_fn=out_fn)
            if writeout:
                out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(baregroundmask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(baregroundmask, newmask)
        if 'snodas' in mask_list and args.snodas_thresh > 0:
            #Get SNODAS snow depth products for DEM timestamp
            snodas_min_dt = datetime(2003,9,30)
            if dem_dt >= snodas_min_dt:
                snodas_ds = get_snodas_ds(dem_dt)
                if snodas_ds is not None:
                    snodas_ds_warp = warplib.memwarp_multi([snodas_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
                    #snow depth values are mm, convert to meters
                    snodas_depth = iolib.ds_getma(snodas_ds_warp)/1000.
                    if snodas_depth.count() > 0:
                        print("Applying SNODAS snow depth filter (masking values >= %0.2f m)" % args.snodas_thresh)
                        out_fn = None
                        if writeout:
                            out_fn = out_fn_base+'_snodas_depth.tif'
                            print("Writing out %s" % out_fn)
                            iolib.writeGTiff(snodas_depth, out_fn, src_ds=dem_ds)
                        snodas_mask = np.ma.masked_greater(snodas_depth, args.snodas_thresh)
                        #True for shallow-snow (valid) pixels
                        snodas_mask = ~(np.ma.getmaskarray(snodas_mask))
                        if writeout:
                            out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                            print("Writing out %s" % out_fn)
                            iolib.writeGTiff(snodas_mask, out_fn, src_ds=dem_ds)
                        newmask = np.logical_and(snodas_mask, newmask)
                    else:
                        print("SNODAS grid for input location and timestamp is empty")
        #These tiles cover CONUS
        #tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05')
        if 'modscag' in mask_list and args.modscag_thresh > 0:
            modscag_min_dt = datetime(2000,2,24)
            if dem_dt < modscag_min_dt:
                print("Warning: DEM timestamp (%s) is before earliest MODSCAG timestamp (%s)" \
                        % (dem_dt, modscag_min_dt))
            else:
                tile_list = get_modis_tile_list(dem_ds)
                print(tile_list)
                pad_days=7
                modscag_fn_list = get_modscag_fn_list(dem_dt, tile_list=tile_list, pad_days=pad_days)
                if modscag_fn_list:
                    modscag_ds = proc_modscag(modscag_fn_list, extent=dem_ds, t_srs=dem_ds)
                    modscag_ds_warp = warplib.memwarp_multi([modscag_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
                    print("Applying MODSCAG fractional snow cover percent filter (masking values >= %0.1f%%)" % args.modscag_thresh)
                    modscag_fsca = iolib.ds_getma(modscag_ds_warp)
                    out_fn = None
                    if writeout:
                        out_fn = out_fn_base+'_modscag_fsca.tif'
                        print("Writing out %s" % out_fn)
                        iolib.writeGTiff(modscag_fsca, out_fn, src_ds=dem_ds)
                    modscag_mask = (modscag_fsca.filled(0) >= args.modscag_thresh)
                    #True for low-snow-fraction (valid) pixels
                    modscag_mask = ~(modscag_mask)
                    if writeout:
                        out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                        print("Writing out %s" % out_fn)
                        iolib.writeGTiff(modscag_mask, out_fn, src_ds=dem_ds)
                    newmask = np.logical_and(modscag_mask, newmask)
        #Use reflectance values to estimate snowcover
        if 'toa' in mask_list:
            #Use top of atmosphere scaled reflectance values (0-1)
            toa_ds = gdal.Open(get_toa_fn(dem_fn))
            toa_mask = get_toa_mask(toa_ds, args.toa_thresh)
            if writeout:
                out_fn = out_fn_base+'_toa_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(toa_mask, newmask)
        if False:
            #Filter based on expected snowline
            #Simplest approach uses altitude cutoff
            #NOTE(review): disabled block; references `dem`, undefined in this scope
            max_elev = 1500
            newdem = np.ma.masked_greater(dem, max_elev)
            newmask = np.ma.getmaskarray(newdem)
        print("Generating final mask to use for reference surfaces, and applying to input DEM")
        #Now invert to use to create final masked array
        #True (1) represents "invalid" pixel to match numpy ma convention
        newmask = ~newmask
        #Dilate the mask
        if args.dilate is not None:
            niter = args.dilate
            print("Dilating mask with %i iterations" % niter)
            from scipy import ndimage
            #ndimage.binary_dilation directly (ndimage.morphology namespace is deprecated)
            newmask = ~(ndimage.binary_dilation(~newmask, iterations=niter))
    return newmask
#Valid mask-source identifiers accepted by get_mask()/check_mask_list();
#'none' disables masking entirely
#Can add "mask_list" argument, instead of specifying individually
mask_choices = ['toa', 'snodas', 'modscag', 'bareground', 'glaciers', 'nlcd', 'none']
def getparser():
    """Construct the argument parser for the dem_mask command-line tool."""
    p = argparse.ArgumentParser(description="Identify control surfaces for DEM co-registration")
    #Choices for the NLCD land-cover filter (--nlcd_filter)
    nlcd_filter_choices = ['rock', 'rock+ice', 'rock+ice+water', 'not_forest', 'not_forest+not_water', 'none']
    p.add_argument('dem_fn', type=str, help='DEM filename')
    p.add_argument('--outdir', default=None, help='Directory for output products')
    p.add_argument('--writeout', action='store_true', help='Write out all intermediate products, instead of only final tif')
    p.add_argument('--toa', action='store_true', help='Use top-of-atmosphere reflectance values (requires pregenerated "dem_fn_toa.tif")')
    p.add_argument('--toa_thresh', type=float, default=0.4, help='Top-of-atmosphere reflectance threshold (default: %(default)s, valid range 0.0-1.0), mask values greater than this value')
    p.add_argument('--snodas', action='store_true', help='Use SNODAS snow depth products')
    p.add_argument('--snodas_thresh', type=float, default=0.2, help='SNODAS snow depth threshold (default: %(default)s m), mask values greater than this value')
    p.add_argument('--modscag', action='store_true', help='Use MODSCAG fractional snow cover products')
    p.add_argument('--modscag_thresh', type=float, default=50, help='MODSCAG fractional snow cover percent threshold (default: %(default)s%%, valid range 0-100), mask greater than this value')
    p.add_argument('--bareground', action='store_true', help="Enable bareground filter")
    p.add_argument('--bareground_thresh', type=float, default=60, help='Percent bareground threshold (default: %(default)s%%, valid range 0-100), mask greater than this value (only relevant for global bareground data)')
    p.add_argument('--glaciers', action='store_true', help="Mask glacier polygons")
    p.add_argument('--nlcd', action='store_true', help="Enable NLCD LULC filter (for CONUS)")
    p.add_argument('--nlcd_filter', type=str, default='not_forest', choices=nlcd_filter_choices, help='Preserve these NLCD pixels (default: %(default)s)')
    p.add_argument('--dilate', type=int, default=None, help='Dilate mask with this many iterations (default: %(default)s)')
    return p
def main():
    """Command-line driver: mask a DEM to control surfaces for co-registration."""
    parser = getparser()
    args = parser.parse_args()
    #Translate boolean flags into the mask-source list expected by get_mask
    mask_list = []
    if args.toa: mask_list.append('toa')
    if args.snodas: mask_list.append('snodas')
    if args.modscag: mask_list.append('modscag')
    if args.bareground: mask_list.append('bareground')
    if args.glaciers: mask_list.append('glaciers')
    if args.nlcd: mask_list.append('nlcd')
    if not mask_list:
        parser.print_help()
        sys.exit("Must specify at least one mask type")
    dem_fn = args.dem_fn
    dem_ds = gdal.Open(dem_fn)
    print(dem_fn)
    #Get DEM masked array
    dem = iolib.ds_getma(dem_ds)
    print("%i valid pixels in original input tif" % dem.count())
    #get_mask returns True for pixels that should be masked (invalid)
    newmask = get_mask(dem_ds, mask_list, dem_fn=dem_fn, writeout=args.writeout, outdir=args.outdir, args=args)
    #Apply mask to original DEM - use these surfaces for co-registration
    newdem = np.ma.array(dem, mask=newmask)
    #Check that we have enough pixels, good distribution
    min_validpx_count = 100
    min_validpx_std = 10
    validpx_count = newdem.count()
    validpx_std = newdem.std()
    print("%i valid pixels in masked output tif to be used as ref" % validpx_count)
    print("%0.2f std in masked output tif to be used as ref" % validpx_std)
    #if (validpx_count > min_validpx_count) and (validpx_std > min_validpx_std):
    if (validpx_count > min_validpx_count):
        #Fix: args.outdir defaults to None, and os.path.join(None, ...) raises
        #TypeError - only join when an output directory was actually specified
        if args.outdir is not None:
            out_fn = os.path.join(args.outdir, os.path.splitext(dem_fn)[0]+'_ref.tif')
        else:
            out_fn = os.path.splitext(dem_fn)[0]+'_ref.tif'
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(newdem, out_fn, src_ds=dem_ds)
    else:
        print("Not enough valid pixels!")

if __name__ == "__main__":
    main()
|
dshean/demcoreg
|
demcoreg/dem_mask.py
|
get_bareground_fn
|
python
|
def get_bareground_fn():
bg_fn = os.path.join(datadir, 'bare2010/bare2010.vrt')
if not os.path.exists(bg_fn):
cmd = ['get_bareground.sh',]
sys.exit("Missing bareground data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#subprocess.call(cmd)
return bg_fn
|
Calls external shell script `get_bareground.sh` to fetch:
~2010 global bare ground, 30 m
Note: unzipped file size is 64 GB! Original products are uncompressed, and tiles are available globally (including empty data over ocean)
The shell script will compress all downloaded tiles using lossless LZW compression.
http://landcover.usgs.gov/glc/BareGroundDescriptionAndDownloads.php
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/dem_mask.py#L51-L67
| null |
#! /usr/bin/env python
"""
Utility to automate reference surface identification for raster co-registration
Note: Initial run may take a long time to download and process required data (NLCD, global bareground, glacier polygons)
Can control location of these data files with DATADIR environmental variable
export DATADIR=dir
Dependencies: gdal, wget, requests, bs4
"""
#To do:
#Integrate 1-km LULC data: http://www.landcover.org/data/landcover/
#TODO: need to clean up toa handling
import sys
import os
import subprocess
import glob
import argparse
from osgeo import gdal, ogr, osr
import numpy as np
from datetime import datetime, timedelta
from pygeotools.lib import iolib, warplib, geolib, timelib
datadir = iolib.get_datadir()
def get_nlcd_fn():
    """Return path to the 2011 NLCD land-cover GTiff (30 m).

    The file is produced by the external `get_nlcd.sh` helper script, which
    downloads the original ~17 GB .img and repackages it as a ~1.1 GB
    compressed GTiff.
    http://www.mrlc.gov/nlcd11_leg.php
    """
    nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
    if os.path.exists(nlcd_fn):
        return nlcd_fn
    #Missing product: bail out with instructions rather than downloading here
    cmd = ['get_nlcd.sh',]
    sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#Download latest global RGI glacier db
def get_glacier_poly():
    """Return path to the merged global RGI v6.0 glacier outline shapefile.

    Produced by the external `get_rgi.sh` helper, which downloads the full
    RGI database (rgi50.zip is 410 MB), unzips it, and merges the regional
    shp files into a single global shp.
    http://www.glims.org/RGI/
    """
    #rgi50 path kept for reference: 'rgi50/regions/rgi50_merge.shp'
    rgi_fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')
    if os.path.exists(rgi_fn):
        return rgi_fn
    #Missing product: bail out with instructions rather than downloading here
    cmd = ['get_rgi.sh',]
    sys.exit("Missing rgi glacier data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#Update glacier polygons
def get_icemask(ds, glac_shp_fn=None):
    """Generate glacier polygon raster mask for input Dataset res/extent.

    Parameters
    ----------
    ds : gdal.Dataset
        Dataset defining the output grid for rasterization
    glac_shp_fn : str, optional
        Glacier outline shapefile (default: global RGI shp from get_glacier_poly)

    Returns
    -------
    icemask or None
        Rasterized glacier polygons on the grid of ds, or None when the
        shapefile cannot be located.
        Fix: icemask is now initialized, so a missing shapefile returns None
        instead of raising UnboundLocalError at the return statement.
    """
    print("Masking glaciers")
    if glac_shp_fn is None:
        glac_shp_fn = get_glacier_poly()
    icemask = None
    if not os.path.exists(glac_shp_fn):
        print("Unable to locate glacier shp: %s" % glac_shp_fn)
    else:
        print("Found glacier shp: %s" % glac_shp_fn)
        #All of the proj, extent, handling should now occur in shp2array
        icemask = geolib.shp2array(glac_shp_fn, ds)
    return icemask
#Create nlcd mask
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
    """Generate raster mask for specified NLCD LULC filter.

    Returns a boolean array: True = pixel preserved by the filter,
    False = pixel to be masked (or None for an unrecognized filter name).
    NLCD class codes referenced here: 11 open water (includes rivers),
    12 ice, 31 rock, 41/42/43 forest, 52 shrub.
    If out_fn is given, the raw LULC grid is also written to disk.
    """
    print("Loading NLCD LULC")
    l = nlcd_ds.GetRasterBand(1).ReadAsArray()
    print("Filtering NLCD LULC with: %s" % filter)
    #Using 'rock+ice+water' preserves the most pixels, although could be
    #problematic over areas with lakes
    keep_codes = {
        'rock': [31],
        'rock+ice': [31, 12],
        'rock+ice+water': [31, 12, 11],
    }
    drop_codes = {
        'not_forest': [41, 42, 43],
        'not_forest+not_water': [41, 42, 43, 11],
    }
    if filter in keep_codes:
        mask = np.isin(l, keep_codes[filter])
    elif filter in drop_codes:
        mask = ~np.isin(l, drop_codes[filter])
    else:
        print("Invalid mask type")
        mask = None
    #Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(l, out_fn, nlcd_ds)
    l = None
    return mask
def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):
    """Generate raster mask for exposed bare ground from global bareground data.

    Returns a boolean array: True = percent bare ground above
    bareground_thresh (pixel preserved), False = pixel to be masked.
    If out_fn is given, the raw bareground grid is also written to disk.
    Exits when the threshold falls outside [0, 100].
    """
    print("Loading bareground")
    l = bareground_ds.GetRasterBand(1).ReadAsArray()
    print("Masking pixels with <%0.1f%% bare ground" % bareground_thresh)
    #Threshold is a percentage, so reject anything outside [0, 100]
    if bareground_thresh < 0.0 or bareground_thresh > 100.0:
        sys.exit("Invalid bare ground percentage")
    mask = (l > bareground_thresh)
    #Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(l, out_fn, bareground_ds)
    l = None
    return mask
def get_snodas_ds(dem_dt, code=1036):
    """Function to fetch and process SNODAS snow depth products for input datetime

    http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/index.html

    Product codes:
    1036 is snow depth
    1034 is SWE

    filename format: us_ssmv11036tS__T0001TTNATS2015042205HP001.Hdr

    Returns a GDAL dataset for the extracted product, or None when no SNODAS
    data is available for the input date.
    """
    import tarfile
    import gzip
    #Default return value when no product is available or date is out of range
    snodas_ds = None
    snodas_url_str = None
    outdir = os.path.join(datadir, 'snodas')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    #Note: unmasked products (beyond CONUS) are only available from 2010-present
    if dem_dt >= datetime(2003,9,30) and dem_dt < datetime(2010,1,1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/masked/%Y/%m_%b/SNODAS_%Y%m%d.tar'
        tar_subfn_str_fmt = 'us_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    elif dem_dt >= datetime(2010,1,1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/unmasked/%Y/%m_%b/SNODAS_unmasked_%Y%m%d.tar'
        tar_subfn_str_fmt = './zz_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    else:
        print("No SNODAS data available for input date")
    if snodas_url_str is not None:
        #Download daily tarball and extract the product for the requested code
        snodas_url = dem_dt.strftime(snodas_url_str)
        snodas_tar_fn = iolib.getfile(snodas_url, outdir=outdir)
        print("Unpacking")
        tar = tarfile.open(snodas_tar_fn)
        #gunzip to extract both dat and Hdr files, tar.gz
        for ext in ('dat', 'Hdr'):
            tar_subfn_str = tar_subfn_str_fmt % (code, ext)
            tar_subfn_gz = dem_dt.strftime(tar_subfn_str)
            tar_subfn = os.path.splitext(tar_subfn_gz)[0]
            print(tar_subfn)
            if outdir is not None:
                tar_subfn = os.path.join(outdir, tar_subfn)
            if not os.path.exists(tar_subfn):
                #Should be able to do this without writing intermediate gz to disk
                #NOTE(review): tar.extract writes the .gz to the current working
                #directory before it is decompressed and removed
                tar.extract(tar_subfn_gz)
                with gzip.open(tar_subfn_gz, 'rb') as f:
                    outf = open(tar_subfn, 'wb')
                    outf.write(f.read())
                    outf.close()
                os.remove(tar_subfn_gz)
        #Need to delete 'Created by module comment' line from Hdr, can contain too many characters
        bad_str = 'Created by module comment'
        #tar_subfn here is the last loop product (the .Hdr file)
        snodas_fn = tar_subfn
        f = open(snodas_fn)
        output = []
        for line in f:
            if not bad_str in line:
                output.append(line)
        f.close()
        #Rewrite the Hdr in place without the offending line
        f = open(snodas_fn, 'w')
        f.writelines(output)
        f.close()
        #Return GDAL dataset for extracted product
        snodas_ds = gdal.Open(snodas_fn)
    return snodas_ds
def get_modis_tile_list(ds):
    """Identify MODIS tiles that intersect the input dataset geometry.

    modis_grid.py contains a dictionary of tile boundaries (tile name to WKT
    polygon ring from bbox).
    See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
    """
    from demcoreg import modis_grid
    #Duplicate the dataset footprint and transform to WGS84 for comparison
    #against the tile polygons
    ds_geom = geolib.geom_dup(geolib.ds_geom(ds))
    xform = osr.CoordinateTransformation(ds_geom.GetSpatialReference(), geolib.wgs_srs)
    ds_geom.Transform(xform)
    tile_list = []
    for tile_name, tile_wkt in list(modis_grid.modis_dict.items()):
        tile_geom = ogr.CreateGeometryFromWkt(tile_wkt)
        if ds_geom.Intersects(tile_geom):
            tile_list.append(tile_name)
    return tile_list
def get_modscag_fn_list(dem_dt, tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05'), pad_days=7):
    """Function to fetch and process MODSCAG fractional snow cover products for input datetime

    Products are tiled in MODIS sinusoidal projection

    example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif

    Returns a list of daily mosaic VRT filenames covering
    dem_dt +/- pad_days for the requested tiles.
    """
    #Could also use global MODIS 500 m snowcover grids, 8 day
    #http://nsidc.org/data/docs/daac/modis_v5/mod10a2_modis_terra_snow_8-day_global_500m_grid.gd.html
    #These are HDF4, sinusoidal
    #Should be able to load up with warplib without issue
    import re
    import requests
    from bs4 import BeautifulSoup
    auth = iolib.get_auth()
    #Expand single datetime into a padded daily range
    pad_days = timedelta(days=pad_days)
    dt_list = timelib.dt_range(dem_dt-pad_days, dem_dt+pad_days+timedelta(1), timedelta(1))
    outdir = os.path.join(datadir, 'modscag')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    out_vrt_fn_list = []
    for dt in dt_list:
        out_vrt_fn = os.path.join(outdir, dt.strftime('%Y%m%d_snow_fraction.vrt'))
        #If we already have a vrt and it contains all of the necessary tiles
        if os.path.exists(out_vrt_fn):
            vrt_ds = gdal.Open(out_vrt_fn)
            if np.all([np.any([tile in sub_fn for sub_fn in vrt_ds.GetFileList()]) for tile in tile_list]):
                out_vrt_fn_list.append(out_vrt_fn)
                continue
        #Otherwise, download missing tiles and rebuild
        #Try to use historic products
        modscag_fn_list = []
        #Note: not all tiles are available for same date ranges in historic vs. real-time
        #Need to repeat search tile-by-tile
        for tile in tile_list:
            modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag-historic/%Y/%j/'
            modscag_url_base = dt.strftime(modscag_url_str)
            print("Trying: %s" % modscag_url_base)
            r = requests.get(modscag_url_base, auth=auth)
            modscag_url_fn = []
            if r.ok:
                #Scrape the directory listing for this tile's snow_fraction tif
                parsed_html = BeautifulSoup(r.content, "html.parser")
                modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
            if not modscag_url_fn:
                #Couldn't find historic, try to use real-time products
                modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag/%Y/%j/'
                modscag_url_base = dt.strftime(modscag_url_str)
                print("Trying: %s" % modscag_url_base)
                r = requests.get(modscag_url_base, auth=auth)
                if r.ok:
                    parsed_html = BeautifulSoup(r.content, "html.parser")
                    modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
            if not modscag_url_fn:
                print("Unable to fetch MODSCAG for %s" % dt)
            else:
                #OK, we got
                #Now extract actual tif filenames to fetch from html
                parsed_html = BeautifulSoup(r.content, "html.parser")
                #Fetch all tiles
                modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
                if modscag_url_fn:
                    modscag_url_fn = modscag_url_fn[0]
                    modscag_url = os.path.join(modscag_url_base, modscag_url_fn)
                    print(modscag_url)
                    modscag_fn = os.path.join(outdir, os.path.split(modscag_url_fn)[-1])
                    #Only download if we don't already have a local copy
                    if not os.path.exists(modscag_fn):
                        iolib.getfile2(modscag_url, auth=auth, outdir=outdir)
                    modscag_fn_list.append(modscag_fn)
        #Mosaic tiles - currently a hack
        if modscag_fn_list:
            cmd = ['gdalbuildvrt', '-vrtnodata', '255', out_vrt_fn]
            cmd.extend(modscag_fn_list)
            print(cmd)
            subprocess.call(cmd, shell=False)
            out_vrt_fn_list.append(out_vrt_fn)
    return out_vrt_fn_list
def proc_modscag(fn_list, extent=None, t_srs=None):
    """Process the MODSCAG products for full date range, create composites and reproject

    Writes count/max/min/median composites of the input fractional snow
    cover grids, and returns a GDAL dataset for the last product written
    (the median composite).
    """
    #Use cubic spline here for improve upsampling
    ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
    #Output basename combines first and last input filenames plus stack size
    stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list)
    #Create stack here - no need for most of mastack machinery, just make 3D array
    #Mask values greater than 100% (clouds, bad pixels, etc)
    ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)
    #Per-pixel count of valid observations (0 counts are masked)
    stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)
    stack_count.set_fill_value(0)
    stack_min = ma_stack.min(axis=0).astype(np.uint8)
    stack_min.set_fill_value(0)
    stack_max = ma_stack.max(axis=0).astype(np.uint8)
    stack_max.set_fill_value(0)
    stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)
    stack_med.set_fill_value(0)
    out_fn = stack_fn + '_count.tif'
    iolib.writeGTiff(stack_count, out_fn, ds_list[0])
    out_fn = stack_fn + '_max.tif'
    iolib.writeGTiff(stack_max, out_fn, ds_list[0])
    out_fn = stack_fn + '_min.tif'
    iolib.writeGTiff(stack_min, out_fn, ds_list[0])
    out_fn = stack_fn + '_med.tif'
    iolib.writeGTiff(stack_med, out_fn, ds_list[0])
    #out_fn still points at the median composite written last
    ds = gdal.Open(out_fn)
    return ds
def get_toa_fn(dem_fn):
    """Locate (or generate) the top-of-atmosphere reflectance product for a DEM.

    Walks up from dem_fn to the top-level pair directory (a path component
    ending in '00', e.g. WV02_20140514_1030010031114100_1030010030896000),
    looks there for an existing *toa.tif, and if absent attempts to generate
    one from an ortho image via the external `toa.sh` script.
    Exits when no TOA product can be found or created.
    Original layout assumption: DEM file lives in *00/dem_*/*DEM_32m.tif
    """
    import re
    toa_fn = None
    dem_dir_list = os.path.split(os.path.realpath(dem_fn))[0].split(os.sep)
    #Indices of path components that look like a pair directory name
    r_idx = [i for i, item in enumerate(dem_dir_list) if re.search('(_10)*(_10)*00$', item)]
    if r_idx:
        #Reconstruct path up to and including the pair directory
        dem_dir = (os.sep).join(dem_dir_list[0:r_idx[0]+1])
        #Find toa.tif in top-level dir
        toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
        if not toa_fn:
            ortho_fn = glob.glob(os.path.join(dem_dir, '*ortho*.tif'))
            if ortho_fn:
                #Generate the TOA product from the ortho image
                cmd = ['toa.sh', dem_dir]
                print(cmd)
                subprocess.call(cmd)
                toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
        toa_fn = toa_fn[0] if toa_fn else None
    if toa_fn is None:
        sys.exit("Unable to locate TOA dataset")
    return toa_fn
#TOA reflectance filter
def get_toa_mask(toa_ds, toa_thresh=0.4):
    """Return boolean mask from TOA reflectance.

    True (1) = valid (dark) surface, False (0) = bright/snow-covered surface
    with reflectance above toa_thresh.
    """
    print("Applying TOA filter (masking values >= %0.2f)" % toa_thresh)
    toa = iolib.ds_getma(toa_ds)
    bright = np.ma.masked_greater(toa, toa_thresh)
    #Invert the masked-array mask so bright pixels become False
    return ~(np.ma.getmaskarray(bright))
def check_mask_list(mask_list):
    """Return mask_list filtered to entries present in mask_choices.

    Invalid entries are reported to stdout and dropped.
    """
    valid = []
    for choice in mask_list:
        if choice in mask_choices:
            valid.append(choice)
            continue
        print("Invalid mask choice: %s" % choice)
    return valid
def get_mask(dem_ds, mask_list, dem_fn=None, writeout=False, outdir=None, args=None):
    """Generate composite mask of control surfaces for the input DEM dataset.

    The requested mask sources are combined with logical AND (True = valid
    control surface), then inverted before return so the output follows the
    numpy masked-array convention (True = invalid pixel).

    Parameters
    ----------
    dem_ds : gdal.Dataset
        Input DEM dataset defining output grid, extent and projection
    mask_list : list of str
        Mask sources to apply; entries must come from mask_choices.
        If 'none' is present, no masking is performed and False is returned.
    dem_fn : str, optional
        DEM filename. NOTE(review): appears to be required for snodas/modscag
        (timestamp), toa, writeout basenames, and when outdir is None -
        confirm callers always supply it for those combinations.
    writeout : bool, optional
        Write intermediate mask products to disk
    outdir : str, optional
        Output directory (default: directory containing dem_fn)
    args : argparse.Namespace, optional
        Threshold/filter options; getparser() defaults are used when None

    Returns
    -------
    newmask : bool np.ndarray (or the scalar False when 'none' is requested)
        True for pixels that should be masked (invalid)
    """
    mask_list = check_mask_list(mask_list)
    if 'none' in mask_list:
        newmask = False
    else:
        #Basename for output files
        if outdir is not None:
            if not os.path.exists(outdir):
                os.makedirs(outdir)
        else:
            #NOTE(review): assumes dem_fn is not None when outdir is None
            outdir = os.path.split(dem_fn)[0]
        if dem_fn is not None:
            #Extract DEM timestamp
            dem_dt = timelib.fn_getdatetime(dem_fn)
            out_fn_base = os.path.join(outdir, os.path.splitext(dem_fn)[0])
        if args is None:
            #Get default values
            parser = getparser()
            args = parser.parse_args(['',])
        #Cascading mask: True = valid pixel at this stage; inverted at the end
        newmask = True
        if 'glaciers' in mask_list:
            icemask = get_icemask(dem_ds)
            if writeout:
                out_fn = out_fn_base+'_ice_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(icemask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(icemask, newmask)
        #Need to process NLCD separately, with nearest neighbor interpolation
        #Fix: compare strings with != rather than identity test `is not`
        if 'nlcd' in mask_list and args.nlcd_filter != 'none':
            rs = 'near'
            nlcd_ds = gdal.Open(get_nlcd_fn())
            nlcd_ds_warp = warplib.memwarp_multi([nlcd_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r=rs)[0]
            out_fn = None
            if writeout:
                out_fn = out_fn_base+'_nlcd.tif'
            nlcdmask = get_nlcd_mask(nlcd_ds_warp, filter=args.nlcd_filter, out_fn=out_fn)
            if writeout:
                out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(nlcdmask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(nlcdmask, newmask)
        if 'bareground' in mask_list and args.bareground_thresh > 0:
            bareground_ds = gdal.Open(get_bareground_fn())
            bareground_ds_warp = warplib.memwarp_multi([bareground_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
            out_fn = None
            if writeout:
                out_fn = out_fn_base+'_bareground.tif'
            baregroundmask = get_bareground_mask(bareground_ds_warp, bareground_thresh=args.bareground_thresh, out_fn=out_fn)
            if writeout:
                out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(baregroundmask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(baregroundmask, newmask)
        if 'snodas' in mask_list and args.snodas_thresh > 0:
            #Get SNODAS snow depth products for DEM timestamp
            snodas_min_dt = datetime(2003,9,30)
            if dem_dt >= snodas_min_dt:
                snodas_ds = get_snodas_ds(dem_dt)
                if snodas_ds is not None:
                    snodas_ds_warp = warplib.memwarp_multi([snodas_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
                    #snow depth values are mm, convert to meters
                    snodas_depth = iolib.ds_getma(snodas_ds_warp)/1000.
                    if snodas_depth.count() > 0:
                        print("Applying SNODAS snow depth filter (masking values >= %0.2f m)" % args.snodas_thresh)
                        out_fn = None
                        if writeout:
                            out_fn = out_fn_base+'_snodas_depth.tif'
                            print("Writing out %s" % out_fn)
                            iolib.writeGTiff(snodas_depth, out_fn, src_ds=dem_ds)
                        snodas_mask = np.ma.masked_greater(snodas_depth, args.snodas_thresh)
                        #True for shallow-snow (valid) pixels
                        snodas_mask = ~(np.ma.getmaskarray(snodas_mask))
                        if writeout:
                            out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                            print("Writing out %s" % out_fn)
                            iolib.writeGTiff(snodas_mask, out_fn, src_ds=dem_ds)
                        newmask = np.logical_and(snodas_mask, newmask)
                    else:
                        print("SNODAS grid for input location and timestamp is empty")
        #These tiles cover CONUS
        #tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05')
        if 'modscag' in mask_list and args.modscag_thresh > 0:
            modscag_min_dt = datetime(2000,2,24)
            if dem_dt < modscag_min_dt:
                print("Warning: DEM timestamp (%s) is before earliest MODSCAG timestamp (%s)" \
                        % (dem_dt, modscag_min_dt))
            else:
                tile_list = get_modis_tile_list(dem_ds)
                print(tile_list)
                pad_days=7
                modscag_fn_list = get_modscag_fn_list(dem_dt, tile_list=tile_list, pad_days=pad_days)
                if modscag_fn_list:
                    modscag_ds = proc_modscag(modscag_fn_list, extent=dem_ds, t_srs=dem_ds)
                    modscag_ds_warp = warplib.memwarp_multi([modscag_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
                    print("Applying MODSCAG fractional snow cover percent filter (masking values >= %0.1f%%)" % args.modscag_thresh)
                    modscag_fsca = iolib.ds_getma(modscag_ds_warp)
                    out_fn = None
                    if writeout:
                        out_fn = out_fn_base+'_modscag_fsca.tif'
                        print("Writing out %s" % out_fn)
                        iolib.writeGTiff(modscag_fsca, out_fn, src_ds=dem_ds)
                    modscag_mask = (modscag_fsca.filled(0) >= args.modscag_thresh)
                    #True for low-snow-fraction (valid) pixels
                    modscag_mask = ~(modscag_mask)
                    if writeout:
                        out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                        print("Writing out %s" % out_fn)
                        iolib.writeGTiff(modscag_mask, out_fn, src_ds=dem_ds)
                    newmask = np.logical_and(modscag_mask, newmask)
        #Use reflectance values to estimate snowcover
        if 'toa' in mask_list:
            #Use top of atmosphere scaled reflectance values (0-1)
            toa_ds = gdal.Open(get_toa_fn(dem_fn))
            toa_mask = get_toa_mask(toa_ds, args.toa_thresh)
            if writeout:
                out_fn = out_fn_base+'_toa_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(toa_mask, newmask)
        if False:
            #Filter based on expected snowline
            #Simplest approach uses altitude cutoff
            #NOTE(review): disabled block; references `dem`, undefined in this scope
            max_elev = 1500
            newdem = np.ma.masked_greater(dem, max_elev)
            newmask = np.ma.getmaskarray(newdem)
        print("Generating final mask to use for reference surfaces, and applying to input DEM")
        #Now invert to use to create final masked array
        #True (1) represents "invalid" pixel to match numpy ma convention
        newmask = ~newmask
        #Dilate the mask
        if args.dilate is not None:
            niter = args.dilate
            print("Dilating mask with %i iterations" % niter)
            from scipy import ndimage
            #ndimage.binary_dilation directly (ndimage.morphology namespace is deprecated)
            newmask = ~(ndimage.binary_dilation(~newmask, iterations=niter))
    return newmask
#Valid mask-source identifiers accepted by get_mask()/check_mask_list();
#'none' disables masking entirely
#Can add "mask_list" argument, instead of specifying individually
mask_choices = ['toa', 'snodas', 'modscag', 'bareground', 'glaciers', 'nlcd', 'none']
def getparser():
    """Construct the argument parser for the dem_mask command-line tool."""
    p = argparse.ArgumentParser(description="Identify control surfaces for DEM co-registration")
    #Choices for the NLCD land-cover filter (--nlcd_filter)
    nlcd_filter_choices = ['rock', 'rock+ice', 'rock+ice+water', 'not_forest', 'not_forest+not_water', 'none']
    p.add_argument('dem_fn', type=str, help='DEM filename')
    p.add_argument('--outdir', default=None, help='Directory for output products')
    p.add_argument('--writeout', action='store_true', help='Write out all intermediate products, instead of only final tif')
    p.add_argument('--toa', action='store_true', help='Use top-of-atmosphere reflectance values (requires pregenerated "dem_fn_toa.tif")')
    p.add_argument('--toa_thresh', type=float, default=0.4, help='Top-of-atmosphere reflectance threshold (default: %(default)s, valid range 0.0-1.0), mask values greater than this value')
    p.add_argument('--snodas', action='store_true', help='Use SNODAS snow depth products')
    p.add_argument('--snodas_thresh', type=float, default=0.2, help='SNODAS snow depth threshold (default: %(default)s m), mask values greater than this value')
    p.add_argument('--modscag', action='store_true', help='Use MODSCAG fractional snow cover products')
    p.add_argument('--modscag_thresh', type=float, default=50, help='MODSCAG fractional snow cover percent threshold (default: %(default)s%%, valid range 0-100), mask greater than this value')
    p.add_argument('--bareground', action='store_true', help="Enable bareground filter")
    p.add_argument('--bareground_thresh', type=float, default=60, help='Percent bareground threshold (default: %(default)s%%, valid range 0-100), mask greater than this value (only relevant for global bareground data)')
    p.add_argument('--glaciers', action='store_true', help="Mask glacier polygons")
    p.add_argument('--nlcd', action='store_true', help="Enable NLCD LULC filter (for CONUS)")
    p.add_argument('--nlcd_filter', type=str, default='not_forest', choices=nlcd_filter_choices, help='Preserve these NLCD pixels (default: %(default)s)')
    p.add_argument('--dilate', type=int, default=None, help='Dilate mask with this many iterations (default: %(default)s)')
    return p
def main():
    """Command-line driver: mask a DEM to control surfaces for co-registration."""
    parser = getparser()
    args = parser.parse_args()
    #Translate boolean flags into the mask-source list expected by get_mask
    mask_list = []
    if args.toa: mask_list.append('toa')
    if args.snodas: mask_list.append('snodas')
    if args.modscag: mask_list.append('modscag')
    if args.bareground: mask_list.append('bareground')
    if args.glaciers: mask_list.append('glaciers')
    if args.nlcd: mask_list.append('nlcd')
    if not mask_list:
        parser.print_help()
        sys.exit("Must specify at least one mask type")
    dem_fn = args.dem_fn
    dem_ds = gdal.Open(dem_fn)
    print(dem_fn)
    #Get DEM masked array
    dem = iolib.ds_getma(dem_ds)
    print("%i valid pixels in original input tif" % dem.count())
    #get_mask returns True for pixels that should be masked (invalid)
    newmask = get_mask(dem_ds, mask_list, dem_fn=dem_fn, writeout=args.writeout, outdir=args.outdir, args=args)
    #Apply mask to original DEM - use these surfaces for co-registration
    newdem = np.ma.array(dem, mask=newmask)
    #Check that we have enough pixels, good distribution
    min_validpx_count = 100
    min_validpx_std = 10
    validpx_count = newdem.count()
    validpx_std = newdem.std()
    print("%i valid pixels in masked output tif to be used as ref" % validpx_count)
    print("%0.2f std in masked output tif to be used as ref" % validpx_std)
    #if (validpx_count > min_validpx_count) and (validpx_std > min_validpx_std):
    if (validpx_count > min_validpx_count):
        #Fix: args.outdir defaults to None, and os.path.join(None, ...) raises
        #TypeError - only join when an output directory was actually specified
        if args.outdir is not None:
            out_fn = os.path.join(args.outdir, os.path.splitext(dem_fn)[0]+'_ref.tif')
        else:
            out_fn = os.path.splitext(dem_fn)[0]+'_ref.tif'
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(newdem, out_fn, src_ds=dem_ds)
    else:
        print("Not enough valid pixels!")

if __name__ == "__main__":
    main()
|
dshean/demcoreg
|
demcoreg/dem_mask.py
|
get_glacier_poly
|
python
|
def get_glacier_poly():
    """Return the path to the merged global RGI v6 glacier outline shapefile.

    Exits with download instructions (via get_rgi.sh) if the data are missing.
    """
    #rgi_fn = os.path.join(datadir, 'rgi50/regions/rgi50_merge.shp')
    #Update to rgi60, should have this returned from get_rgi.sh
    rgi_fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')
    if not os.path.exists(rgi_fn):
        cmd = ['get_rgi.sh',]
        # sys.exit raises SystemExit with the message; download must be run manually
        sys.exit("Missing rgi glacier data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
        #subprocess.call(cmd)
    return rgi_fn
|
Calls external shell script `get_rgi.sh` to fetch:
Randolph Glacier Inventory (RGI) glacier outline shapefiles
Full RGI database: rgi50.zip is 410 MB
The shell script will unzip and merge regional shp into single global shp
http://www.glims.org/RGI/
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/dem_mask.py#L70-L88
| null |
#! /usr/bin/env python
"""
Utility to automate reference surface identification for raster co-registration
Note: Initial run may take a long time to download and process required data (NLCD, global bareground, glacier polygons)
Can control location of these data files with DATADIR environmental variable
export DATADIR=dir
Dependencies: gdal, wget, requests, bs4
"""
#To do:
#Integrate 1-km LULC data: http://www.landcover.org/data/landcover/
#TODO: need to clean up toa handling
import sys
import os
import subprocess
import glob
import argparse
from osgeo import gdal, ogr, osr
import numpy as np
from datetime import datetime, timedelta
from pygeotools.lib import iolib, warplib, geolib, timelib
datadir = iolib.get_datadir()
def get_nlcd_fn():
    """Return the path to the 2011 NLCD land-cover GTiff (30 m, CONUS).

    The data are fetched by the external shell script `get_nlcd.sh`.
    http://www.mrlc.gov/nlcd11_leg.php
    """
    # get_nlcd.sh converts the original ~17 GB .img product into a
    # 1.1 GB LZW-compressed GTiff with this filename
    nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
    if os.path.exists(nlcd_fn):
        return nlcd_fn
    cmd = ['get_nlcd.sh',]
    #subprocess.call(cmd)
    sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
def get_bareground_fn():
    """Return the path to the ~2010 global bare-ground VRT (30 m).

    Fetched by the external shell script `get_bareground.sh`; the unzipped
    source tiles total ~64 GB, which the script recompresses with lossless LZW.
    http://landcover.usgs.gov/glc/BareGroundDescriptionAndDownloads.php
    """
    bg_fn = os.path.join(datadir, 'bare2010/bare2010.vrt')
    if os.path.exists(bg_fn):
        return bg_fn
    cmd = ['get_bareground.sh',]
    #subprocess.call(cmd)
    sys.exit("Missing bareground data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#Download latest global RGI glacier db
def get_glacier_poly():
    """Return the path to the merged global RGI v6 glacier outline shapefile.

    Fetched by the external shell script `get_rgi.sh`, which unzips and
    merges the regional shapefiles into a single global shp.
    http://www.glims.org/RGI/
    """
    # Updated from rgi50 to rgi60; ideally get_rgi.sh itself would report this path
    rgi_fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')
    if os.path.exists(rgi_fn):
        return rgi_fn
    cmd = ['get_rgi.sh',]
    #subprocess.call(cmd)
    sys.exit("Missing rgi glacier data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#Update glacier polygons
def get_icemask(ds, glac_shp_fn=None):
    """Rasterize glacier polygons onto the grid of the input GDAL Dataset."""
    print("Masking glaciers")
    if glac_shp_fn is None:
        glac_shp_fn = get_glacier_poly()
    # Informational only: rasterization is attempted either way
    if os.path.exists(glac_shp_fn):
        print("Found glacier shp: %s" % glac_shp_fn)
    else:
        print("Unable to locate glacier shp: %s" % glac_shp_fn)
    # shp2array handles projection/extent/resolution matching internally
    return geolib.shp2array(glac_shp_fn, ds)
#Create nlcd mask
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
    """Generate a boolean mask (True = preserve) from the NLCD LULC grid.

    NLCD class codes used here: 11 open water (includes rivers), 12 ice,
    31 rock, 41/42/43 forest classes, 52 shrub (<5 m tall, >20%).
    Using 'rock+ice+water' preserves the most pixels, although it could be
    problematic over areas with lakes.
    """
    print("Loading NLCD LULC")
    lulc = nlcd_ds.GetRasterBand(1).ReadAsArray()
    print("Filtering NLCD LULC with: %s" % filter)
    #Original nlcd products have nan as ndv
    # Either preserve exactly these classes, or preserve everything except them
    keep_classes = {
        'rock': [31],
        'rock+ice': [31, 12],
        'rock+ice+water': [31, 12, 11],
    }
    drop_classes = {
        'not_forest': [41, 42, 43],
        'not_forest+not_water': [41, 42, 43, 11],
    }
    if filter in keep_classes:
        mask = np.isin(lulc, keep_classes[filter])
    elif filter in drop_classes:
        mask = ~np.isin(lulc, drop_classes[filter])
    else:
        print("Invalid mask type")
        mask = None
    #Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(lulc, out_fn, nlcd_ds)
    lulc = None
    return mask
def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):
    """Generate a boolean mask (True = preserve) from global bare-ground data.

    Pixels whose bare-ground percentage exceeds bareground_thresh are True.
    """
    print("Loading bareground")
    pct = bareground_ds.GetRasterBand(1).ReadAsArray()
    print("Masking pixels with <%0.1f%% bare ground" % bareground_thresh)
    # Threshold is a percentage; bail out on nonsensical values
    if bareground_thresh < 0.0 or bareground_thresh > 100.0:
        sys.exit("Invalid bare ground percentage")
    mask = (pct > bareground_thresh)
    # Optionally write out the raw percentage grid for inspection
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(pct, out_fn, bareground_ds)
    # Drop the array reference before returning
    pct = None
    return mask
def get_snodas_ds(dem_dt, code=1036):
    """Function to fetch and process SNODAS snow depth products for input datetime

    http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/index.html

    Product codes:
    1036 is snow depth
    1034 is SWE

    filename format: us_ssmv11036tS__T0001TTNATS2015042205HP001.Hdr

    Returns a GDAL dataset for the extracted grid, or None when no product
    exists for the input date.
    """
    import tarfile
    import gzip
    snodas_ds = None
    snodas_url_str = None
    # Local cache directory for downloaded/unpacked SNODAS products
    outdir = os.path.join(datadir, 'snodas')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    #Note: unmasked products (beyond CONUS) are only available from 2010-present
    if dem_dt >= datetime(2003,9,30) and dem_dt < datetime(2010,1,1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/masked/%Y/%m_%b/SNODAS_%Y%m%d.tar'
        tar_subfn_str_fmt = 'us_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    elif dem_dt >= datetime(2010,1,1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/unmasked/%Y/%m_%b/SNODAS_unmasked_%Y%m%d.tar'
        tar_subfn_str_fmt = './zz_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    else:
        print("No SNODAS data available for input date")
    if snodas_url_str is not None:
        # Download the daily tarball for the DEM timestamp (cached in outdir)
        snodas_url = dem_dt.strftime(snodas_url_str)
        snodas_tar_fn = iolib.getfile(snodas_url, outdir=outdir)
        print("Unpacking")
        tar = tarfile.open(snodas_tar_fn)
        #gunzip to extract both dat and Hdr files, tar.gz
        for ext in ('dat', 'Hdr'):
            # First substitute the product code and extension, then the date fields
            tar_subfn_str = tar_subfn_str_fmt % (code, ext)
            tar_subfn_gz = dem_dt.strftime(tar_subfn_str)
            tar_subfn = os.path.splitext(tar_subfn_gz)[0]
            print(tar_subfn)
            if outdir is not None:
                tar_subfn = os.path.join(outdir, tar_subfn)
            if not os.path.exists(tar_subfn):
                #Should be able to do this without writing intermediate gz to disk
                # NOTE(review): tar.extract writes tar_subfn_gz to the current
                # working directory, not outdir — confirm this is intended
                tar.extract(tar_subfn_gz)
                with gzip.open(tar_subfn_gz, 'rb') as f:
                    outf = open(tar_subfn, 'wb')
                    outf.write(f.read())
                    outf.close()
                os.remove(tar_subfn_gz)
        #Need to delete 'Created by module comment' line from Hdr, can contain too many characters
        bad_str = 'Created by module comment'
        # tar_subfn is the last file written by the loop above (the .Hdr)
        snodas_fn = tar_subfn
        f = open(snodas_fn)
        output = []
        for line in f:
            if not bad_str in line:
                output.append(line)
        f.close()
        f = open(snodas_fn, 'w')
        f.writelines(output)
        f.close()
        #Return GDAL dataset for extracted product
        snodas_ds = gdal.Open(snodas_fn)
    return snodas_ds
def get_modis_tile_list(ds):
    """Identify MODIS tiles that intersect the input dataset's footprint.

    modis_grid.py contains a dictionary of tile boundaries (tile name and
    WKT polygon ring from bbox).
    See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
    """
    from demcoreg import modis_grid
    # Reproject a copy of the dataset footprint to WGS84 so it can be
    # compared with the lat/lon tile bounding polygons
    footprint = geolib.geom_dup(geolib.ds_geom(ds))
    ct = osr.CoordinateTransformation(footprint.GetSpatialReference(), geolib.wgs_srs)
    footprint.Transform(ct)
    tile_list = []
    for tile_name, wkt in list(modis_grid.modis_dict.items()):
        tile_geom = ogr.CreateGeometryFromWkt(wkt)
        if footprint.Intersects(tile_geom):
            tile_list.append(tile_name)
    return tile_list
def get_modscag_fn_list(dem_dt, tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05'), pad_days=7):
    """Function to fetch and process MODSCAG fractional snow cover products for input datetime

    Products are tiled in MODIS sinusoidal projection

    example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif

    Returns a list of daily VRT filenames (one per date in the padded window)
    mosaicking the downloaded snow_fraction tiles.
    """
    #Could also use global MODIS 500 m snowcover grids, 8 day
    #http://nsidc.org/data/docs/daac/modis_v5/mod10a2_modis_terra_snow_8-day_global_500m_grid.gd.html
    #These are HDF4, sinusoidal
    #Should be able to load up with warplib without issue
    import re
    import requests
    from bs4 import BeautifulSoup
    auth = iolib.get_auth()
    # Search a +/- pad_days window around the DEM timestamp
    pad_days = timedelta(days=pad_days)
    dt_list = timelib.dt_range(dem_dt-pad_days, dem_dt+pad_days+timedelta(1), timedelta(1))
    outdir = os.path.join(datadir, 'modscag')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    out_vrt_fn_list = []
    for dt in dt_list:
        out_vrt_fn = os.path.join(outdir, dt.strftime('%Y%m%d_snow_fraction.vrt'))
        #If we already have a vrt and it contains all of the necessary tiles
        if os.path.exists(out_vrt_fn):
            vrt_ds = gdal.Open(out_vrt_fn)
            # Every requested tile must appear in the VRT's source file list
            if np.all([np.any([tile in sub_fn for sub_fn in vrt_ds.GetFileList()]) for tile in tile_list]):
                out_vrt_fn_list.append(out_vrt_fn)
                continue
        #Otherwise, download missing tiles and rebuild
        #Try to use historic products
        modscag_fn_list = []
        #Note: not all tiles are available for same date ranges in historic vs. real-time
        #Need to repeat search tile-by-tile
        for tile in tile_list:
            modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag-historic/%Y/%j/'
            modscag_url_base = dt.strftime(modscag_url_str)
            print("Trying: %s" % modscag_url_base)
            r = requests.get(modscag_url_base, auth=auth)
            modscag_url_fn = []
            if r.ok:
                # Scrape the directory listing for this tile's snow_fraction GTiff
                parsed_html = BeautifulSoup(r.content, "html.parser")
                modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
            if not modscag_url_fn:
                #Couldn't find historic, try to use real-time products
                modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag/%Y/%j/'
                modscag_url_base = dt.strftime(modscag_url_str)
                print("Trying: %s" % modscag_url_base)
                r = requests.get(modscag_url_base, auth=auth)
                if r.ok:
                    parsed_html = BeautifulSoup(r.content, "html.parser")
                    modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
                if not modscag_url_fn:
                    print("Unable to fetch MODSCAG for %s" % dt)
            else:
                #OK, we got
                #Now extract actual tif filenames to fetch from html
                parsed_html = BeautifulSoup(r.content, "html.parser")
                #Fetch all tiles
                modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
            if modscag_url_fn:
                # Download the first matching product for this tile (cached locally)
                modscag_url_fn = modscag_url_fn[0]
                modscag_url = os.path.join(modscag_url_base, modscag_url_fn)
                print(modscag_url)
                modscag_fn = os.path.join(outdir, os.path.split(modscag_url_fn)[-1])
                if not os.path.exists(modscag_fn):
                    iolib.getfile2(modscag_url, auth=auth, outdir=outdir)
                modscag_fn_list.append(modscag_fn)
        #Mosaic tiles - currently a hack
        if modscag_fn_list:
            # Build a daily VRT over all downloaded tiles (255 = nodata)
            cmd = ['gdalbuildvrt', '-vrtnodata', '255', out_vrt_fn]
            cmd.extend(modscag_fn_list)
            print(cmd)
            subprocess.call(cmd, shell=False)
            out_vrt_fn_list.append(out_vrt_fn)
    return out_vrt_fn_list
def proc_modscag(fn_list, extent=None, t_srs=None):
    """Process the MODSCAG products for full date range, create composites and reproject

    Warps all inputs to a common grid, stacks them, writes count/min/max/median
    composites to disk, and returns a GDAL dataset for the median composite.
    """
    #Use cubic spline here for improved upsampling
    ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
    # Output basename encodes first input, last input, and stack size
    stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list)
    #Create stack here - no need for most of mastack machinery, just make 3D array
    #Mask values greater than 100% (clouds, bad pixels, etc)
    ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)
    # Per-pixel count of valid (unmasked) observations; 0-count pixels stay masked
    stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)
    stack_count.set_fill_value(0)
    stack_min = ma_stack.min(axis=0).astype(np.uint8)
    stack_min.set_fill_value(0)
    stack_max = ma_stack.max(axis=0).astype(np.uint8)
    stack_max.set_fill_value(0)
    stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)
    stack_med.set_fill_value(0)
    out_fn = stack_fn + '_count.tif'
    iolib.writeGTiff(stack_count, out_fn, ds_list[0])
    out_fn = stack_fn + '_max.tif'
    iolib.writeGTiff(stack_max, out_fn, ds_list[0])
    out_fn = stack_fn + '_min.tif'
    iolib.writeGTiff(stack_min, out_fn, ds_list[0])
    out_fn = stack_fn + '_med.tif'
    iolib.writeGTiff(stack_med, out_fn, ds_list[0])
    # Reopen the median composite (the last file written) and return it
    ds = gdal.Open(out_fn)
    return ds
def get_toa_fn(dem_fn):
    """Locate (or generate via `toa.sh`) the top-of-atmosphere reflectance
    product associated with the input DEM filename.

    Walks up the DEM's path looking for a pair-directory component ending in
    '00' (e.g. WV02_20140514_1030010031114100_1030010030896000), then looks
    for a '*toa.tif' there, generating one from '*ortho*.tif' if necessary.
    Exits if no TOA product can be found or produced.
    """
    toa_fn = None
    #Original approach, assumes DEM file is in *00/dem_*/*DEM_32m.tif
    #dem_dir = os.path.split(os.path.split(os.path.abspath(dem_fn))[0])[0]
    dem_dir_list = os.path.split(os.path.realpath(dem_fn))[0].split(os.sep)
    import re
    #Get index of the top level pair directory containing toa (WV02_20140514_1030010031114100_1030010030896000)
    # NOTE(review): '(_10)*(_10)*00$' effectively matches any component ending
    # in '00' since both groups are optional — confirm intended pattern
    r_idx = [i for i, item in enumerate(dem_dir_list) if re.search('(_10)*(_10)*00$', item)]
    if r_idx:
        # Use the first matching path component
        r_idx = r_idx[0]
        #Reconstruct dir
        dem_dir = (os.sep).join(dem_dir_list[0:r_idx+1])
        #Find toa.tif in top-level dir
        toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
        if not toa_fn:
            # No existing TOA product; try to generate one from the ortho image
            ortho_fn = glob.glob(os.path.join(dem_dir, '*ortho*.tif'))
            if ortho_fn:
                cmd = ['toa.sh', dem_dir]
                print(cmd)
                subprocess.call(cmd)
                toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
        if toa_fn:
            toa_fn = toa_fn[0]
        else:
            toa_fn = None
    if toa_fn is None:
        sys.exit("Unable to locate TOA dataset")
    return toa_fn
#TOA reflectance filter
def get_toa_mask(toa_ds, toa_thresh=0.4):
    """Mask bright (likely snow-covered) surfaces using scaled TOA reflectance.

    Returns a boolean array: True (1) for valid surfaces, False (0) for
    pixels with reflectance above toa_thresh.
    """
    print("Applying TOA filter (masking values >= %0.2f)" % toa_thresh)
    refl = iolib.ds_getma(toa_ds)
    bright = np.ma.masked_greater(refl, toa_thresh)
    #This should be 1 for valid surfaces, 0 for snowcovered surfaces
    return ~(np.ma.getmaskarray(bright))
def check_mask_list(mask_list):
    """Return the subset of mask_list found in mask_choices, printing a
    warning for each unrecognized entry."""
    valid = []
    for choice in mask_list:
        if choice in mask_choices:
            valid.append(choice)
        else:
            print("Invalid mask choice: %s" % choice)
    return valid
def get_mask(dem_ds, mask_list, dem_fn=None, writeout=False, outdir=None, args=None):
    """Build a cascading boolean mask for the input DEM dataset.

    Parameters:
        dem_ds: GDAL dataset defining the target grid (res/extent/srs)
        mask_list: list of mask identifiers (see mask_choices); 'none'
            short-circuits and disables masking entirely
        dem_fn: DEM filename, used to derive output basenames and the DEM
            timestamp (needed by the snodas/modscag/toa filters)
        writeout: write intermediate mask products to disk
        outdir: output directory (defaults to the DEM's directory)
        args: argparse namespace carrying filter thresholds; defaults are
            pulled from getparser() when None

    Returns a boolean array following np.ma convention: True marks pixels to
    be masked (invalid), False marks valid control surfaces. Returns the
    scalar False when masking is disabled ('none').
    """
    mask_list = check_mask_list(mask_list)
    if 'none' in mask_list:
        newmask = False
    else:
        #Basename for output files
        if outdir is not None:
            if not os.path.exists(outdir):
                os.makedirs(outdir)
        else:
            outdir = os.path.split(dem_fn)[0]
        if dem_fn is not None:
            #Extract DEM timestamp
            dem_dt = timelib.fn_getdatetime(dem_fn)
            out_fn_base = os.path.join(outdir, os.path.splitext(dem_fn)[0])
        if args is None:
            #Get default values
            parser = getparser()
            args = parser.parse_args(['',])
        #Start with all pixels valid (True), then AND in each filter below
        newmask = True
        if 'glaciers' in mask_list:
            icemask = get_icemask(dem_ds)
            if writeout:
                out_fn = out_fn_base+'_ice_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(icemask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(icemask, newmask)
        #Need to process NLCD separately, with nearest neighbor interpolation
        #Bugfix: was `args.nlcd_filter is not 'none'` — an identity comparison
        #that is True for any runtime string equal to 'none'; use != instead
        if 'nlcd' in mask_list and args.nlcd_filter != 'none':
            rs = 'near'
            nlcd_ds = gdal.Open(get_nlcd_fn())
            nlcd_ds_warp = warplib.memwarp_multi([nlcd_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r=rs)[0]
            out_fn = None
            if writeout:
                out_fn = out_fn_base+'_nlcd.tif'
            nlcdmask = get_nlcd_mask(nlcd_ds_warp, filter=args.nlcd_filter, out_fn=out_fn)
            if writeout:
                out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(nlcdmask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(nlcdmask, newmask)
        if 'bareground' in mask_list and args.bareground_thresh > 0:
            bareground_ds = gdal.Open(get_bareground_fn())
            bareground_ds_warp = warplib.memwarp_multi([bareground_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
            out_fn = None
            if writeout:
                out_fn = out_fn_base+'_bareground.tif'
            baregroundmask = get_bareground_mask(bareground_ds_warp, bareground_thresh=args.bareground_thresh, out_fn=out_fn)
            if writeout:
                out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(baregroundmask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(baregroundmask, newmask)
        if 'snodas' in mask_list and args.snodas_thresh > 0:
            #Get SNODAS snow depth products for DEM timestamp
            #NOTE: dem_dt is only defined when dem_fn was provided
            snodas_min_dt = datetime(2003,9,30)
            if dem_dt >= snodas_min_dt:
                snodas_ds = get_snodas_ds(dem_dt)
                if snodas_ds is not None:
                    snodas_ds_warp = warplib.memwarp_multi([snodas_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
                    #snow depth values are mm, convert to meters
                    snodas_depth = iolib.ds_getma(snodas_ds_warp)/1000.
                    if snodas_depth.count() > 0:
                        print("Applying SNODAS snow depth filter (masking values >= %0.2f m)" % args.snodas_thresh)
                        out_fn = None
                        if writeout:
                            out_fn = out_fn_base+'_snodas_depth.tif'
                            print("Writing out %s" % out_fn)
                            iolib.writeGTiff(snodas_depth, out_fn, src_ds=dem_ds)
                        snodas_mask = np.ma.masked_greater(snodas_depth, args.snodas_thresh)
                        snodas_mask = ~(np.ma.getmaskarray(snodas_mask))
                        if writeout:
                            out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                            print("Writing out %s" % out_fn)
                            iolib.writeGTiff(snodas_mask, out_fn, src_ds=dem_ds)
                        newmask = np.logical_and(snodas_mask, newmask)
                    else:
                        print("SNODAS grid for input location and timestamp is empty")
        #These tiles cover CONUS
        #tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05')
        if 'modscag' in mask_list and args.modscag_thresh > 0:
            modscag_min_dt = datetime(2000,2,24)
            if dem_dt < modscag_min_dt:
                print("Warning: DEM timestamp (%s) is before earliest MODSCAG timestamp (%s)" \
                        % (dem_dt, modscag_min_dt))
            else:
                tile_list = get_modis_tile_list(dem_ds)
                print(tile_list)
                pad_days=7
                modscag_fn_list = get_modscag_fn_list(dem_dt, tile_list=tile_list, pad_days=pad_days)
                if modscag_fn_list:
                    modscag_ds = proc_modscag(modscag_fn_list, extent=dem_ds, t_srs=dem_ds)
                    modscag_ds_warp = warplib.memwarp_multi([modscag_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
                    print("Applying MODSCAG fractional snow cover percent filter (masking values >= %0.1f%%)" % args.modscag_thresh)
                    modscag_fsca = iolib.ds_getma(modscag_ds_warp)
                    out_fn = None
                    if writeout:
                        out_fn = out_fn_base+'_modscag_fsca.tif'
                        print("Writing out %s" % out_fn)
                        iolib.writeGTiff(modscag_fsca, out_fn, src_ds=dem_ds)
                    modscag_mask = (modscag_fsca.filled(0) >= args.modscag_thresh)
                    modscag_mask = ~(modscag_mask)
                    if writeout:
                        out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                        print("Writing out %s" % out_fn)
                        iolib.writeGTiff(modscag_mask, out_fn, src_ds=dem_ds)
                    newmask = np.logical_and(modscag_mask, newmask)
        #Use reflectance values to estimate snowcover
        if 'toa' in mask_list:
            #Use top of atmosphere scaled reflectance values (0-1)
            toa_ds = gdal.Open(get_toa_fn(dem_fn))
            toa_mask = get_toa_mask(toa_ds, args.toa_thresh)
            if writeout:
                out_fn = out_fn_base+'_toa_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(toa_mask, newmask)
        if False:
            #Filter based on expected snowline
            #Simplest approach uses altitude cutoff
            #NOTE: dead code — `dem` is not defined in this scope
            max_elev = 1500
            newdem = np.ma.masked_greater(dem, max_elev)
            newmask = np.ma.getmaskarray(newdem)
        print("Generating final mask to use for reference surfaces, and applying to input DEM")
        #Now invert to use to create final masked array
        #True (1) represents "invalid" pixel to match numpy ma convention
        newmask = ~newmask
        #Dilate the mask
        if args.dilate is not None:
            niter = args.dilate
            print("Dilating mask with %i iterations" % niter)
            from scipy import ndimage
            newmask = ~(ndimage.morphology.binary_dilation(~newmask, iterations=niter))
    return newmask
#Can add "mask_list" argument, instead of specifying individually
# Valid mask identifiers accepted by get_mask()/check_mask_list();
# 'none' disables masking entirely.
mask_choices = ['toa', 'snodas', 'modscag', 'bareground', 'glaciers', 'nlcd', 'none']
def getparser():
    """Build the argparse parser for the dem_mask command-line tool."""
    parser = argparse.ArgumentParser(description="Identify control surfaces for DEM co-registration")
    parser.add_argument('dem_fn', type=str, help='DEM filename')
    parser.add_argument('--outdir', default=None, help='Directory for output products')
    parser.add_argument('--writeout', action='store_true', help='Write out all intermediate products, instead of only final tif')
    #parser.add_argument('-datadir', default=None, help='Data directory containing reference data sources (NLCD, bareground, etc)')
    parser.add_argument('--toa', action='store_true', help='Use top-of-atmosphere reflectance values (requires pregenerated "dem_fn_toa.tif")')
    parser.add_argument('--toa_thresh', type=float, default=0.4, help='Top-of-atmosphere reflectance threshold (default: %(default)s, valid range 0.0-1.0), mask values greater than this value')
    parser.add_argument('--snodas', action='store_true', help='Use SNODAS snow depth products')
    parser.add_argument('--snodas_thresh', type=float, default=0.2, help='SNODAS snow depth threshold (default: %(default)s m), mask values greater than this value')
    parser.add_argument('--modscag', action='store_true', help='Use MODSCAG fractional snow cover products')
    parser.add_argument('--modscag_thresh', type=float, default=50, help='MODSCAG fractional snow cover percent threshold (default: %(default)s%%, valid range 0-100), mask greater than this value')
    parser.add_argument('--bareground', action='store_true', help="Enable bareground filter")
    parser.add_argument('--bareground_thresh', type=float, default=60, help='Percent bareground threshold (default: %(default)s%%, valid range 0-100), mask greater than this value (only relevant for global bareground data)')
    parser.add_argument('--glaciers', action='store_true', help="Mask glacier polygons")
    parser.add_argument('--nlcd', action='store_true', help="Enable NLCD LULC filter (for CONUS)")
    # Which NLCD classes to preserve; 'none' disables the filter in get_mask
    nlcd_filter_choices = ['rock', 'rock+ice', 'rock+ice+water', 'not_forest', 'not_forest+not_water', 'none']
    parser.add_argument('--nlcd_filter', type=str, default='not_forest', choices=nlcd_filter_choices, help='Preserve these NLCD pixels (default: %(default)s)')
    parser.add_argument('--dilate', type=int, default=None, help='Dilate mask with this many iterations (default: %(default)s)')
    return parser
def main():
    """Command-line driver: build the requested reference-surface mask and
    write the masked DEM (*_ref.tif) to use for co-registration."""
    parser = getparser()
    args = parser.parse_args()
    # Assemble list of requested mask types from CLI flags
    mask_list = []
    if args.toa: mask_list.append('toa')
    if args.snodas: mask_list.append('snodas')
    if args.modscag: mask_list.append('modscag')
    if args.bareground: mask_list.append('bareground')
    if args.glaciers: mask_list.append('glaciers')
    if args.nlcd: mask_list.append('nlcd')
    if not mask_list:
        parser.print_help()
        sys.exit("Must specify at least one mask type")
    #This directory should or will contain the relevant data products
    #if args.datadir is None:
    #    datadir = iolib.get_datadir()
    dem_fn = args.dem_fn
    dem_ds = gdal.Open(dem_fn)
    print(dem_fn)
    #Get DEM masked array
    dem = iolib.ds_getma(dem_ds)
    print("%i valid pixels in original input tif" % dem.count())
    #Set up cascading mask preparation
    #True (1) represents "valid" unmasked pixel, False (0) represents "invalid" pixel to be masked
    newmask = get_mask(dem_ds, mask_list, dem_fn=dem_fn, writeout=args.writeout, outdir=args.outdir, args=args)
    #Apply mask to original DEM - use these surfaces for co-registration
    newdem = np.ma.array(dem, mask=newmask)
    #Check that we have enough pixels, good distribution
    min_validpx_count = 100
    min_validpx_std = 10
    validpx_count = newdem.count()
    validpx_std = newdem.std()
    print("%i valid pixels in masked output tif to be used as ref" % validpx_count)
    print("%0.2f std in masked output tif to be used as ref" % validpx_std)
    #if (validpx_count > min_validpx_count) and (validpx_std > min_validpx_std):
    if (validpx_count > min_validpx_count):
        #Bugfix: --outdir defaults to None, and os.path.join(None, ...) raised
        #TypeError; fall back to writing alongside the input DEM
        if args.outdir is not None:
            out_fn = os.path.join(args.outdir, os.path.splitext(dem_fn)[0]+'_ref.tif')
        else:
            out_fn = os.path.splitext(dem_fn)[0]+'_ref.tif'
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(newdem, out_fn, src_ds=dem_ds)
    else:
        print("Not enough valid pixels!")

if __name__ == "__main__":
    main()
|
dshean/demcoreg
|
demcoreg/dem_mask.py
|
get_icemask
|
python
|
def get_icemask(ds, glac_shp_fn=None):
    """Generate glacier polygon raster mask for input Dataset res/extent"""
    print("Masking glaciers")
    if glac_shp_fn is None:
        # Fall back to the merged global RGI shapefile
        glac_shp_fn = get_glacier_poly()
    if not os.path.exists(glac_shp_fn):
        print("Unable to locate glacier shp: %s" % glac_shp_fn)
    else:
        print("Found glacier shp: %s" % glac_shp_fn)
    #All of the proj, extent, handling should now occur in shp2array
    icemask = geolib.shp2array(glac_shp_fn, ds)
    return icemask
|
Generate glacier polygon raster mask for input Dataset res/extent
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/dem_mask.py#L91-L105
|
[
"def get_glacier_poly():\n \"\"\"Calls external shell script `get_rgi.sh` to fetch:\n\n Randolph Glacier Inventory (RGI) glacier outline shapefiles \n\n Full RGI database: rgi50.zip is 410 MB\n\n The shell script will unzip and merge regional shp into single global shp\n\n http://www.glims.org/RGI/\n \"\"\"\n #rgi_fn = os.path.join(datadir, 'rgi50/regions/rgi50_merge.shp')\n #Update to rgi60, should have this returned from get_rgi.sh\n rgi_fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')\n if not os.path.exists(rgi_fn):\n cmd = ['get_rgi.sh',]\n sys.exit(\"Missing rgi glacier data source. If already downloaded, specify correct datadir. If not, run `%s` to download\" % cmd[0])\n #subprocess.call(cmd)\n return rgi_fn \n"
] |
#! /usr/bin/env python
"""
Utility to automate reference surface identification for raster co-registration
Note: Initial run may take a long time to download and process required data (NLCD, global bareground, glacier polygons)
Can control location of these data files with DATADIR environmental variable
export DATADIR=dir
Dependencies: gdal, wget, requests, bs4
"""
#To do:
#Integrate 1-km LULC data: http://www.landcover.org/data/landcover/
#TODO: need to clean up toa handling
import sys
import os
import subprocess
import glob
import argparse
from osgeo import gdal, ogr, osr
import numpy as np
from datetime import datetime, timedelta
from pygeotools.lib import iolib, warplib, geolib, timelib
datadir = iolib.get_datadir()
def get_nlcd_fn():
    """Return the path to the 2011 NLCD land-cover GTiff (30 m, CONUS).

    The data are fetched by the external shell script `get_nlcd.sh`.
    http://www.mrlc.gov/nlcd11_leg.php
    """
    # get_nlcd.sh converts the original ~17 GB .img product into a
    # 1.1 GB LZW-compressed GTiff with this filename
    nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
    if os.path.exists(nlcd_fn):
        return nlcd_fn
    cmd = ['get_nlcd.sh',]
    #subprocess.call(cmd)
    sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
def get_bareground_fn():
    """Return the path to the ~2010 global bare-ground VRT (30 m).

    Fetched by the external shell script `get_bareground.sh`; the unzipped
    source tiles total ~64 GB, which the script recompresses with lossless LZW.
    http://landcover.usgs.gov/glc/BareGroundDescriptionAndDownloads.php
    """
    bg_fn = os.path.join(datadir, 'bare2010/bare2010.vrt')
    if os.path.exists(bg_fn):
        return bg_fn
    cmd = ['get_bareground.sh',]
    #subprocess.call(cmd)
    sys.exit("Missing bareground data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#Download latest global RGI glacier db
def get_glacier_poly():
    """Return the path to the merged global RGI v6 glacier outline shapefile.

    Fetched by the external shell script `get_rgi.sh`, which unzips and
    merges the regional shapefiles into a single global shp.
    http://www.glims.org/RGI/
    """
    # Updated from rgi50 to rgi60; ideally get_rgi.sh itself would report this path
    rgi_fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')
    if os.path.exists(rgi_fn):
        return rgi_fn
    cmd = ['get_rgi.sh',]
    #subprocess.call(cmd)
    sys.exit("Missing rgi glacier data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#Update glacier polygons
#Create nlcd mask
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
    """Generate a boolean mask (True = preserve) from the NLCD LULC grid.

    NLCD class codes used here: 11 open water (includes rivers), 12 ice,
    31 rock, 41/42/43 forest classes, 52 shrub (<5 m tall, >20%).
    Using 'rock+ice+water' preserves the most pixels, although it could be
    problematic over areas with lakes.
    """
    print("Loading NLCD LULC")
    lulc = nlcd_ds.GetRasterBand(1).ReadAsArray()
    print("Filtering NLCD LULC with: %s" % filter)
    #Original nlcd products have nan as ndv
    # Either preserve exactly these classes, or preserve everything except them
    keep_classes = {
        'rock': [31],
        'rock+ice': [31, 12],
        'rock+ice+water': [31, 12, 11],
    }
    drop_classes = {
        'not_forest': [41, 42, 43],
        'not_forest+not_water': [41, 42, 43, 11],
    }
    if filter in keep_classes:
        mask = np.isin(lulc, keep_classes[filter])
    elif filter in drop_classes:
        mask = ~np.isin(lulc, drop_classes[filter])
    else:
        print("Invalid mask type")
        mask = None
    #Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(lulc, out_fn, nlcd_ds)
    lulc = None
    return mask
def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):
    """Generate a boolean mask (True = preserve) from global bare-ground data.

    Pixels whose bare-ground percentage exceeds bareground_thresh are True.
    """
    print("Loading bareground")
    pct = bareground_ds.GetRasterBand(1).ReadAsArray()
    print("Masking pixels with <%0.1f%% bare ground" % bareground_thresh)
    # Threshold is a percentage; bail out on nonsensical values
    if bareground_thresh < 0.0 or bareground_thresh > 100.0:
        sys.exit("Invalid bare ground percentage")
    mask = (pct > bareground_thresh)
    # Optionally write out the raw percentage grid for inspection
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(pct, out_fn, bareground_ds)
    # Drop the array reference before returning
    pct = None
    return mask
def get_snodas_ds(dem_dt, code=1036):
    """Function to fetch and process SNODAS snow depth products for input datetime

    http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/index.html

    Product codes:
    1036 is snow depth
    1034 is SWE

    filename format: us_ssmv11036tS__T0001TTNATS2015042205HP001.Hdr

    Returns a GDAL dataset for the extracted grid, or None when no product
    exists for the input date.
    """
    import tarfile
    import gzip
    snodas_ds = None
    snodas_url_str = None
    # Local cache directory for downloaded/unpacked SNODAS products
    outdir = os.path.join(datadir, 'snodas')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    #Note: unmasked products (beyond CONUS) are only available from 2010-present
    if dem_dt >= datetime(2003,9,30) and dem_dt < datetime(2010,1,1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/masked/%Y/%m_%b/SNODAS_%Y%m%d.tar'
        tar_subfn_str_fmt = 'us_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    elif dem_dt >= datetime(2010,1,1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/unmasked/%Y/%m_%b/SNODAS_unmasked_%Y%m%d.tar'
        tar_subfn_str_fmt = './zz_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    else:
        print("No SNODAS data available for input date")
    if snodas_url_str is not None:
        # Download the daily tarball for the DEM timestamp (cached in outdir)
        snodas_url = dem_dt.strftime(snodas_url_str)
        snodas_tar_fn = iolib.getfile(snodas_url, outdir=outdir)
        print("Unpacking")
        tar = tarfile.open(snodas_tar_fn)
        #gunzip to extract both dat and Hdr files, tar.gz
        for ext in ('dat', 'Hdr'):
            # First substitute the product code and extension, then the date fields
            tar_subfn_str = tar_subfn_str_fmt % (code, ext)
            tar_subfn_gz = dem_dt.strftime(tar_subfn_str)
            tar_subfn = os.path.splitext(tar_subfn_gz)[0]
            print(tar_subfn)
            if outdir is not None:
                tar_subfn = os.path.join(outdir, tar_subfn)
            if not os.path.exists(tar_subfn):
                #Should be able to do this without writing intermediate gz to disk
                # NOTE(review): tar.extract writes tar_subfn_gz to the current
                # working directory, not outdir — confirm this is intended
                tar.extract(tar_subfn_gz)
                with gzip.open(tar_subfn_gz, 'rb') as f:
                    outf = open(tar_subfn, 'wb')
                    outf.write(f.read())
                    outf.close()
                os.remove(tar_subfn_gz)
        #Need to delete 'Created by module comment' line from Hdr, can contain too many characters
        bad_str = 'Created by module comment'
        # tar_subfn is the last file written by the loop above (the .Hdr)
        snodas_fn = tar_subfn
        f = open(snodas_fn)
        output = []
        for line in f:
            if not bad_str in line:
                output.append(line)
        f.close()
        f = open(snodas_fn, 'w')
        f.writelines(output)
        f.close()
        #Return GDAL dataset for extracted product
        snodas_ds = gdal.Open(snodas_fn)
    return snodas_ds
def get_modis_tile_list(ds):
    """Identify MODIS sinusoidal grid tiles that intersect the input dataset.

    modis_grid.py contains a dictionary of tile boundaries (tile name and
    WKT polygon ring from bbox).
    See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
    """
    from demcoreg import modis_grid
    #Build OGR geometries from the WKT bbox ring for every MODIS tile
    tile_geom = {}
    for name in modis_grid.modis_dict:
        tile_geom[name] = ogr.CreateGeometryFromWkt(modis_grid.modis_dict[name])
    #Duplicate the dataset footprint and reproject the copy to WGS84
    #before testing intersections (tile polygons are in geographic coords)
    ds_geom = geolib.geom_dup(geolib.ds_geom(ds))
    xform = osr.CoordinateTransformation(ds_geom.GetSpatialReference(), geolib.wgs_srs)
    ds_geom.Transform(xform)
    return [name for name, geom in list(tile_geom.items()) if ds_geom.Intersects(geom)]
def get_modscag_fn_list(dem_dt, tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05'), pad_days=7):
    """Function to fetch and process MODSCAG fractional snow cover products for input datetime
    Products are tiled in MODIS sinusoidal projection
    example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif

    Returns a list of daily mosaic vrt filenames covering dem_dt +/- pad_days.
    Requires network access and JPL snow-data credentials (via iolib.get_auth).
    """
    #Could also use global MODIS 500 m snowcover grids, 8 day
    #http://nsidc.org/data/docs/daac/modis_v5/mod10a2_modis_terra_snow_8-day_global_500m_grid.gd.html
    #These are HDF4, sinusoidal
    #Should be able to load up with warplib without issue
    import re
    import requests
    from bs4 import BeautifulSoup
    auth = iolib.get_auth()
    #One datetime per day across the padded window
    pad_days = timedelta(days=pad_days)
    dt_list = timelib.dt_range(dem_dt-pad_days, dem_dt+pad_days+timedelta(1), timedelta(1))
    outdir = os.path.join(datadir, 'modscag')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    out_vrt_fn_list = []
    for dt in dt_list:
        out_vrt_fn = os.path.join(outdir, dt.strftime('%Y%m%d_snow_fraction.vrt'))
        #If we already have a vrt and it contains all of the necessary tiles
        if os.path.exists(out_vrt_fn):
            vrt_ds = gdal.Open(out_vrt_fn)
            if np.all([np.any([tile in sub_fn for sub_fn in vrt_ds.GetFileList()]) for tile in tile_list]):
                out_vrt_fn_list.append(out_vrt_fn)
                continue
        #Otherwise, download missing tiles and rebuild
        #Try to use historic products
        modscag_fn_list = []
        #Note: not all tiles are available for same date ranges in historic vs. real-time
        #Need to repeat search tile-by-tile
        for tile in tile_list:
            modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag-historic/%Y/%j/'
            modscag_url_base = dt.strftime(modscag_url_str)
            print("Trying: %s" % modscag_url_base)
            r = requests.get(modscag_url_base, auth=auth)
            modscag_url_fn = []
            if r.ok:
                #Scrape the directory listing for this tile's snow_fraction tif
                parsed_html = BeautifulSoup(r.content, "html.parser")
                modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
            if not modscag_url_fn:
                #Couldn't find historic, try to use real-time products
                modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag/%Y/%j/'
                modscag_url_base = dt.strftime(modscag_url_str)
                print("Trying: %s" % modscag_url_base)
                r = requests.get(modscag_url_base, auth=auth)
                if r.ok:
                    parsed_html = BeautifulSoup(r.content, "html.parser")
                    modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
            if not modscag_url_fn:
                print("Unable to fetch MODSCAG for %s" % dt)
            else:
                #OK, we got
                #Now extract actual tif filenames to fetch from html
                parsed_html = BeautifulSoup(r.content, "html.parser")
                #Fetch all tiles
                modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
                if modscag_url_fn:
                    #Only the first match for this tile/date is downloaded
                    modscag_url_fn = modscag_url_fn[0]
                    modscag_url = os.path.join(modscag_url_base, modscag_url_fn)
                    print(modscag_url)
                    modscag_fn = os.path.join(outdir, os.path.split(modscag_url_fn)[-1])
                    if not os.path.exists(modscag_fn):
                        iolib.getfile2(modscag_url, auth=auth, outdir=outdir)
                    modscag_fn_list.append(modscag_fn)
        #Mosaic tiles - currently a hack
        if modscag_fn_list:
            cmd = ['gdalbuildvrt', '-vrtnodata', '255', out_vrt_fn]
            cmd.extend(modscag_fn_list)
            print(cmd)
            subprocess.call(cmd, shell=False)
            out_vrt_fn_list.append(out_vrt_fn)
    return out_vrt_fn_list
def proc_modscag(fn_list, extent=None, t_srs=None):
    """Process the MODSCAG products for full date range, create composites and reproject

    Warps all inputs to a common grid, stacks them, writes count/max/min/med
    composites as GTiff, and returns a GDAL dataset for the median composite.
    """
    #Use cubic spline here for improve upsampling
    ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
    stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list)
    #Create stack here - no need for most of mastack machinery, just make 3D array
    #Mask values greater than 100% (clouds, bad pixels, etc)
    #Fix: iterate ds_list directly; wrapping it in np.array() only built a
    #throwaway object array
    ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in ds_list], dtype=np.uint8)
    #Build each composite; ndv is 0 for all products
    products = {}
    products['count'] = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)
    products['max'] = ma_stack.max(axis=0).astype(np.uint8)
    products['min'] = ma_stack.min(axis=0).astype(np.uint8)
    products['med'] = np.ma.median(ma_stack, axis=0).astype(np.uint8)
    #Preserve original write order: count, max, min, med
    for key in ('count', 'max', 'min', 'med'):
        products[key].set_fill_value(0)
        out_fn = stack_fn + '_%s.tif' % key
        iolib.writeGTiff(products[key], out_fn, ds_list[0])
    #out_fn is the med composite (last written), same as the original return
    ds = gdal.Open(out_fn)
    return ds
def get_toa_fn(dem_fn):
    """Locate (or generate via toa.sh) the TOA reflectance tif for a DEM.

    Walks up the DEM's real path to the top-level pair directory (e.g.
    WV02_20140514_1030010031114100_1030010030896000), globs for *toa.tif,
    and falls back to running toa.sh when an ortho product exists.
    Exits if no TOA dataset can be found.
    """
    import re
    toa_fn = None
    #Original approach, assumes DEM file is in *00/dem_*/*DEM_32m.tif
    path_parts = os.path.split(os.path.realpath(dem_fn))[0].split(os.sep)
    #Index of the top-level pair directory containing toa
    pair_idx = [i for i, part in enumerate(path_parts) if re.search('(_10)*(_10)*00$', part)]
    if pair_idx:
        #Reconstruct the pair directory path
        pair_dir = (os.sep).join(path_parts[0:pair_idx[0]+1])
        #Find toa.tif in top-level dir
        matches = glob.glob(os.path.join(pair_dir, '*toa.tif'))
        if not matches:
            #No TOA yet - generate one with toa.sh if an ortho product exists
            if glob.glob(os.path.join(pair_dir, '*ortho*.tif')):
                cmd = ['toa.sh', pair_dir]
                print(cmd)
                subprocess.call(cmd)
                matches = glob.glob(os.path.join(pair_dir, '*toa.tif'))
        toa_fn = matches[0] if matches else None
    if toa_fn is None:
        sys.exit("Unable to locate TOA dataset")
    return toa_fn
#TOA reflectance filter
def get_toa_mask(toa_ds, toa_thresh=0.4):
    """Mask bright pixels using scaled TOA reflectance values (0-1).

    Returns a boolean array: True for valid (dark) surfaces, False for
    pixels brighter than toa_thresh or nodata.
    """
    print("Applying TOA filter (masking values >= %0.2f)" % toa_thresh)
    toa = iolib.ds_getma(toa_ds)
    #Invert the combined (nodata | bright) mask: 1 valid, 0 snowcovered
    return ~np.ma.getmaskarray(np.ma.masked_greater(toa, toa_thresh))
def check_mask_list(mask_list):
    """Return only the entries of mask_list found in mask_choices.

    Invalid entries are reported and dropped; input order is preserved.
    """
    valid = []
    for choice in mask_list:
        if choice in mask_choices:
            valid.append(choice)
        else:
            print("Invalid mask choice: %s" % choice)
    return valid
def get_mask(dem_ds, mask_list, dem_fn=None, writeout=False, outdir=None, args=None):
    """Generate a boolean mask of control surfaces for the input DEM.

    Cascades the requested filters from mask_list (see mask_choices):
    internally True marks a valid (stable) pixel, and filter results are
    combined with logical AND.  The combined result is inverted before
    return, so True marks pixels to MASK (numpy masked-array convention).

    dem_ds: GDAL dataset defining grid/extent/projection
    mask_list: subset of mask_choices
    dem_fn: DEM filename (used for timestamp and output basenames)
    writeout: write intermediate products to disk
    outdir: output directory (default: directory containing dem_fn)
    args: argparse.Namespace with thresholds (parser defaults if None)
    """
    mask_list = check_mask_list(mask_list)
    if 'none' in mask_list:
        newmask = False
    else:
        #Basename for output files
        if outdir is not None:
            if not os.path.exists(outdir):
                os.makedirs(outdir)
        else:
            outdir = os.path.split(dem_fn)[0]
        if dem_fn is not None:
            #Extract DEM timestamp
            #NOTE(review): dem_dt/out_fn_base are only bound when dem_fn is
            #given; the snodas/modscag/toa filters assume a valid dem_fn
            dem_dt = timelib.fn_getdatetime(dem_fn)
            out_fn_base = os.path.join(outdir, os.path.splitext(dem_fn)[0])
        if args is None:
            #Get default values
            parser = getparser()
            args = parser.parse_args(['',])
        #Start with all pixels valid; each filter ANDs in its own mask
        newmask = True
        if 'glaciers' in mask_list:
            icemask = get_icemask(dem_ds)
            if writeout:
                out_fn = out_fn_base+'_ice_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(icemask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(icemask, newmask)
        #Need to process NLCD separately, with nearest neighbor interpolation
        #Bugfix: was `args.nlcd_filter is not 'none'`, an identity comparison
        #that only worked via CPython string interning (SyntaxWarning on 3.8+)
        if 'nlcd' in mask_list and args.nlcd_filter != 'none':
            rs = 'near'
            nlcd_ds = gdal.Open(get_nlcd_fn())
            nlcd_ds_warp = warplib.memwarp_multi([nlcd_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r=rs)[0]
            out_fn = None
            if writeout:
                out_fn = out_fn_base+'_nlcd.tif'
            nlcdmask = get_nlcd_mask(nlcd_ds_warp, filter=args.nlcd_filter, out_fn=out_fn)
            if writeout:
                out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(nlcdmask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(nlcdmask, newmask)
        if 'bareground' in mask_list and args.bareground_thresh > 0:
            bareground_ds = gdal.Open(get_bareground_fn())
            bareground_ds_warp = warplib.memwarp_multi([bareground_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
            out_fn = None
            if writeout:
                out_fn = out_fn_base+'_bareground.tif'
            baregroundmask = get_bareground_mask(bareground_ds_warp, bareground_thresh=args.bareground_thresh, out_fn=out_fn)
            if writeout:
                out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(baregroundmask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(baregroundmask, newmask)
        if 'snodas' in mask_list and args.snodas_thresh > 0:
            #Get SNODAS snow depth products for DEM timestamp
            snodas_min_dt = datetime(2003,9,30)
            if dem_dt >= snodas_min_dt:
                snodas_ds = get_snodas_ds(dem_dt)
                if snodas_ds is not None:
                    snodas_ds_warp = warplib.memwarp_multi([snodas_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
                    #snow depth values are mm, convert to meters
                    snodas_depth = iolib.ds_getma(snodas_ds_warp)/1000.
                    if snodas_depth.count() > 0:
                        print("Applying SNODAS snow depth filter (masking values >= %0.2f m)" % args.snodas_thresh)
                        out_fn = None
                        if writeout:
                            out_fn = out_fn_base+'_snodas_depth.tif'
                            print("Writing out %s" % out_fn)
                            iolib.writeGTiff(snodas_depth, out_fn, src_ds=dem_ds)
                        snodas_mask = np.ma.masked_greater(snodas_depth, args.snodas_thresh)
                        snodas_mask = ~(np.ma.getmaskarray(snodas_mask))
                        if writeout:
                            out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                            print("Writing out %s" % out_fn)
                            iolib.writeGTiff(snodas_mask, out_fn, src_ds=dem_ds)
                        newmask = np.logical_and(snodas_mask, newmask)
                    else:
                        print("SNODAS grid for input location and timestamp is empty")
        #These tiles cover CONUS
        #tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05')
        if 'modscag' in mask_list and args.modscag_thresh > 0:
            modscag_min_dt = datetime(2000,2,24)
            if dem_dt < modscag_min_dt:
                print("Warning: DEM timestamp (%s) is before earliest MODSCAG timestamp (%s)" \
                        % (dem_dt, modscag_min_dt))
            else:
                tile_list = get_modis_tile_list(dem_ds)
                print(tile_list)
                pad_days=7
                modscag_fn_list = get_modscag_fn_list(dem_dt, tile_list=tile_list, pad_days=pad_days)
                if modscag_fn_list:
                    modscag_ds = proc_modscag(modscag_fn_list, extent=dem_ds, t_srs=dem_ds)
                    modscag_ds_warp = warplib.memwarp_multi([modscag_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
                    print("Applying MODSCAG fractional snow cover percent filter (masking values >= %0.1f%%)" % args.modscag_thresh)
                    modscag_fsca = iolib.ds_getma(modscag_ds_warp)
                    out_fn = None
                    if writeout:
                        out_fn = out_fn_base+'_modscag_fsca.tif'
                        print("Writing out %s" % out_fn)
                        iolib.writeGTiff(modscag_fsca, out_fn, src_ds=dem_ds)
                    #Fill nodata with 0% snow so those pixels stay valid
                    modscag_mask = (modscag_fsca.filled(0) >= args.modscag_thresh)
                    modscag_mask = ~(modscag_mask)
                    if writeout:
                        out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                        print("Writing out %s" % out_fn)
                        iolib.writeGTiff(modscag_mask, out_fn, src_ds=dem_ds)
                    newmask = np.logical_and(modscag_mask, newmask)
        #Use reflectance values to estimate snowcover
        if 'toa' in mask_list:
            #Use top of atmosphere scaled reflectance values (0-1)
            toa_ds = gdal.Open(get_toa_fn(dem_fn))
            toa_mask = get_toa_mask(toa_ds, args.toa_thresh)
            if writeout:
                out_fn = out_fn_base+'_toa_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(toa_mask, newmask)
        if False:
            #Filter based on expected snowline
            #Simplest approach uses altitude cutoff
            #NOTE(review): dead code; `dem` is not defined in this scope
            max_elev = 1500
            newdem = np.ma.masked_greater(dem, max_elev)
            newmask = np.ma.getmaskarray(newdem)
    print("Generating final mask to use for reference surfaces, and applying to input DEM")
    #Now invert to use to create final masked array
    #True (1) represents "invalid" pixel to match numpy ma convention
    newmask = ~newmask
    #Dilate the mask
    #NOTE(review): this dereferences args.dilate even on the 'none' path,
    #where args may still be None - confirm callers always pass args
    if args.dilate is not None:
        niter = args.dilate
        print("Dilating mask with %i iterations" % niter)
        from scipy import ndimage
        newmask = ~(ndimage.morphology.binary_dilation(~newmask, iterations=niter))
    return newmask
#Can add "mask_list" argument, instead of specifying individually
mask_choices = ['toa', 'snodas', 'modscag', 'bareground', 'glaciers', 'nlcd', 'none']
def getparser():
    """Build the argument parser for control-surface identification."""
    p = argparse.ArgumentParser(description="Identify control surfaces for DEM co-registration")
    p.add_argument('dem_fn', type=str, help='DEM filename')
    p.add_argument('--outdir', default=None, help='Directory for output products')
    p.add_argument('--writeout', action='store_true', help='Write out all intermediate products, instead of only final tif')
    #p.add_argument('-datadir', default=None, help='Data directory containing reference data sources (NLCD, bareground, etc)')
    p.add_argument('--toa', action='store_true', help='Use top-of-atmosphere reflectance values (requires pregenerated "dem_fn_toa.tif")')
    p.add_argument('--toa_thresh', type=float, default=0.4, help='Top-of-atmosphere reflectance threshold (default: %(default)s, valid range 0.0-1.0), mask values greater than this value')
    p.add_argument('--snodas', action='store_true', help='Use SNODAS snow depth products')
    p.add_argument('--snodas_thresh', type=float, default=0.2, help='SNODAS snow depth threshold (default: %(default)s m), mask values greater than this value')
    p.add_argument('--modscag', action='store_true', help='Use MODSCAG fractional snow cover products')
    p.add_argument('--modscag_thresh', type=float, default=50, help='MODSCAG fractional snow cover percent threshold (default: %(default)s%%, valid range 0-100), mask greater than this value')
    p.add_argument('--bareground', action='store_true', help="Enable bareground filter")
    p.add_argument('--bareground_thresh', type=float, default=60, help='Percent bareground threshold (default: %(default)s%%, valid range 0-100), mask greater than this value (only relevant for global bareground data)')
    p.add_argument('--glaciers', action='store_true', help="Mask glacier polygons")
    p.add_argument('--nlcd', action='store_true', help="Enable NLCD LULC filter (for CONUS)")
    lulc_choices = ['rock', 'rock+ice', 'rock+ice+water', 'not_forest', 'not_forest+not_water', 'none']
    p.add_argument('--nlcd_filter', type=str, default='not_forest', choices=lulc_choices, help='Preserve these NLCD pixels (default: %(default)s)')
    p.add_argument('--dilate', type=int, default=None, help='Dilate mask with this many iterations (default: %(default)s)')
    return p
def main():
    """CLI entry point: write a masked DEM containing only control surfaces."""
    parser = getparser()
    args = parser.parse_args()
    #Translate boolean flags into the list of mask types to apply
    mask_list = []
    if args.toa: mask_list.append('toa')
    if args.snodas: mask_list.append('snodas')
    if args.modscag: mask_list.append('modscag')
    if args.bareground: mask_list.append('bareground')
    if args.glaciers: mask_list.append('glaciers')
    if args.nlcd: mask_list.append('nlcd')
    if not mask_list:
        parser.print_help()
        sys.exit("Must specify at least one mask type")
    #This directory should or will contain the relevant data products
    #if args.datadir is None:
    #    datadir = iolib.get_datadir()
    dem_fn = args.dem_fn
    dem_ds = gdal.Open(dem_fn)
    print(dem_fn)
    #Get DEM masked array
    dem = iolib.ds_getma(dem_ds)
    print("%i valid pixels in original input tif" % dem.count())
    #Set up cascading mask preparation
    #True (1) represents "valid" unmasked pixel, False (0) represents "invalid" pixel to be masked
    #Initialize the mask
    #newmask = ~(np.ma.getmaskarray(dem))
    newmask = get_mask(dem_ds, mask_list, dem_fn=dem_fn, writeout=args.writeout, outdir=args.outdir, args=args)
    #Apply mask to original DEM - use these surfaces for co-registration
    newdem = np.ma.array(dem, mask=newmask)
    #Check that we have enough pixels, good distribution
    min_validpx_count = 100
    min_validpx_std = 10
    validpx_count = newdem.count()
    validpx_std = newdem.std()
    print("%i valid pixels in masked output tif to be used as ref" % validpx_count)
    print("%0.2f std in masked output tif to be used as ref" % validpx_std)
    #if (validpx_count > min_validpx_count) and (validpx_std > min_validpx_std):
    if (validpx_count > min_validpx_count):
        #Bugfix: args.outdir defaults to None, and os.path.join(None, ...)
        #raised TypeError; fall back to the DEM's directory like get_mask does
        outdir = args.outdir
        if outdir is None:
            outdir = os.path.split(dem_fn)[0]
        out_fn = os.path.join(outdir, os.path.splitext(dem_fn)[0]+'_ref.tif')
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(newdem, out_fn, src_ds=dem_ds)
    else:
        print("Not enough valid pixels!")
if __name__ == "__main__":
main()
|
dshean/demcoreg
|
demcoreg/dem_mask.py
|
get_nlcd_mask
|
python
|
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
    """Generate raster mask for specified NLCD LULC filter.

    Returns boolean array: True for pixels matching the filter, or None
    for an unrecognized filter name.
    """
    print("Loading NLCD LULC")
    lulc = nlcd_ds.GetRasterBand(1).ReadAsArray()
    print("Filtering NLCD LULC with: %s" % filter)
    #Original nlcd products have nan as ndv
    #NLCD class codes: 11 open water (includes rivers), 12 ice, 31 rock,
    #41/42/43 forest, 52 shrub (<5 m tall, >20%)
    #Should use data dictionary here for general masking
    #Using 'rock+ice+water' preserves the most pixels, although could be problematic over areas with lakes
    if filter == 'rock':
        mask = np.isin(lulc, (31,))
    elif filter == 'rock+ice':
        mask = np.isin(lulc, (31, 12))
    elif filter == 'rock+ice+water':
        mask = np.isin(lulc, (31, 12, 11))
    elif filter == 'not_forest':
        mask = ~np.isin(lulc, (41, 42, 43))
    elif filter == 'not_forest+not_water':
        mask = ~np.isin(lulc, (41, 42, 43, 11))
    else:
        print("Invalid mask type")
        mask = None
    #Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(lulc, out_fn, nlcd_ds)
    lulc = None
    return mask
|
Generate raster mask for specified NLCD LULC filter
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/dem_mask.py#L108-L141
| null |
#! /usr/bin/env python
"""
Utility to automate reference surface identification for raster co-registration
Note: Initial run may take a long time to download and process required data (NLCD, global bareground, glacier polygons)
Can control location of these data files with DATADIR environmental variable
export DATADIR=dir
Dependencies: gdal, wget, requests, bs4
"""
#To do:
#Integrate 1-km LULC data: http://www.landcover.org/data/landcover/
#TODO: need to clean up toa handling
import sys
import os
import subprocess
import glob
import argparse
from osgeo import gdal, ogr, osr
import numpy as np
from datetime import datetime, timedelta
from pygeotools.lib import iolib, warplib, geolib, timelib
datadir = iolib.get_datadir()
def get_nlcd_fn():
    """Return path to the 2011 NLCD Land Use Land Cover grid (30 m).

    Data are fetched by the external shell script `get_nlcd.sh`, which now
    creates a compressed GTiff (~1.1 GB; the original .img was ~17 GB).
    http://www.mrlc.gov/nlcd11_leg.php
    """
    fetch_script = 'get_nlcd.sh'
    nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
    if not os.path.exists(nlcd_fn):
        sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % fetch_script)
    return nlcd_fn
def get_bareground_fn():
    """Return path to the ~2010 global bare ground vrt (30 m).

    Data are fetched by the external shell script `get_bareground.sh`
    (unzipped size is 64 GB; the script applies lossless LZW compression).
    http://landcover.usgs.gov/glc/BareGroundDescriptionAndDownloads.php
    """
    fetch_script = 'get_bareground.sh'
    bg_fn = os.path.join(datadir, 'bare2010/bare2010.vrt')
    if not os.path.exists(bg_fn):
        sys.exit("Missing bareground data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % fetch_script)
    return bg_fn
#Download latest global RGI glacier db
def get_glacier_poly():
    """Return path to merged Randolph Glacier Inventory (RGI) outlines.

    Data are fetched by the external shell script `get_rgi.sh`, which
    unzips and merges the regional shp into a single global shp.
    Updated from rgi50 to rgi60.
    http://www.glims.org/RGI/
    """
    fetch_script = 'get_rgi.sh'
    rgi_fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')
    if not os.path.exists(rgi_fn):
        sys.exit("Missing rgi glacier data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % fetch_script)
    return rgi_fn
#Update glacier polygons
def get_icemask(ds, glac_shp_fn=None):
    """Generate glacier polygon raster mask for input Dataset res/extent

    Rasterizes the glacier outlines (default: RGI merge from
    get_glacier_poly) onto the grid of ds via geolib.shp2array.
    """
    print("Masking glaciers")
    if glac_shp_fn is None:
        glac_shp_fn = get_glacier_poly()
    if not os.path.exists(glac_shp_fn):
        #Bugfix: previously only printed a warning and then crashed with
        #UnboundLocalError on `return icemask`; fail with a clear message
        sys.exit("Unable to locate glacier shp: %s" % glac_shp_fn)
    print("Found glacier shp: %s" % glac_shp_fn)
    #All of the proj, extent, handling should now occur in shp2array
    icemask = geolib.shp2array(glac_shp_fn, ds)
    return icemask
#Create bareground mask
def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):
    """Generate raster mask for exposed bare ground from global bareground data

    Returns boolean array: True where percent bare ground exceeds
    bareground_thresh.
    """
    print("Loading bareground")
    pct_bare = bareground_ds.GetRasterBand(1).ReadAsArray()
    print("Masking pixels with <%0.1f%% bare ground" % bareground_thresh)
    if bareground_thresh < 0.0 or bareground_thresh > 100.0:
        sys.exit("Invalid bare ground percentage")
    mask = (pct_bare > bareground_thresh)
    #Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(pct_bare, out_fn, bareground_ds)
    #Drop the reference to the full-res array
    pct_bare = None
    return mask
def get_snodas_ds(dem_dt, code=1036):
    """Function to fetch and process SNODAS snow depth products for input datetime
    http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/index.html
    Product codes:
    1036 is snow depth
    1034 is SWE
    filename format: us_ssmv11036tS__T0001TTNATS2015042205HP001.Hdr

    Returns a GDAL dataset for the extracted grid, or None when no data
    are available for the input date.
    """
    #Deferred imports: only needed when SNODAS products are requested
    import tarfile
    import gzip
    snodas_ds = None
    snodas_url_str = None
    outdir = os.path.join(datadir, 'snodas')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    #Note: unmasked products (beyond CONUS) are only available from 2010-present
    if dem_dt >= datetime(2003,9,30) and dem_dt < datetime(2010,1,1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/masked/%Y/%m_%b/SNODAS_%Y%m%d.tar'
        tar_subfn_str_fmt = 'us_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    elif dem_dt >= datetime(2010,1,1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/unmasked/%Y/%m_%b/SNODAS_unmasked_%Y%m%d.tar'
        tar_subfn_str_fmt = './zz_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    else:
        print("No SNODAS data available for input date")
    if snodas_url_str is not None:
        snodas_url = dem_dt.strftime(snodas_url_str)
        snodas_tar_fn = iolib.getfile(snodas_url, outdir=outdir)
        print("Unpacking")
        #NOTE(review): tar handle is never closed and files below are opened
        #without context managers - consider with-statements here
        tar = tarfile.open(snodas_tar_fn)
        #gunzip to extract both dat and Hdr files, tar.gz
        for ext in ('dat', 'Hdr'):
            tar_subfn_str = tar_subfn_str_fmt % (code, ext)
            tar_subfn_gz = dem_dt.strftime(tar_subfn_str)
            tar_subfn = os.path.splitext(tar_subfn_gz)[0]
            print(tar_subfn)
            if outdir is not None:
                tar_subfn = os.path.join(outdir, tar_subfn)
            if not os.path.exists(tar_subfn):
                #Should be able to do this without writing intermediate gz to disk
                tar.extract(tar_subfn_gz)
                with gzip.open(tar_subfn_gz, 'rb') as f:
                    outf = open(tar_subfn, 'wb')
                    outf.write(f.read())
                    outf.close()
                os.remove(tar_subfn_gz)
        #Need to delete 'Created by module comment' line from Hdr, can contain too many characters
        bad_str = 'Created by module comment'
        #After the loop, tar_subfn points at the extracted .Hdr file
        snodas_fn = tar_subfn
        f = open(snodas_fn)
        output = []
        for line in f:
            if not bad_str in line:
                output.append(line)
        f.close()
        #Rewrite the sanitized Hdr in place
        f = open(snodas_fn, 'w')
        f.writelines(output)
        f.close()
        #Return GDAL dataset for extracted product
        snodas_ds = gdal.Open(snodas_fn)
    return snodas_ds
def get_modis_tile_list(ds):
    """Helper function to identify MODIS tiles that intersect input geometry
    modis_gird.py contains dictionary of tile boundaries (tile name and WKT polygon ring from bbox)
    See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html

    Returns a list of tile name strings (e.g. 'h09v04').
    """
    from demcoreg import modis_grid
    #Build OGR geometries from the WKT bbox ring for every MODIS tile
    modis_dict = {}
    for key in modis_grid.modis_dict:
        modis_dict[key] = ogr.CreateGeometryFromWkt(modis_grid.modis_dict[key])
    #Duplicate the dataset footprint and reproject the copy to WGS84
    #(tile polygons are in geographic coordinates)
    geom = geolib.ds_geom(ds)
    geom_dup = geolib.geom_dup(geom)
    ct = osr.CoordinateTransformation(geom_dup.GetSpatialReference(), geolib.wgs_srs)
    geom_dup.Transform(ct)
    tile_list = []
    for key, val in list(modis_dict.items()):
        if geom_dup.Intersects(val):
            tile_list.append(key)
    return tile_list
def get_modscag_fn_list(dem_dt, tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05'), pad_days=7):
    """Function to fetch and process MODSCAG fractional snow cover products for input datetime
    Products are tiled in MODIS sinusoidal projection
    example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif

    Returns a list of daily mosaic vrt filenames covering dem_dt +/- pad_days.
    Requires network access and JPL snow-data credentials (via iolib.get_auth).
    """
    #Could also use global MODIS 500 m snowcover grids, 8 day
    #http://nsidc.org/data/docs/daac/modis_v5/mod10a2_modis_terra_snow_8-day_global_500m_grid.gd.html
    #These are HDF4, sinusoidal
    #Should be able to load up with warplib without issue
    import re
    import requests
    from bs4 import BeautifulSoup
    auth = iolib.get_auth()
    #One datetime per day across the padded window
    pad_days = timedelta(days=pad_days)
    dt_list = timelib.dt_range(dem_dt-pad_days, dem_dt+pad_days+timedelta(1), timedelta(1))
    outdir = os.path.join(datadir, 'modscag')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    out_vrt_fn_list = []
    for dt in dt_list:
        out_vrt_fn = os.path.join(outdir, dt.strftime('%Y%m%d_snow_fraction.vrt'))
        #If we already have a vrt and it contains all of the necessary tiles
        if os.path.exists(out_vrt_fn):
            vrt_ds = gdal.Open(out_vrt_fn)
            if np.all([np.any([tile in sub_fn for sub_fn in vrt_ds.GetFileList()]) for tile in tile_list]):
                out_vrt_fn_list.append(out_vrt_fn)
                continue
        #Otherwise, download missing tiles and rebuild
        #Try to use historic products
        modscag_fn_list = []
        #Note: not all tiles are available for same date ranges in historic vs. real-time
        #Need to repeat search tile-by-tile
        for tile in tile_list:
            modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag-historic/%Y/%j/'
            modscag_url_base = dt.strftime(modscag_url_str)
            print("Trying: %s" % modscag_url_base)
            r = requests.get(modscag_url_base, auth=auth)
            modscag_url_fn = []
            if r.ok:
                #Scrape the directory listing for this tile's snow_fraction tif
                parsed_html = BeautifulSoup(r.content, "html.parser")
                modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
            if not modscag_url_fn:
                #Couldn't find historic, try to use real-time products
                modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag/%Y/%j/'
                modscag_url_base = dt.strftime(modscag_url_str)
                print("Trying: %s" % modscag_url_base)
                r = requests.get(modscag_url_base, auth=auth)
                if r.ok:
                    parsed_html = BeautifulSoup(r.content, "html.parser")
                    modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
            if not modscag_url_fn:
                print("Unable to fetch MODSCAG for %s" % dt)
            else:
                #OK, we got
                #Now extract actual tif filenames to fetch from html
                parsed_html = BeautifulSoup(r.content, "html.parser")
                #Fetch all tiles
                modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
                if modscag_url_fn:
                    #Only the first match for this tile/date is downloaded
                    modscag_url_fn = modscag_url_fn[0]
                    modscag_url = os.path.join(modscag_url_base, modscag_url_fn)
                    print(modscag_url)
                    modscag_fn = os.path.join(outdir, os.path.split(modscag_url_fn)[-1])
                    if not os.path.exists(modscag_fn):
                        iolib.getfile2(modscag_url, auth=auth, outdir=outdir)
                    modscag_fn_list.append(modscag_fn)
        #Mosaic tiles - currently a hack
        if modscag_fn_list:
            cmd = ['gdalbuildvrt', '-vrtnodata', '255', out_vrt_fn]
            cmd.extend(modscag_fn_list)
            print(cmd)
            subprocess.call(cmd, shell=False)
            out_vrt_fn_list.append(out_vrt_fn)
    return out_vrt_fn_list
def proc_modscag(fn_list, extent=None, t_srs=None):
    """Process the MODSCAG products for full date range, create composites and reproject

    Warps all inputs to a common grid, stacks them, writes count/max/min/med
    composites as GTiff, and returns a GDAL dataset for the median composite.
    """
    #Use cubic spline here for improve upsampling
    ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
    #Output basename: first input + last input + stack length
    stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list)
    #Create stack here - no need for most of mastack machinery, just make 3D array
    #Mask values greater than 100% (clouds, bad pixels, etc)
    ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)
    #Per-pixel count of valid observations; 0 becomes nodata
    stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)
    stack_count.set_fill_value(0)
    stack_min = ma_stack.min(axis=0).astype(np.uint8)
    stack_min.set_fill_value(0)
    stack_max = ma_stack.max(axis=0).astype(np.uint8)
    stack_max.set_fill_value(0)
    stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)
    stack_med.set_fill_value(0)
    out_fn = stack_fn + '_count.tif'
    iolib.writeGTiff(stack_count, out_fn, ds_list[0])
    out_fn = stack_fn + '_max.tif'
    iolib.writeGTiff(stack_max, out_fn, ds_list[0])
    out_fn = stack_fn + '_min.tif'
    iolib.writeGTiff(stack_min, out_fn, ds_list[0])
    out_fn = stack_fn + '_med.tif'
    iolib.writeGTiff(stack_med, out_fn, ds_list[0])
    #Reopen and return the median composite (last file written)
    ds = gdal.Open(out_fn)
    return ds
def get_toa_fn(dem_fn):
    """Locate (or generate via toa.sh) the TOA reflectance tif for a DEM.

    Walks up the DEM's real path to the top-level pair directory, globs for
    *toa.tif, and falls back to running toa.sh when an ortho product
    exists.  Exits if no TOA dataset can be found.
    """
    toa_fn = None
    #Original approach, assumes DEM file is in *00/dem_*/*DEM_32m.tif
    #dem_dir = os.path.split(os.path.split(os.path.abspath(dem_fn))[0])[0]
    dem_dir_list = os.path.split(os.path.realpath(dem_fn))[0].split(os.sep)
    import re
    #Get index of the top level pair directory containing toa (WV02_20140514_1030010031114100_1030010030896000)
    r_idx = [i for i, item in enumerate(dem_dir_list) if re.search('(_10)*(_10)*00$', item)]
    if r_idx:
        r_idx = r_idx[0]
        #Reconstruct dir
        dem_dir = (os.sep).join(dem_dir_list[0:r_idx+1])
        #Find toa.tif in top-level dir
        toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
        if not toa_fn:
            #No TOA product yet - generate one with toa.sh if an ortho exists
            ortho_fn = glob.glob(os.path.join(dem_dir, '*ortho*.tif'))
            if ortho_fn:
                cmd = ['toa.sh', dem_dir]
                print(cmd)
                subprocess.call(cmd)
                toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
        #Reduce the glob result list to a single filename (or None)
        if toa_fn:
            toa_fn = toa_fn[0]
        else:
            toa_fn = None
    if toa_fn is None:
        sys.exit("Unable to locate TOA dataset")
    return toa_fn
#TOA reflectance filter
def get_toa_mask(toa_ds, toa_thresh=0.4):
    """Mask bright pixels using scaled TOA reflectance values (0-1).

    Returns boolean array: True for valid (dark) surfaces, False for
    pixels brighter than toa_thresh or nodata.
    """
    print("Applying TOA filter (masking values >= %0.2f)" % toa_thresh)
    toa = iolib.ds_getma(toa_ds)
    toa_mask = np.ma.masked_greater(toa, toa_thresh)
    #This should be 1 for valid surfaces, 0 for snowcovered surfaces
    toa_mask = ~(np.ma.getmaskarray(toa_mask))
    return toa_mask
def check_mask_list(mask_list):
    """Return only the entries of mask_list found in mask_choices.

    Invalid entries are reported and dropped; input order is preserved.
    """
    temp = []
    for m in mask_list:
        if m not in mask_choices:
            print("Invalid mask choice: %s" % m)
        else:
            temp.append(m)
    return temp
def get_mask(dem_ds, mask_list, dem_fn=None, writeout=False, outdir=None, args=None):
    """Generate a cascaded boolean mask identifying control surfaces for a DEM.

    Each requested source in mask_list (see mask_choices) is warped to the
    DEM grid and AND-ed into a running validity mask, which is then inverted
    so the return value follows the numpy.ma convention:
    True = invalid pixel (to be masked), False = valid control surface.

    Parameters:
        dem_ds: GDAL dataset for the input DEM (defines grid/extent/srs)
        mask_list: list of mask source names; invalid entries are dropped
        dem_fn: DEM filename; needed for timestamp-dependent sources
            (snodas, modscag), for locating the toa product, and for
            writeout filenames
        writeout: if True, write intermediate products to disk
        outdir: output directory (default: directory containing dem_fn)
        args: argparse.Namespace with threshold/filter options; when None,
            the getparser() defaults are used
    """
    mask_list = check_mask_list(mask_list)
    if 'none' in mask_list:
        newmask = False
    else:
        #Basename for output files
        if outdir is not None:
            if not os.path.exists(outdir):
                os.makedirs(outdir)
        else:
            outdir = os.path.split(dem_fn)[0]
        if dem_fn is not None:
            #Extract DEM timestamp
            dem_dt = timelib.fn_getdatetime(dem_fn)
            out_fn_base = os.path.join(outdir, os.path.splitext(dem_fn)[0])
        #NOTE(review): when dem_fn is None, dem_dt/out_fn_base are undefined and
        #the snodas/modscag/writeout branches below would raise NameError —
        #confirm callers always pass dem_fn when using those sources.
        if args is None:
            #Get default values; the empty string satisfies the required
            #dem_fn positional argument
            parser = getparser()
            args = parser.parse_args(['',])
        #Initialize as all-valid; each source is AND-ed in below
        newmask = True
        if 'glaciers' in mask_list:
            icemask = get_icemask(dem_ds)
            if writeout:
                out_fn = out_fn_base+'_ice_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(icemask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(icemask, newmask)
        #Need to process NLCD separately, with nearest neighbor interpolation
        #Bug fix: original used "is not 'none'", which compares object identity
        #rather than string equality; use != for a reliable comparison
        if 'nlcd' in mask_list and args.nlcd_filter != 'none':
            rs = 'near'
            nlcd_ds = gdal.Open(get_nlcd_fn())
            nlcd_ds_warp = warplib.memwarp_multi([nlcd_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r=rs)[0]
            out_fn = None
            if writeout:
                out_fn = out_fn_base+'_nlcd.tif'
            nlcdmask = get_nlcd_mask(nlcd_ds_warp, filter=args.nlcd_filter, out_fn=out_fn)
            if writeout:
                out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(nlcdmask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(nlcdmask, newmask)
        if 'bareground' in mask_list and args.bareground_thresh > 0:
            bareground_ds = gdal.Open(get_bareground_fn())
            bareground_ds_warp = warplib.memwarp_multi([bareground_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
            out_fn = None
            if writeout:
                out_fn = out_fn_base+'_bareground.tif'
            baregroundmask = get_bareground_mask(bareground_ds_warp, bareground_thresh=args.bareground_thresh, out_fn=out_fn)
            if writeout:
                out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(baregroundmask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(baregroundmask, newmask)
        if 'snodas' in mask_list and args.snodas_thresh > 0:
            #Get SNODAS snow depth products for DEM timestamp
            #SNODAS products are only available after this date
            snodas_min_dt = datetime(2003,9,30)
            if dem_dt >= snodas_min_dt:
                snodas_ds = get_snodas_ds(dem_dt)
                if snodas_ds is not None:
                    snodas_ds_warp = warplib.memwarp_multi([snodas_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
                    #snow depth values are mm, convert to meters
                    snodas_depth = iolib.ds_getma(snodas_ds_warp)/1000.
                    if snodas_depth.count() > 0:
                        print("Applying SNODAS snow depth filter (masking values >= %0.2f m)" % args.snodas_thresh)
                        out_fn = None
                        if writeout:
                            out_fn = out_fn_base+'_snodas_depth.tif'
                            print("Writing out %s" % out_fn)
                            iolib.writeGTiff(snodas_depth, out_fn, src_ds=dem_ds)
                        snodas_mask = np.ma.masked_greater(snodas_depth, args.snodas_thresh)
                        #True for valid (shallow-snow) pixels, False for deep snow
                        snodas_mask = ~(np.ma.getmaskarray(snodas_mask))
                        if writeout:
                            out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                            print("Writing out %s" % out_fn)
                            iolib.writeGTiff(snodas_mask, out_fn, src_ds=dem_ds)
                        newmask = np.logical_and(snodas_mask, newmask)
                    else:
                        print("SNODAS grid for input location and timestamp is empty")
        #These tiles cover CONUS
        #tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05')
        if 'modscag' in mask_list and args.modscag_thresh > 0:
            #MODSCAG products are only available after this date
            modscag_min_dt = datetime(2000,2,24)
            if dem_dt < modscag_min_dt:
                print("Warning: DEM timestamp (%s) is before earliest MODSCAG timestamp (%s)" \
                        % (dem_dt, modscag_min_dt))
            else:
                tile_list = get_modis_tile_list(dem_ds)
                print(tile_list)
                #Search this many days on either side of the DEM timestamp
                pad_days=7
                modscag_fn_list = get_modscag_fn_list(dem_dt, tile_list=tile_list, pad_days=pad_days)
                if modscag_fn_list:
                    modscag_ds = proc_modscag(modscag_fn_list, extent=dem_ds, t_srs=dem_ds)
                    modscag_ds_warp = warplib.memwarp_multi([modscag_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
                    print("Applying MODSCAG fractional snow cover percent filter (masking values >= %0.1f%%)" % args.modscag_thresh)
                    modscag_fsca = iolib.ds_getma(modscag_ds_warp)
                    out_fn = None
                    if writeout:
                        out_fn = out_fn_base+'_modscag_fsca.tif'
                        print("Writing out %s" % out_fn)
                        iolib.writeGTiff(modscag_fsca, out_fn, src_ds=dem_ds)
                    #Nodata pixels (filled with 0% fSCA) are treated as snow-free
                    modscag_mask = (modscag_fsca.filled(0) >= args.modscag_thresh)
                    modscag_mask = ~(modscag_mask)
                    if writeout:
                        out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
                        print("Writing out %s" % out_fn)
                        iolib.writeGTiff(modscag_mask, out_fn, src_ds=dem_ds)
                    newmask = np.logical_and(modscag_mask, newmask)
        #Use reflectance values to estimate snowcover
        if 'toa' in mask_list:
            #Use top of atmosphere scaled reflectance values (0-1)
            toa_ds = gdal.Open(get_toa_fn(dem_fn))
            toa_mask = get_toa_mask(toa_ds, args.toa_thresh)
            if writeout:
                out_fn = out_fn_base+'_toa_mask.tif'
                print("Writing out %s" % out_fn)
                iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)
            newmask = np.logical_and(toa_mask, newmask)
        #Disabled experimental snowline filter; references `dem`, which is not
        #defined in this scope, so it would fail if enabled as-is
        if False:
            #Filter based on expected snowline
            #Simplest approach uses altitude cutoff
            max_elev = 1500
            newdem = np.ma.masked_greater(dem, max_elev)
            newmask = np.ma.getmaskarray(newdem)
    print("Generating final mask to use for reference surfaces, and applying to input DEM")
    #Now invert to use to create final masked array
    #True (1) represents "invalid" pixel to match numpy ma convention
    #NOTE(review): in the 'none' case newmask is the Python bool False here, so
    #~newmask evaluates to -1 (truthy) and args may still be None below —
    #confirm the 'none' path is never used with dilation or downstream masking.
    newmask = ~newmask
    #Dilate the mask
    if args.dilate is not None:
        niter = args.dilate
        print("Dilating mask with %i iterations" % niter)
        #scipy.ndimage.morphology namespace is deprecated/removed in modern
        #scipy; call binary_dilation from the ndimage namespace directly
        from scipy import ndimage
        newmask = ~(ndimage.binary_dilation(~newmask, iterations=niter))
    return newmask
#Can add "mask_list" argument, instead of specifying individually
#Valid mask source identifiers accepted by get_mask()/check_mask_list();
#'none' short-circuits get_mask() and disables masking entirely
mask_choices = ['toa', 'snodas', 'modscag', 'bareground', 'glaciers', 'nlcd', 'none']
def getparser():
    """Build the argument parser for the DEM control-surface masking tool."""
    nlcd_filter_choices = ['rock', 'rock+ice', 'rock+ice+water', 'not_forest', 'not_forest+not_water', 'none']
    p = argparse.ArgumentParser(description="Identify control surfaces for DEM co-registration")
    #Input/output options
    p.add_argument('dem_fn', type=str, help='DEM filename')
    p.add_argument('--outdir', default=None, help='Directory for output products')
    p.add_argument('--writeout', action='store_true', help='Write out all intermediate products, instead of only final tif')
    #p.add_argument('-datadir', default=None, help='Data directory containing reference data sources (NLCD, bareground, etc)')
    #Mask sources and their associated thresholds
    p.add_argument('--toa', action='store_true', help='Use top-of-atmosphere reflectance values (requires pregenerated "dem_fn_toa.tif")')
    p.add_argument('--toa_thresh', type=float, default=0.4, help='Top-of-atmosphere reflectance threshold (default: %(default)s, valid range 0.0-1.0), mask values greater than this value')
    p.add_argument('--snodas', action='store_true', help='Use SNODAS snow depth products')
    p.add_argument('--snodas_thresh', type=float, default=0.2, help='SNODAS snow depth threshold (default: %(default)s m), mask values greater than this value')
    p.add_argument('--modscag', action='store_true', help='Use MODSCAG fractional snow cover products')
    p.add_argument('--modscag_thresh', type=float, default=50, help='MODSCAG fractional snow cover percent threshold (default: %(default)s%%, valid range 0-100), mask greater than this value')
    p.add_argument('--bareground', action='store_true', help="Enable bareground filter")
    p.add_argument('--bareground_thresh', type=float, default=60, help='Percent bareground threshold (default: %(default)s%%, valid range 0-100), mask greater than this value (only relevant for global bareground data)')
    p.add_argument('--glaciers', action='store_true', help="Mask glacier polygons")
    p.add_argument('--nlcd', action='store_true', help="Enable NLCD LULC filter (for CONUS)")
    p.add_argument('--nlcd_filter', type=str, default='not_forest', choices=nlcd_filter_choices, help='Preserve these NLCD pixels (default: %(default)s)')
    p.add_argument('--dilate', type=int, default=None, help='Dilate mask with this many iterations (default: %(default)s)')
    return p
def main():
    """Command-line entry point: write a masked copy of the input DEM
    ('*_ref.tif') containing only likely-static control surfaces suitable
    for DEM co-registration."""
    parser = getparser()
    args = parser.parse_args()
    #Assemble the list of requested mask sources from the boolean flags
    mask_list = []
    if args.toa: mask_list.append('toa')
    if args.snodas: mask_list.append('snodas')
    if args.modscag: mask_list.append('modscag')
    if args.bareground: mask_list.append('bareground')
    if args.glaciers: mask_list.append('glaciers')
    if args.nlcd: mask_list.append('nlcd')
    if not mask_list:
        parser.print_help()
        sys.exit("Must specify at least one mask type")
    #This directory should or will contain the relevant data products
    #if args.datadir is None:
    #    datadir = iolib.get_datadir()
    dem_fn = args.dem_fn
    dem_ds = gdal.Open(dem_fn)
    print(dem_fn)
    #Get DEM masked array
    dem = iolib.ds_getma(dem_ds)
    print("%i valid pixels in original input tif" % dem.count())
    #Set up cascading mask preparation
    #get_mask returns True for "invalid" pixels (numpy ma convention)
    newmask = get_mask(dem_ds, mask_list, dem_fn=dem_fn, writeout=args.writeout, outdir=args.outdir, args=args)
    #Apply mask to original DEM - use these surfaces for co-registration
    newdem = np.ma.array(dem, mask=newmask)
    #Check that we have enough pixels, good distribution
    min_validpx_count = 100
    min_validpx_std = 10
    validpx_count = newdem.count()
    validpx_std = newdem.std()
    print("%i valid pixels in masked output tif to be used as ref" % validpx_count)
    print("%0.2f std in masked output tif to be used as ref" % validpx_std)
    #if (validpx_count > min_validpx_count) and (validpx_std > min_validpx_std):
    if (validpx_count > min_validpx_count):
        #Bug fix: args.outdir defaults to None, and os.path.join(None, ...)
        #raises TypeError; fall back to writing alongside the input DEM,
        #matching the get_mask() outdir convention
        out_fn = os.path.splitext(dem_fn)[0]+'_ref.tif'
        if args.outdir is not None:
            out_fn = os.path.join(args.outdir, out_fn)
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(newdem, out_fn, src_ds=dem_ds)
    else:
        print("Not enough valid pixels!")

if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.