repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
palantir/typedjsonrpc | typedjsonrpc/method_info.py | MethodInfo.describe | python | def describe(self):
return {
"name": self.name,
"params": self.params,
"returns": self.returns,
"description": self.description,
} | Describes the method.
:return: Description
:rtype: dict[str, object] | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/method_info.py#L36-L47 | null | class MethodInfo(namedtuple("MethodInfo", ["name", "method", "signature"])):
"""An object wrapping a method and information about it.
:attribute name: Name of the function
:type name: str
:attribute method: The function being described
:type method: function
:attribute signature: A description of the types this method takes as parameters and returns
:type signature: MethodSignature
"""
@property
def params(self):
"""The parameters for this method in a JSON-compatible format
:rtype: list[dict[str, str]]
"""
return [{"name": p_name, "type": p_type.__name__}
for (p_name, p_type) in self.signature.parameter_types]
@property
def returns(self):
"""The return type for this method in a JSON-compatible format.
This handles the special case of ``None`` which allows ``type(None)`` also.
:rtype: str | None
"""
return_type = self.signature.return_type
none_type = type(None)
if return_type is not None and return_type is not none_type:
return return_type.__name__
@property
def description(self):
"""Returns the docstring for this method.
:rtype: str
"""
return self.method.__doc__
|
palantir/typedjsonrpc | typedjsonrpc/method_info.py | MethodInfo.params | python | def params(self):
return [{"name": p_name, "type": p_type.__name__}
for (p_name, p_type) in self.signature.parameter_types] | The parameters for this method in a JSON-compatible format
:rtype: list[dict[str, str]] | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/method_info.py#L50-L56 | null | class MethodInfo(namedtuple("MethodInfo", ["name", "method", "signature"])):
"""An object wrapping a method and information about it.
:attribute name: Name of the function
:type name: str
:attribute method: The function being described
:type method: function
:attribute signature: A description of the types this method takes as parameters and returns
:type signature: MethodSignature
"""
def describe(self):
"""Describes the method.
:return: Description
:rtype: dict[str, object]
"""
return {
"name": self.name,
"params": self.params,
"returns": self.returns,
"description": self.description,
}
@property
@property
def returns(self):
"""The return type for this method in a JSON-compatible format.
This handles the special case of ``None`` which allows ``type(None)`` also.
:rtype: str | None
"""
return_type = self.signature.return_type
none_type = type(None)
if return_type is not None and return_type is not none_type:
return return_type.__name__
@property
def description(self):
"""Returns the docstring for this method.
:rtype: str
"""
return self.method.__doc__
|
palantir/typedjsonrpc | typedjsonrpc/method_info.py | MethodInfo.returns | python | def returns(self):
return_type = self.signature.return_type
none_type = type(None)
if return_type is not None and return_type is not none_type:
return return_type.__name__ | The return type for this method in a JSON-compatible format.
This handles the special case of ``None`` which allows ``type(None)`` also.
:rtype: str | None | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/method_info.py#L59-L69 | null | class MethodInfo(namedtuple("MethodInfo", ["name", "method", "signature"])):
"""An object wrapping a method and information about it.
:attribute name: Name of the function
:type name: str
:attribute method: The function being described
:type method: function
:attribute signature: A description of the types this method takes as parameters and returns
:type signature: MethodSignature
"""
def describe(self):
"""Describes the method.
:return: Description
:rtype: dict[str, object]
"""
return {
"name": self.name,
"params": self.params,
"returns": self.returns,
"description": self.description,
}
@property
def params(self):
"""The parameters for this method in a JSON-compatible format
:rtype: list[dict[str, str]]
"""
return [{"name": p_name, "type": p_type.__name__}
for (p_name, p_type) in self.signature.parameter_types]
@property
@property
def description(self):
"""Returns the docstring for this method.
:rtype: str
"""
return self.method.__doc__
|
palantir/typedjsonrpc | typedjsonrpc/method_info.py | MethodSignature.create | python | def create(parameter_names, parameter_types, return_type):
ordered_pairs = [(name, parameter_types[name]) for name in parameter_names]
return MethodSignature(ordered_pairs, return_type) | Returns a signature object ensuring order of parameter names and types.
:param parameter_names: A list of ordered parameter names
:type parameter_names: list[str]
:param parameter_types: A dictionary of parameter names to types
:type parameter_types: dict[str, type]
:param return_type: The type the function returns
:type return_type: type
:rtype: MethodSignature | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/method_info.py#L90-L102 | null | class MethodSignature(namedtuple("MethodSignature", ["parameter_types", "return_type"])):
"""Represents the types which a function takes as input and output.
:attribute parameter_types: A list of tuples mapping strings to type with a specified order
:type parameter_types: list[str, type]
:attribute return_type: The type which the function returns
:type return_type: type
"""
@staticmethod
|
palantir/typedjsonrpc | contrib/multi-module-example/typedjsonrpc_example/valid.py | histogram | python | def histogram(data):
ret = {}
for datum in data:
if datum in ret:
ret[datum] += 1
else:
ret[datum] = 1
return ret | Returns a histogram of your data.
:param data: The data to histogram
:type data: list[object]
:return: The histogram
:rtype: dict[object, int] | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/contrib/multi-module-example/typedjsonrpc_example/valid.py#L22-L36 | null | # coding: utf-8
#
# Copyright 2015 Palantir Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from . import registry
@registry.method(returns=dict, data=list)
@registry.method(returns=bool, data=list)
def all_true(data):
"""Checks if all of the elements are True.
:param data: The list of boolean values
:type data: list[bool]
:return: True if all elements in the list are true
:rtype: bool
"""
return all(data)
@registry.method(returns=float, data=list)
def mean(data):
"""Returns the mean of the data as a float
:param data: The list of boolean values
:type data: list[int | float]
:return: The mean of the list
:rtype: float
"""
total = sum(data)
return total / len(data)
@registry.method(returns=None, data=dict)
def print_data(data):
"""Prints object key-value pairs in a custom format
:param data: The dict to print
:type data: dict
:rtype: None
"""
print(", ".join(["{}=>{}".format(key, value) for key, value in data]))
|
palantir/typedjsonrpc | contrib/multi-module-example/typedjsonrpc_example/valid.py | print_data | python | def print_data(data):
print(", ".join(["{}=>{}".format(key, value) for key, value in data])) | Prints object key-value pairs in a custom format
:param data: The dict to print
:type data: dict
:rtype: None | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/contrib/multi-module-example/typedjsonrpc_example/valid.py#L65-L72 | null | # coding: utf-8
#
# Copyright 2015 Palantir Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from . import registry
@registry.method(returns=dict, data=list)
def histogram(data):
"""Returns a histogram of your data.
:param data: The data to histogram
:type data: list[object]
:return: The histogram
:rtype: dict[object, int]
"""
ret = {}
for datum in data:
if datum in ret:
ret[datum] += 1
else:
ret[datum] = 1
return ret
@registry.method(returns=bool, data=list)
def all_true(data):
"""Checks if all of the elements are True.
:param data: The list of boolean values
:type data: list[bool]
:return: True if all elements in the list are true
:rtype: bool
"""
return all(data)
@registry.method(returns=float, data=list)
def mean(data):
"""Returns the mean of the data as a float
:param data: The list of boolean values
:type data: list[int | float]
:return: The mean of the list
:rtype: float
"""
total = sum(data)
return total / len(data)
@registry.method(returns=None, data=dict)
|
SpotlightData/preprocessing | setup.py | get_requirements | python | def get_requirements():
'''returns requirements array for package'''
packages = []
with open("requirements.txt", "r") as req_doc:
for package in req_doc:
packages.append(package.replace("\n", ""))
return packages | returns requirements array for package | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/setup.py#L5-L11 | null | '''setup for pre_processing package'''
from setuptools import find_packages, setup
setup(name="preprocessing",
version="0.1.12",
classifiers=["Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6"],
description="pre-processing package for text strings",
long_description=open("README.rst").read(),
keywords="text pre-processing",
url="https://github.com/SpotlightData/preprocessing",
author="Mitchell Murphy",
author_email="mwtmurphy@gmail.com",
license="MIT",
python_requires=">=3",
packages=find_packages(),
package_data={
"preprocessing": [
"data/tokenizers/punkt/PY3/english.pickle",
"data/tokenizers/punkt/english.pickle",
"data/corpora/stopwords/english",
],
"preprocessing.data": ["*"],
"preprocessing.data.corpora.wordnet" : ["*"]
},
include_package_data=True,
install_requires=get_requirements(),
test_suite="nose.collector",
tests_require=["nose"],
scripts=["bin/demo"],
zip_safe=False)
|
SpotlightData/preprocessing | preprocessing/spellcheck.py | correct_word | python | def correct_word(word_string):
'''
Finds all valid one and two letter corrections for word_string, returning the word
with the highest relative probability as type str.
'''
if word_string is None:
return ""
elif isinstance(word_string, str):
return max(find_candidates(word_string), key=find_word_prob)
else:
raise InputError("string or none type variable not passed as argument to correct_word") | Finds all valid one and two letter corrections for word_string, returning the word
with the highest relative probability as type str. | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/spellcheck.py#L18-L28 | [
"def find_candidates(word_string):\n '''\n Finds all potential words word_string could have intended to mean. If a word is not incorrectly\n spelled, it will return this word first, else if will look for one letter edits that are correct.\n If there are no valid one letter edits, it will perform a two letter edit search.\n\n If valid corrections are found, all are returned as a set instance. Should a valid word not be\n found, the original word is returned as a set instance.\n '''\n if word_string is None:\n return {}\n elif isinstance(word_string, str):\n return (validate_words([word_string]) or validate_words(list(find_one_letter_edits(word_string)))\n or validate_words(list(find_two_letter_edits(word_string))) or set([word_string]))\n else:\n raise InputError(\"string or none type variable not passed as argument to find_candidates\")\n"
] | '''
Spelling checker module:
'''
import re
from os import path
from collections import Counter
from preprocessing.errors import InputError
EN_ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
WORD_DISTRIBUTION = Counter(re.findall(r'\w+', open(path.join(path.dirname(__file__), 'data/bnc_wiktionary_corpus.txt')).read().lower()))
#functions
def find_candidates(word_string):
'''
Finds all potential words word_string could have intended to mean. If a word is not incorrectly
spelled, it will return this word first, else if will look for one letter edits that are correct.
If there are no valid one letter edits, it will perform a two letter edit search.
If valid corrections are found, all are returned as a set instance. Should a valid word not be
found, the original word is returned as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
return (validate_words([word_string]) or validate_words(list(find_one_letter_edits(word_string)))
or validate_words(list(find_two_letter_edits(word_string))) or set([word_string]))
else:
raise InputError("string or none type variable not passed as argument to find_candidates")
def find_one_letter_edits(word_string):
'''
Finds all possible one letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
Returns all one letter edits as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
splits = [(word_string[:i], word_string[i:]) for i in range(len(word_string) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in EN_ALPHABET]
inserts = [L + c + R for L, R in splits for c in EN_ALPHABET]
return set(deletes + transposes + replaces + inserts)
else:
raise InputError("string or none type variable not passed as argument to find_one_letter_edits")
def find_two_letter_edits(word_string):
'''
Finds all possible two letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
This can be seen as a reapplication of find_one_letter_edits to all words found via a first
instantiation of find_one_letter_edits on word_string.
Returns all two letter edits as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
return (e2 for e1 in find_one_letter_edits(word_string) for e2 in find_one_letter_edits(e1))
else:
raise InputError("string or none type variable not passed as argument to find_two_letter_edits")
def find_word_prob(word_string, word_total=sum(WORD_DISTRIBUTION.values())):
'''
Finds the relative probability of the word appearing given context of a base corpus.
Returns this probability value as a float instance.
'''
if word_string is None:
return 0
elif isinstance(word_string, str):
return WORD_DISTRIBUTION[word_string] / word_total
else:
raise InputError("string or none type variable not passed as argument to find_word_prob")
def validate_words(word_list):
'''
Checks for each edited word in word_list if that word is a valid english word.abs
Returns all validated words as a set instance.
'''
if word_list is None:
return {}
elif isinstance(word_list, list):
if not word_list:
return {}
else:
return set(word for word in word_list if word in WORD_DISTRIBUTION)
else:
raise InputError("list variable not passed as argument to validate_words")
|
SpotlightData/preprocessing | preprocessing/spellcheck.py | find_candidates | python | def find_candidates(word_string):
'''
Finds all potential words word_string could have intended to mean. If a word is not incorrectly
spelled, it will return this word first, else if will look for one letter edits that are correct.
If there are no valid one letter edits, it will perform a two letter edit search.
If valid corrections are found, all are returned as a set instance. Should a valid word not be
found, the original word is returned as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
return (validate_words([word_string]) or validate_words(list(find_one_letter_edits(word_string)))
or validate_words(list(find_two_letter_edits(word_string))) or set([word_string]))
else:
raise InputError("string or none type variable not passed as argument to find_candidates") | Finds all potential words word_string could have intended to mean. If a word is not incorrectly
spelled, it will return this word first, else if will look for one letter edits that are correct.
If there are no valid one letter edits, it will perform a two letter edit search.
If valid corrections are found, all are returned as a set instance. Should a valid word not be
found, the original word is returned as a set instance. | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/spellcheck.py#L30-L45 | [
"def validate_words(word_list):\n '''\n Checks for each edited word in word_list if that word is a valid english word.abs\n Returns all validated words as a set instance.\n '''\n if word_list is None:\n return {}\n elif isinstance(word_list, list):\n if not word_list:\n return {}\n else:\n return set(word for word in word_list if word in WORD_DISTRIBUTION)\n else:\n raise InputError(\"list variable not passed as argument to validate_words\")\n",
"def find_one_letter_edits(word_string):\n '''\n Finds all possible one letter edits of word_string:\n - Splitting word_string into two words at all character locations\n - Deleting one letter at all character locations\n - Switching neighbouring characters\n - Replacing a character with every alphabetical letter\n - Inserting all possible alphabetical characters between each character location including boundaries\n\n Returns all one letter edits as a set instance.\n '''\n if word_string is None:\n return {}\n elif isinstance(word_string, str):\n splits = [(word_string[:i], word_string[i:]) for i in range(len(word_string) + 1)]\n deletes = [L + R[1:] for L, R in splits if R]\n transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]\n replaces = [L + c + R[1:] for L, R in splits if R for c in EN_ALPHABET]\n inserts = [L + c + R for L, R in splits for c in EN_ALPHABET]\n return set(deletes + transposes + replaces + inserts)\n else:\n raise InputError(\"string or none type variable not passed as argument to find_one_letter_edits\")\n"
] | '''
Spelling checker module:
'''
import re
from os import path
from collections import Counter
from preprocessing.errors import InputError
EN_ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
WORD_DISTRIBUTION = Counter(re.findall(r'\w+', open(path.join(path.dirname(__file__), 'data/bnc_wiktionary_corpus.txt')).read().lower()))
#functions
def correct_word(word_string):
'''
Finds all valid one and two letter corrections for word_string, returning the word
with the highest relative probability as type str.
'''
if word_string is None:
return ""
elif isinstance(word_string, str):
return max(find_candidates(word_string), key=find_word_prob)
else:
raise InputError("string or none type variable not passed as argument to correct_word")
def find_one_letter_edits(word_string):
'''
Finds all possible one letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
Returns all one letter edits as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
splits = [(word_string[:i], word_string[i:]) for i in range(len(word_string) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in EN_ALPHABET]
inserts = [L + c + R for L, R in splits for c in EN_ALPHABET]
return set(deletes + transposes + replaces + inserts)
else:
raise InputError("string or none type variable not passed as argument to find_one_letter_edits")
def find_two_letter_edits(word_string):
'''
Finds all possible two letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
This can be seen as a reapplication of find_one_letter_edits to all words found via a first
instantiation of find_one_letter_edits on word_string.
Returns all two letter edits as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
return (e2 for e1 in find_one_letter_edits(word_string) for e2 in find_one_letter_edits(e1))
else:
raise InputError("string or none type variable not passed as argument to find_two_letter_edits")
def find_word_prob(word_string, word_total=sum(WORD_DISTRIBUTION.values())):
'''
Finds the relative probability of the word appearing given context of a base corpus.
Returns this probability value as a float instance.
'''
if word_string is None:
return 0
elif isinstance(word_string, str):
return WORD_DISTRIBUTION[word_string] / word_total
else:
raise InputError("string or none type variable not passed as argument to find_word_prob")
def validate_words(word_list):
'''
Checks for each edited word in word_list if that word is a valid english word.abs
Returns all validated words as a set instance.
'''
if word_list is None:
return {}
elif isinstance(word_list, list):
if not word_list:
return {}
else:
return set(word for word in word_list if word in WORD_DISTRIBUTION)
else:
raise InputError("list variable not passed as argument to validate_words")
|
SpotlightData/preprocessing | preprocessing/spellcheck.py | find_one_letter_edits | python | def find_one_letter_edits(word_string):
'''
Finds all possible one letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
Returns all one letter edits as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
splits = [(word_string[:i], word_string[i:]) for i in range(len(word_string) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in EN_ALPHABET]
inserts = [L + c + R for L, R in splits for c in EN_ALPHABET]
return set(deletes + transposes + replaces + inserts)
else:
raise InputError("string or none type variable not passed as argument to find_one_letter_edits") | Finds all possible one letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
Returns all one letter edits as a set instance. | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/spellcheck.py#L47-L68 | null | '''
Spelling checker module:
'''
import re
from os import path
from collections import Counter
from preprocessing.errors import InputError
EN_ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
WORD_DISTRIBUTION = Counter(re.findall(r'\w+', open(path.join(path.dirname(__file__), 'data/bnc_wiktionary_corpus.txt')).read().lower()))
#functions
def correct_word(word_string):
'''
Finds all valid one and two letter corrections for word_string, returning the word
with the highest relative probability as type str.
'''
if word_string is None:
return ""
elif isinstance(word_string, str):
return max(find_candidates(word_string), key=find_word_prob)
else:
raise InputError("string or none type variable not passed as argument to correct_word")
def find_candidates(word_string):
'''
Finds all potential words word_string could have intended to mean. If a word is not incorrectly
spelled, it will return this word first, else if will look for one letter edits that are correct.
If there are no valid one letter edits, it will perform a two letter edit search.
If valid corrections are found, all are returned as a set instance. Should a valid word not be
found, the original word is returned as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
return (validate_words([word_string]) or validate_words(list(find_one_letter_edits(word_string)))
or validate_words(list(find_two_letter_edits(word_string))) or set([word_string]))
else:
raise InputError("string or none type variable not passed as argument to find_candidates")
def find_two_letter_edits(word_string):
'''
Finds all possible two letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
This can be seen as a reapplication of find_one_letter_edits to all words found via a first
instantiation of find_one_letter_edits on word_string.
Returns all two letter edits as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
return (e2 for e1 in find_one_letter_edits(word_string) for e2 in find_one_letter_edits(e1))
else:
raise InputError("string or none type variable not passed as argument to find_two_letter_edits")
def find_word_prob(word_string, word_total=sum(WORD_DISTRIBUTION.values())):
'''
Finds the relative probability of the word appearing given context of a base corpus.
Returns this probability value as a float instance.
'''
if word_string is None:
return 0
elif isinstance(word_string, str):
return WORD_DISTRIBUTION[word_string] / word_total
else:
raise InputError("string or none type variable not passed as argument to find_word_prob")
def validate_words(word_list):
'''
Checks for each edited word in word_list if that word is a valid english word.abs
Returns all validated words as a set instance.
'''
if word_list is None:
return {}
elif isinstance(word_list, list):
if not word_list:
return {}
else:
return set(word for word in word_list if word in WORD_DISTRIBUTION)
else:
raise InputError("list variable not passed as argument to validate_words")
|
SpotlightData/preprocessing | preprocessing/spellcheck.py | find_two_letter_edits | python | def find_two_letter_edits(word_string):
'''
Finds all possible two letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
This can be seen as a reapplication of find_one_letter_edits to all words found via a first
instantiation of find_one_letter_edits on word_string.
Returns all two letter edits as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
return (e2 for e1 in find_one_letter_edits(word_string) for e2 in find_one_letter_edits(e1))
else:
raise InputError("string or none type variable not passed as argument to find_two_letter_edits") | Finds all possible two letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
This can be seen as a reapplication of find_one_letter_edits to all words found via a first
instantiation of find_one_letter_edits on word_string.
Returns all two letter edits as a set instance. | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/spellcheck.py#L70-L89 | [
"def find_one_letter_edits(word_string):\n '''\n Finds all possible one letter edits of word_string:\n - Splitting word_string into two words at all character locations\n - Deleting one letter at all character locations\n - Switching neighbouring characters\n - Replacing a character with every alphabetical letter\n - Inserting all possible alphabetical characters between each character location including boundaries\n\n Returns all one letter edits as a set instance.\n '''\n if word_string is None:\n return {}\n elif isinstance(word_string, str):\n splits = [(word_string[:i], word_string[i:]) for i in range(len(word_string) + 1)]\n deletes = [L + R[1:] for L, R in splits if R]\n transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]\n replaces = [L + c + R[1:] for L, R in splits if R for c in EN_ALPHABET]\n inserts = [L + c + R for L, R in splits for c in EN_ALPHABET]\n return set(deletes + transposes + replaces + inserts)\n else:\n raise InputError(\"string or none type variable not passed as argument to find_one_letter_edits\")\n"
] | '''
Spelling checker module:
'''
import re
from os import path
from collections import Counter
from preprocessing.errors import InputError
EN_ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
WORD_DISTRIBUTION = Counter(re.findall(r'\w+', open(path.join(path.dirname(__file__), 'data/bnc_wiktionary_corpus.txt')).read().lower()))
#functions
def correct_word(word_string):
'''
Finds all valid one and two letter corrections for word_string, returning the word
with the highest relative probability as type str.
'''
if word_string is None:
return ""
elif isinstance(word_string, str):
return max(find_candidates(word_string), key=find_word_prob)
else:
raise InputError("string or none type variable not passed as argument to correct_word")
def find_candidates(word_string):
'''
Finds all potential words word_string could have intended to mean. If a word is not incorrectly
spelled, it will return this word first, else if will look for one letter edits that are correct.
If there are no valid one letter edits, it will perform a two letter edit search.
If valid corrections are found, all are returned as a set instance. Should a valid word not be
found, the original word is returned as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
return (validate_words([word_string]) or validate_words(list(find_one_letter_edits(word_string)))
or validate_words(list(find_two_letter_edits(word_string))) or set([word_string]))
else:
raise InputError("string or none type variable not passed as argument to find_candidates")
def find_one_letter_edits(word_string):
'''
Finds all possible one letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
Returns all one letter edits as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
splits = [(word_string[:i], word_string[i:]) for i in range(len(word_string) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in EN_ALPHABET]
inserts = [L + c + R for L, R in splits for c in EN_ALPHABET]
return set(deletes + transposes + replaces + inserts)
else:
raise InputError("string or none type variable not passed as argument to find_one_letter_edits")
def find_word_prob(word_string, word_total=sum(WORD_DISTRIBUTION.values())):
'''
Finds the relative probability of the word appearing given context of a base corpus.
Returns this probability value as a float instance.
'''
if word_string is None:
return 0
elif isinstance(word_string, str):
return WORD_DISTRIBUTION[word_string] / word_total
else:
raise InputError("string or none type variable not passed as argument to find_word_prob")
def validate_words(word_list):
'''
Checks for each edited word in word_list if that word is a valid english word.abs
Returns all validated words as a set instance.
'''
if word_list is None:
return {}
elif isinstance(word_list, list):
if not word_list:
return {}
else:
return set(word for word in word_list if word in WORD_DISTRIBUTION)
else:
raise InputError("list variable not passed as argument to validate_words")
|
SpotlightData/preprocessing | preprocessing/spellcheck.py | find_word_prob | python | def find_word_prob(word_string, word_total=sum(WORD_DISTRIBUTION.values())):
'''
Finds the relative probability of the word appearing given context of a base corpus.
Returns this probability value as a float instance.
'''
if word_string is None:
return 0
elif isinstance(word_string, str):
return WORD_DISTRIBUTION[word_string] / word_total
else:
raise InputError("string or none type variable not passed as argument to find_word_prob") | Finds the relative probability of the word appearing given context of a base corpus.
Returns this probability value as a float instance. | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/spellcheck.py#L91-L101 | null | '''
Spelling checker module:
'''
import re
from os import path
from collections import Counter
from preprocessing.errors import InputError
EN_ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
WORD_DISTRIBUTION = Counter(re.findall(r'\w+', open(path.join(path.dirname(__file__), 'data/bnc_wiktionary_corpus.txt')).read().lower()))
#functions
def correct_word(word_string):
'''
Finds all valid one and two letter corrections for word_string, returning the word
with the highest relative probability as type str.
'''
if word_string is None:
return ""
elif isinstance(word_string, str):
return max(find_candidates(word_string), key=find_word_prob)
else:
raise InputError("string or none type variable not passed as argument to correct_word")
def find_candidates(word_string):
'''
Finds all potential words word_string could have intended to mean. If a word is not incorrectly
spelled, it will return this word first, else if will look for one letter edits that are correct.
If there are no valid one letter edits, it will perform a two letter edit search.
If valid corrections are found, all are returned as a set instance. Should a valid word not be
found, the original word is returned as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
return (validate_words([word_string]) or validate_words(list(find_one_letter_edits(word_string)))
or validate_words(list(find_two_letter_edits(word_string))) or set([word_string]))
else:
raise InputError("string or none type variable not passed as argument to find_candidates")
def find_one_letter_edits(word_string):
'''
Finds all possible one letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
Returns all one letter edits as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
splits = [(word_string[:i], word_string[i:]) for i in range(len(word_string) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in EN_ALPHABET]
inserts = [L + c + R for L, R in splits for c in EN_ALPHABET]
return set(deletes + transposes + replaces + inserts)
else:
raise InputError("string or none type variable not passed as argument to find_one_letter_edits")
def find_two_letter_edits(word_string):
'''
Finds all possible two letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
This can be seen as a reapplication of find_one_letter_edits to all words found via a first
instantiation of find_one_letter_edits on word_string.
Returns all two letter edits as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
return (e2 for e1 in find_one_letter_edits(word_string) for e2 in find_one_letter_edits(e1))
else:
raise InputError("string or none type variable not passed as argument to find_two_letter_edits")
def validate_words(word_list):
'''
Checks for each edited word in word_list if that word is a valid english word.abs
Returns all validated words as a set instance.
'''
if word_list is None:
return {}
elif isinstance(word_list, list):
if not word_list:
return {}
else:
return set(word for word in word_list if word in WORD_DISTRIBUTION)
else:
raise InputError("list variable not passed as argument to validate_words")
|
SpotlightData/preprocessing | preprocessing/spellcheck.py | validate_words | python | def validate_words(word_list):
'''
Checks for each edited word in word_list if that word is a valid english word.abs
Returns all validated words as a set instance.
'''
if word_list is None:
return {}
elif isinstance(word_list, list):
if not word_list:
return {}
else:
return set(word for word in word_list if word in WORD_DISTRIBUTION)
else:
raise InputError("list variable not passed as argument to validate_words") | Checks for each edited word in word_list if that word is a valid english word.abs
Returns all validated words as a set instance. | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/spellcheck.py#L103-L116 | null | '''
Spelling checker module:
'''
import re
from os import path
from collections import Counter
from preprocessing.errors import InputError
EN_ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
WORD_DISTRIBUTION = Counter(re.findall(r'\w+', open(path.join(path.dirname(__file__), 'data/bnc_wiktionary_corpus.txt')).read().lower()))
#functions
def correct_word(word_string):
'''
Finds all valid one and two letter corrections for word_string, returning the word
with the highest relative probability as type str.
'''
if word_string is None:
return ""
elif isinstance(word_string, str):
return max(find_candidates(word_string), key=find_word_prob)
else:
raise InputError("string or none type variable not passed as argument to correct_word")
def find_candidates(word_string):
'''
Finds all potential words word_string could have intended to mean. If a word is not incorrectly
spelled, it will return this word first, else if will look for one letter edits that are correct.
If there are no valid one letter edits, it will perform a two letter edit search.
If valid corrections are found, all are returned as a set instance. Should a valid word not be
found, the original word is returned as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
return (validate_words([word_string]) or validate_words(list(find_one_letter_edits(word_string)))
or validate_words(list(find_two_letter_edits(word_string))) or set([word_string]))
else:
raise InputError("string or none type variable not passed as argument to find_candidates")
def find_one_letter_edits(word_string):
'''
Finds all possible one letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
Returns all one letter edits as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
splits = [(word_string[:i], word_string[i:]) for i in range(len(word_string) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in EN_ALPHABET]
inserts = [L + c + R for L, R in splits for c in EN_ALPHABET]
return set(deletes + transposes + replaces + inserts)
else:
raise InputError("string or none type variable not passed as argument to find_one_letter_edits")
def find_two_letter_edits(word_string):
'''
Finds all possible two letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
This can be seen as a reapplication of find_one_letter_edits to all words found via a first
instantiation of find_one_letter_edits on word_string.
Returns all two letter edits as a set instance.
'''
if word_string is None:
return {}
elif isinstance(word_string, str):
return (e2 for e1 in find_one_letter_edits(word_string) for e2 in find_one_letter_edits(e1))
else:
raise InputError("string or none type variable not passed as argument to find_two_letter_edits")
def find_word_prob(word_string, word_total=sum(WORD_DISTRIBUTION.values())):
'''
Finds the relative probability of the word appearing given context of a base corpus.
Returns this probability value as a float instance.
'''
if word_string is None:
return 0
elif isinstance(word_string, str):
return WORD_DISTRIBUTION[word_string] / word_total
else:
raise InputError("string or none type variable not passed as argument to find_word_prob")
|
SpotlightData/preprocessing | preprocessing/text.py | convert_html_entities | python | def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace(""", "'")
else:
raise InputError("string not passed as argument for text_string") | Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L33-L51 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument")
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument")
def create_sentence_list(text_string):
'''
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return []
elif isinstance(text_string, str):
return SENTENCE_TOKENIZER.tokenize(text_string)
else:
raise InputError("non-string passed as argument for create_sentence_list")
def keyword_tokenize(text_string):
'''
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string) if word not in STOPWORDS and len(word) >= 3])
else:
raise InputError("string not passed as argument for text_string")
def lemmatize(text_string):
'''
Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return LEMMATIZER.lemmatize(text_string)
else:
raise InputError("string not passed as primary argument")
def lowercase(text_string):
'''
Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return text_string.lower()
else:
raise InputError("string not passed as argument for text_string")
def preprocess_text(text_string, function_list):
'''
Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
if isinstance(function_list, list):
for func in function_list:
try:
text_string = func(text_string)
except (NameError, TypeError):
raise FunctionError("invalid function passed as element of function_list")
except:
raise
return text_string
else:
raise InputError("list of functions not passed as argument for function_list")
else:
raise InputError("string not passed as argument for text_string")
def remove_esc_chars(text_string):
'''
Removes any escape character within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\\\w', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_numbers(text_string):
'''
Removes any digit value discovered within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_number_words(text_string):
'''
Removes any integer represented as a word within text_string and returns the new string as
type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in NUMBER_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_time_words(text_string):
'''
Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in TIME_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_unbound_punct(text_string):
'''
Removes all punctuation unattached from a non-whitespace or attached to another punctuation
character unexpectedly (e.g. ".;';") within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r''.join([r'[', PUNCT, r'][', PUNCT, r']+|\B[', PUNCT, r']+']), "",
text_string).split())
else:
raise InputError("string not passed as argument")
def remove_urls(text_string):
'''
Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'http\S+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | convert_ligatures | python | def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument") | Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L53-L73 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace(""", "'")
else:
raise InputError("string not passed as argument for text_string")
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument")
def create_sentence_list(text_string):
'''
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return []
elif isinstance(text_string, str):
return SENTENCE_TOKENIZER.tokenize(text_string)
else:
raise InputError("non-string passed as argument for create_sentence_list")
def keyword_tokenize(text_string):
'''
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string) if word not in STOPWORDS and len(word) >= 3])
else:
raise InputError("string not passed as argument for text_string")
def lemmatize(text_string):
'''
Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return LEMMATIZER.lemmatize(text_string)
else:
raise InputError("string not passed as primary argument")
def lowercase(text_string):
'''
Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return text_string.lower()
else:
raise InputError("string not passed as argument for text_string")
def preprocess_text(text_string, function_list):
'''
Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
if isinstance(function_list, list):
for func in function_list:
try:
text_string = func(text_string)
except (NameError, TypeError):
raise FunctionError("invalid function passed as element of function_list")
except:
raise
return text_string
else:
raise InputError("list of functions not passed as argument for function_list")
else:
raise InputError("string not passed as argument for text_string")
def remove_esc_chars(text_string):
'''
Removes any escape character within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\\\w', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_numbers(text_string):
'''
Removes any digit value discovered within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_number_words(text_string):
'''
Removes any integer represented as a word within text_string and returns the new string as
type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in NUMBER_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_time_words(text_string):
'''
Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in TIME_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_unbound_punct(text_string):
'''
Removes all punctuation unattached from a non-whitespace or attached to another punctuation
character unexpectedly (e.g. ".;';") within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r''.join([r'[', PUNCT, r'][', PUNCT, r']+|\B[', PUNCT, r']+']), "",
text_string).split())
else:
raise InputError("string not passed as argument")
def remove_urls(text_string):
'''
Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'http\S+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | correct_spelling | python | def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument") | Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L75-L98 | [
"def correct_word(word_string):\n '''\n Finds all valid one and two letter corrections for word_string, returning the word\n with the highest relative probability as type str.\n '''\n if word_string is None:\n return \"\"\n elif isinstance(word_string, str):\n return max(find_candidates(word_string), key=find_word_prob)\n else:\n raise InputError(\"string or none type variable not passed as argument to correct_word\")\n"
] | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace(""", "'")
else:
raise InputError("string not passed as argument for text_string")
def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument")
def create_sentence_list(text_string):
'''
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return []
elif isinstance(text_string, str):
return SENTENCE_TOKENIZER.tokenize(text_string)
else:
raise InputError("non-string passed as argument for create_sentence_list")
def keyword_tokenize(text_string):
'''
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string) if word not in STOPWORDS and len(word) >= 3])
else:
raise InputError("string not passed as argument for text_string")
def lemmatize(text_string):
'''
Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return LEMMATIZER.lemmatize(text_string)
else:
raise InputError("string not passed as primary argument")
def lowercase(text_string):
'''
Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return text_string.lower()
else:
raise InputError("string not passed as argument for text_string")
def preprocess_text(text_string, function_list):
'''
Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
if isinstance(function_list, list):
for func in function_list:
try:
text_string = func(text_string)
except (NameError, TypeError):
raise FunctionError("invalid function passed as element of function_list")
except:
raise
return text_string
else:
raise InputError("list of functions not passed as argument for function_list")
else:
raise InputError("string not passed as argument for text_string")
def remove_esc_chars(text_string):
'''
Removes any escape character within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\\\w', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_numbers(text_string):
'''
Removes any digit value discovered within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_number_words(text_string):
'''
Removes any integer represented as a word within text_string and returns the new string as
type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in NUMBER_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_time_words(text_string):
'''
Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in TIME_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_unbound_punct(text_string):
'''
Removes all punctuation unattached from a non-whitespace or attached to another punctuation
character unexpectedly (e.g. ".;';") within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r''.join([r'[', PUNCT, r'][', PUNCT, r']+|\B[', PUNCT, r']+']), "",
text_string).split())
else:
raise InputError("string not passed as argument")
def remove_urls(text_string):
'''
Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'http\S+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | create_sentence_list | python | def create_sentence_list(text_string):
'''
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return []
elif isinstance(text_string, str):
return SENTENCE_TOKENIZER.tokenize(text_string)
else:
raise InputError("non-string passed as argument for create_sentence_list") | Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L100-L118 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace(""", "'")
else:
raise InputError("string not passed as argument for text_string")
def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument")
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument")
def keyword_tokenize(text_string):
'''
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string) if word not in STOPWORDS and len(word) >= 3])
else:
raise InputError("string not passed as argument for text_string")
def lemmatize(text_string):
'''
Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return LEMMATIZER.lemmatize(text_string)
else:
raise InputError("string not passed as primary argument")
def lowercase(text_string):
'''
Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return text_string.lower()
else:
raise InputError("string not passed as argument for text_string")
def preprocess_text(text_string, function_list):
'''
Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
if isinstance(function_list, list):
for func in function_list:
try:
text_string = func(text_string)
except (NameError, TypeError):
raise FunctionError("invalid function passed as element of function_list")
except:
raise
return text_string
else:
raise InputError("list of functions not passed as argument for function_list")
else:
raise InputError("string not passed as argument for text_string")
def remove_esc_chars(text_string):
'''
Removes any escape character within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\\\w', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_numbers(text_string):
'''
Removes any digit value discovered within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_number_words(text_string):
'''
Removes any integer represented as a word within text_string and returns the new string as
type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in NUMBER_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_time_words(text_string):
'''
Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in TIME_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_unbound_punct(text_string):
'''
Removes all punctuation unattached from a non-whitespace or attached to another punctuation
character unexpectedly (e.g. ".;';") within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r''.join([r'[', PUNCT, r'][', PUNCT, r']+|\B[', PUNCT, r']+']), "",
text_string).split())
else:
raise InputError("string not passed as argument")
def remove_urls(text_string):
'''
Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'http\S+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | keyword_tokenize | python | def keyword_tokenize(text_string):
'''
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string) if word not in STOPWORDS and len(word) >= 3])
else:
raise InputError("string not passed as argument for text_string") | Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L120-L138 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace(""", "'")
else:
raise InputError("string not passed as argument for text_string")
def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument")
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument")
def create_sentence_list(text_string):
'''
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return []
elif isinstance(text_string, str):
return SENTENCE_TOKENIZER.tokenize(text_string)
else:
raise InputError("non-string passed as argument for create_sentence_list")
def lemmatize(text_string):
'''
Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return LEMMATIZER.lemmatize(text_string)
else:
raise InputError("string not passed as primary argument")
def lowercase(text_string):
'''
Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return text_string.lower()
else:
raise InputError("string not passed as argument for text_string")
def preprocess_text(text_string, function_list):
'''
Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
if isinstance(function_list, list):
for func in function_list:
try:
text_string = func(text_string)
except (NameError, TypeError):
raise FunctionError("invalid function passed as element of function_list")
except:
raise
return text_string
else:
raise InputError("list of functions not passed as argument for function_list")
else:
raise InputError("string not passed as argument for text_string")
def remove_esc_chars(text_string):
'''
Removes any escape character within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\\\w', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_numbers(text_string):
'''
Removes any digit value discovered within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_number_words(text_string):
'''
Removes any integer represented as a word within text_string and returns the new string as
type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in NUMBER_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_time_words(text_string):
'''
Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in TIME_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_unbound_punct(text_string):
'''
Removes all punctuation unattached from a non-whitespace or attached to another punctuation
character unexpectedly (e.g. ".;';") within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r''.join([r'[', PUNCT, r'][', PUNCT, r']+|\B[', PUNCT, r']+']), "",
text_string).split())
else:
raise InputError("string not passed as argument")
def remove_urls(text_string):
'''
Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'http\S+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | lemmatize | python | def lemmatize(text_string):
'''
Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return LEMMATIZER.lemmatize(text_string)
else:
raise InputError("string not passed as primary argument") | Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L140-L157 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
    '''
    Converts HTML5 character references within text_string to their
    corresponding unicode characters and returns the converted string
    as type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument for text_string")
    # html.unescape resolves all HTML5 named/numeric references.
    # NOTE(review): the line below was corrupted in this copy by
    # entity-unescaping (`.replace(""", "'")` is a syntax error); restored
    # to the upstream form, which normalises any literal "&quot;" left over
    # from double-escaped input into an apostrophe.
    return html.unescape(text_string).replace("&quot;", "'")
def convert_ligatures(text_string):
    '''
    Converts Latin ligature characters within text_string to their
    corresponding expanded character sequences and returns the converted
    string as type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a string or NoneType not be passed as an argument
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("none type or string not passed as an argument")
    # LIGATURES is a dict keyed by stringified indices "0".."N-1"; walk the
    # entries in index order and expand each ligature in turn.
    for index in range(len(LIGATURES)):
        entry = LIGATURES[str(index)]
        text_string = text_string.replace(entry["ligature"], entry["term"])
    return text_string
def correct_spelling(text_string):
    '''
    Splits string and converts words not found within a pre-built dictionary
    to their most likely actual word based on a relative probability
    dictionary. Returns the edited string as type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a string or NoneType not be passed as an argument
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("none type or string not passed as an argument")
    # Spell-check word by word and reassemble with single spaces.
    return " ".join(spellcheck.correct_word(word) for word in text_string.split())
def create_sentence_list(text_string):
    '''
    Splits text_string into a list of sentences based on NLTK's
    english.pickle tokenizer, and returns said list as type list of str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    # Note: empty/None input yields an empty list, not an empty string.
    if text_string is None or text_string == "":
        return []
    if not isinstance(text_string, str):
        raise InputError("non-string passed as argument for create_sentence_list")
    return SENTENCE_TOKENIZER.tokenize(text_string)
def keyword_tokenize(text_string):
    '''
    Extracts keywords from text_string using NLTK's list of English
    stopwords, ignoring words of a length smaller than 3, and returns the
    new string as type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument for text_string")
    # Keep only tokens of length >= 3 that are not English stopwords.
    tokens = KEYWORD_TOKENIZER.tokenize(text_string)
    keywords = [token for token in tokens
                if len(token) >= 3 and token not in STOPWORDS]
    return " ".join(keywords)
def lowercase(text_string):
    '''
    Converts text_string into lowercase and returns the converted string
    as type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument for text_string")
    return text_string.lower()
def preprocess_text(text_string, function_list):
    '''
    Applies each function in function_list, in order, to text_string and
    returns the processed string as type str.

    Keyword arguments:
    - text_string: string instance
    - function_list: list of functions available in preprocessing.text

    Exceptions raised:
    - FunctionError: occurs should an invalid function be passed within the
      list of functions
    - InputError: occurs should text_string be non-string, or function_list
      be non-list
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument for text_string")
    if not isinstance(function_list, list):
        raise InputError("list of functions not passed as argument for function_list")
    result = text_string
    for func in function_list:
        try:
            result = func(result)
        except (NameError, TypeError):
            # Non-callable entries (TypeError) or undefined names surface
            # as a FunctionError; any other exception propagates unchanged.
            raise FunctionError("invalid function passed as element of function_list")
    return result
def remove_esc_chars(text_string):
    '''
    Removes any escape character within text_string and returns the new
    string as type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    # Strip backslash-plus-word-character pairs, then normalise whitespace.
    cleaned = re.sub(r'\\\w', "", text_string)
    return " ".join(cleaned.split())
def remove_numbers(text_string):
    '''
    Removes any digit value discovered within text_string and returns the
    new string as type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    # Match digit runs (allowing ., / and , inside, e.g. "1,000" or "3.14")
    # starting at a word boundary, then normalise whitespace.
    cleaned = re.sub(r'\b[\d.\/,]+', "", text_string)
    return " ".join(cleaned.split())
def remove_number_words(text_string):
    '''
    Removes any integer represented as a word within text_string and
    returns the new string as type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    # Remove each whole token that contains a number word (e.g. "twenty-one"
    # matches "twenty"), then normalise whitespace.
    for number_word in NUMBER_WORDS:
        text_string = re.sub(r'[\S]*\b' + number_word + r'[\S]*', "", text_string)
    return " ".join(text_string.split())
def remove_time_words(text_string):
    '''
    Removes any word associated to time (day, week, month, etc.) within
    text_string and returns the new string as type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    # Remove each whole token containing a time word, then normalise
    # whitespace.
    for time_word in TIME_WORDS:
        text_string = re.sub(r'[\S]*\b' + time_word + r'[\S]*', "", text_string)
    return " ".join(text_string.split())
def remove_unbound_punct(text_string):
    '''
    Removes all punctuation unattached from a non-whitespace or attached to
    another punctuation character unexpectedly (e.g. ".;';") within
    text_string and returns the new string as type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    # Two alternatives: a punctuation char followed by more punctuation, or
    # a punctuation run not preceded by a word boundary (\B).
    pattern = r'[' + PUNCT + r'][' + PUNCT + r']+|\B[' + PUNCT + r']+'
    return " ".join(re.sub(pattern, "", text_string).split())
def remove_urls(text_string):
'''
Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'http\S+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | lowercase | python | def lowercase(text_string):
'''
Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return text_string.lower()
else:
raise InputError("string not passed as argument for text_string") | Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L159-L176 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace(""", "'")
else:
raise InputError("string not passed as argument for text_string")
def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument")
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument")
def create_sentence_list(text_string):
'''
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return []
elif isinstance(text_string, str):
return SENTENCE_TOKENIZER.tokenize(text_string)
else:
raise InputError("non-string passed as argument for create_sentence_list")
def keyword_tokenize(text_string):
'''
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string) if word not in STOPWORDS and len(word) >= 3])
else:
raise InputError("string not passed as argument for text_string")
def lemmatize(text_string):
    '''
    Returns base form of text_string using NLTK's WordNetLemmatizer as
    type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as primary argument")
    # Note: lemmatizes text_string as a single word, not word by word.
    return LEMMATIZER.lemmatize(text_string)
def preprocess_text(text_string, function_list):
'''
Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
if isinstance(function_list, list):
for func in function_list:
try:
text_string = func(text_string)
except (NameError, TypeError):
raise FunctionError("invalid function passed as element of function_list")
except:
raise
return text_string
else:
raise InputError("list of functions not passed as argument for function_list")
else:
raise InputError("string not passed as argument for text_string")
def remove_esc_chars(text_string):
'''
Removes any escape character within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\\\w', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_numbers(text_string):
'''
Removes any digit value discovered within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_number_words(text_string):
'''
Removes any integer represented as a word within text_string and returns the new string as
type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in NUMBER_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_time_words(text_string):
'''
Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in TIME_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_unbound_punct(text_string):
'''
Removes all punctuation unattached from a non-whitespace or attached to another punctuation
character unexpectedly (e.g. ".;';") within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r''.join([r'[', PUNCT, r'][', PUNCT, r']+|\B[', PUNCT, r']+']), "",
text_string).split())
else:
raise InputError("string not passed as argument")
def remove_urls(text_string):
'''
Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'http\S+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | preprocess_text | python | def preprocess_text(text_string, function_list):
'''
Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
if isinstance(function_list, list):
for func in function_list:
try:
text_string = func(text_string)
except (NameError, TypeError):
raise FunctionError("invalid function passed as element of function_list")
except:
raise
return text_string
else:
raise InputError("list of functions not passed as argument for function_list")
else:
raise InputError("string not passed as argument for text_string") | Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L178-L208 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace(""", "'")
else:
raise InputError("string not passed as argument for text_string")
def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument")
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument")
def create_sentence_list(text_string):
'''
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return []
elif isinstance(text_string, str):
return SENTENCE_TOKENIZER.tokenize(text_string)
else:
raise InputError("non-string passed as argument for create_sentence_list")
def keyword_tokenize(text_string):
'''
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string) if word not in STOPWORDS and len(word) >= 3])
else:
raise InputError("string not passed as argument for text_string")
def lemmatize(text_string):
'''
Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return LEMMATIZER.lemmatize(text_string)
else:
raise InputError("string not passed as primary argument")
def lowercase(text_string):
'''
Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return text_string.lower()
else:
raise InputError("string not passed as argument for text_string")
def preprocess_text(text_string, function_list):
'''
Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
if isinstance(function_list, list):
for func in function_list:
try:
text_string = func(text_string)
except (NameError, TypeError):
raise FunctionError("invalid function passed as element of function_list")
except:
raise
return text_string
else:
raise InputError("list of functions not passed as argument for function_list")
else:
raise InputError("string not passed as argument for text_string")
def remove_esc_chars(text_string):
'''
Removes any escape character within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\\\w', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_numbers(text_string):
'''
Removes any digit value discovered within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_number_words(text_string):
'''
Removes any integer represented as a word within text_string and returns the new string as
type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in NUMBER_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_time_words(text_string):
'''
Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in TIME_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_unbound_punct(text_string):
'''
Removes all punctuation unattached from a non-whitespace or attached to another punctuation
character unexpectedly (e.g. ".;';") within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r''.join([r'[', PUNCT, r'][', PUNCT, r']+|\B[', PUNCT, r']+']), "",
text_string).split())
else:
raise InputError("string not passed as argument")
def remove_urls(text_string):
'''
Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'http\S+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | remove_esc_chars | python | def remove_esc_chars(text_string):
'''
Removes any escape character within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\\\w', "", text_string).split())
else:
raise InputError("string not passed as argument") | Removes any escape character within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L210-L227 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace(""", "'")
else:
raise InputError("string not passed as argument for text_string")
def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument")
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument")
def create_sentence_list(text_string):
'''
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return []
elif isinstance(text_string, str):
return SENTENCE_TOKENIZER.tokenize(text_string)
else:
raise InputError("non-string passed as argument for create_sentence_list")
def keyword_tokenize(text_string):
'''
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string) if word not in STOPWORDS and len(word) >= 3])
else:
raise InputError("string not passed as argument for text_string")
def lemmatize(text_string):
'''
Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return LEMMATIZER.lemmatize(text_string)
else:
raise InputError("string not passed as primary argument")
def lowercase(text_string):
'''
Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return text_string.lower()
else:
raise InputError("string not passed as argument for text_string")
def preprocess_text(text_string, function_list):
'''
Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
if isinstance(function_list, list):
for func in function_list:
try:
text_string = func(text_string)
except (NameError, TypeError):
raise FunctionError("invalid function passed as element of function_list")
except:
raise
return text_string
else:
raise InputError("list of functions not passed as argument for function_list")
else:
raise InputError("string not passed as argument for text_string")
def remove_numbers(text_string):
'''
Removes any digit value discovered within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_number_words(text_string):
'''
Removes any integer represented as a word within text_string and returns the new string as
type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in NUMBER_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_time_words(text_string):
'''
Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in TIME_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_unbound_punct(text_string):
'''
Removes all punctuation unattached from a non-whitespace or attached to another punctuation
character unexpectedly (e.g. ".;';") within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r''.join([r'[', PUNCT, r'][', PUNCT, r']+|\B[', PUNCT, r']+']), "",
text_string).split())
else:
raise InputError("string not passed as argument")
def remove_urls(text_string):
'''
Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'http\S+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | remove_numbers | python | def remove_numbers(text_string):
'''
Removes any digit value discovered within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
else:
raise InputError("string not passed as argument") | Removes any digit value discovered within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L229-L246 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace(""", "'")
else:
raise InputError("string not passed as argument for text_string")
def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument")
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument")
def create_sentence_list(text_string):
'''
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return []
elif isinstance(text_string, str):
return SENTENCE_TOKENIZER.tokenize(text_string)
else:
raise InputError("non-string passed as argument for create_sentence_list")
def keyword_tokenize(text_string):
'''
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string) if word not in STOPWORDS and len(word) >= 3])
else:
raise InputError("string not passed as argument for text_string")
def lemmatize(text_string):
'''
Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return LEMMATIZER.lemmatize(text_string)
else:
raise InputError("string not passed as primary argument")
def lowercase(text_string):
'''
Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return text_string.lower()
else:
raise InputError("string not passed as argument for text_string")
def preprocess_text(text_string, function_list):
'''
Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
if isinstance(function_list, list):
for func in function_list:
try:
text_string = func(text_string)
except (NameError, TypeError):
raise FunctionError("invalid function passed as element of function_list")
except:
raise
return text_string
else:
raise InputError("list of functions not passed as argument for function_list")
else:
raise InputError("string not passed as argument for text_string")
def remove_esc_chars(text_string):
'''
Removes any escape character within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\\\w', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_number_words(text_string):
'''
Removes any integer represented as a word within text_string and returns the new string as
type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in NUMBER_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_time_words(text_string):
'''
Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in TIME_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_unbound_punct(text_string):
'''
Removes all punctuation unattached from a non-whitespace or attached to another punctuation
character unexpectedly (e.g. ".;';") within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r''.join([r'[', PUNCT, r'][', PUNCT, r']+|\B[', PUNCT, r']+']), "",
text_string).split())
else:
raise InputError("string not passed as argument")
def remove_urls(text_string):
'''
Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'http\S+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | remove_number_words | python | def remove_number_words(text_string):
'''
Removes any integer represented as a word within text_string and returns the new string as
type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in NUMBER_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument") | Removes any integer represented as a word within text_string and returns the new string as
type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L248-L268 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace(""", "'")
else:
raise InputError("string not passed as argument for text_string")
def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument")
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument")
def create_sentence_list(text_string):
'''
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return []
elif isinstance(text_string, str):
return SENTENCE_TOKENIZER.tokenize(text_string)
else:
raise InputError("non-string passed as argument for create_sentence_list")
def keyword_tokenize(text_string):
'''
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string) if word not in STOPWORDS and len(word) >= 3])
else:
raise InputError("string not passed as argument for text_string")
def lemmatize(text_string):
'''
Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return LEMMATIZER.lemmatize(text_string)
else:
raise InputError("string not passed as primary argument")
def lowercase(text_string):
'''
Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return text_string.lower()
else:
raise InputError("string not passed as argument for text_string")
def preprocess_text(text_string, function_list):
'''
Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
if isinstance(function_list, list):
for func in function_list:
try:
text_string = func(text_string)
except (NameError, TypeError):
raise FunctionError("invalid function passed as element of function_list")
except:
raise
return text_string
else:
raise InputError("list of functions not passed as argument for function_list")
else:
raise InputError("string not passed as argument for text_string")
def remove_esc_chars(text_string):
'''
Removes any escape character within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\\\w', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_numbers(text_string):
'''
Removes any digit value discovered within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_time_words(text_string):
'''
Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in TIME_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_unbound_punct(text_string):
'''
Removes all punctuation unattached from a non-whitespace or attached to another punctuation
character unexpectedly (e.g. ".;';") within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r''.join([r'[', PUNCT, r'][', PUNCT, r']+|\B[', PUNCT, r']+']), "",
text_string).split())
else:
raise InputError("string not passed as argument")
def remove_urls(text_string):
'''
Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'http\S+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | remove_time_words | python | def remove_time_words(text_string):
'''
Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in TIME_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument") | Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L270-L290 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace(""", "'")
else:
raise InputError("string not passed as argument for text_string")
def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument")
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument")
def create_sentence_list(text_string):
    '''
    Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer,
    and returns said list as type list of str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return []
    if not isinstance(text_string, str):
        raise InputError("non-string passed as argument for create_sentence_list")
    return SENTENCE_TOKENIZER.tokenize(text_string)
def keyword_tokenize(text_string):
    '''
    Extracts keywords from text_string using NLTK's list of English stopwords, ignoring
    words of a length smaller than 3, and returns the new string as type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument for text_string")
    # Keep tokens that are not stopwords and are at least three characters long.
    keywords = [
        token
        for token in KEYWORD_TOKENIZER.tokenize(text_string)
        if token not in STOPWORDS and len(token) >= 3
    ]
    return " ".join(keywords)
def lemmatize(text_string):
    '''
    Returns the base form of text_string using NLTK's WordNetLemmatizer as type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as primary argument")
    return LEMMATIZER.lemmatize(text_string)
def lowercase(text_string):
    '''
    Converts text_string into lowercase and returns the converted string as type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument for text_string")
    return text_string.lower()
def preprocess_text(text_string, function_list):
    '''
    Given each function within function_list, applies the order of functions put forward
    onto text_string, returning the processed string as type str.
    Keyword argument:
    - function_list: list of functions available in preprocessing.text
    - text_string: string instance
    Exceptions raised:
    - FunctionError: occurs should an invalid function be passed within the list of functions
    - InputError: occurs should text_string be non-string, or function_list be non-list
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument for text_string")
    if not isinstance(function_list, list):
        raise InputError("list of functions not passed as argument for function_list")
    for step in function_list:
        try:
            text_string = step(text_string)
        except (NameError, TypeError):
            # NOTE(review): a TypeError raised *inside* a valid step is also reported as
            # an invalid function here — matches the original behaviour; confirm intent.
            raise FunctionError("invalid function passed as element of function_list")
    return text_string
def remove_esc_chars(text_string):
    '''
    Removes any escape character within text_string and returns the new string as type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    # Drop each backslash plus the single word character following it,
    # then collapse any remaining runs of whitespace to single spaces.
    stripped = re.sub(r'\\\w', "", text_string)
    return " ".join(stripped.split())
def remove_numbers(text_string):
    '''
    Removes any digit value discovered within text_string and returns the new string as
    type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    # Remove digit runs (including embedded '.', '/', ',') that start at a word
    # boundary, then normalise whitespace.
    without_numbers = re.sub(r'\b[\d.\/,]+', "", text_string)
    return " ".join(without_numbers.split())
def remove_number_words(text_string):
    '''
    Removes any integer represented as a word within text_string and returns the new
    string as type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    for number_word in NUMBER_WORDS:
        # Drop the entire token containing the number word, prefixes/suffixes included.
        text_string = re.sub(r'[\S]*\b' + number_word + r'[\S]*', "", text_string)
    return " ".join(text_string.split())
def remove_unbound_punct(text_string):
    '''
    Removes all punctuation unattached from a non-whitespace or attached to another
    punctuation character unexpectedly (e.g. ".;';") within text_string and returns the
    new string as type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    # Two adjacent punctuation marks, or punctuation not bound to a word character.
    double_punct = r'[' + PUNCT + r'][' + PUNCT + r']+'
    unbound_punct = r'\B[' + PUNCT + r']+'
    cleaned = re.sub(double_punct + r'|' + unbound_punct, "", text_string)
    return " ".join(cleaned.split())
def remove_urls(text_string):
    '''
    Removes all URLs within text_string and returns the new string as type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    # Strip anything beginning with "http" up to the next whitespace, then
    # collapse the leftover whitespace runs.
    no_urls = re.sub(r'http\S+', "", text_string)
    return " ".join(no_urls.split())
def remove_whitespace(text_string):
    '''
    Removes all redundant whitespace found within text_string and returns the new string
    as type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a string or NoneType not be passed as an argument
    '''
    if text_string is None or text_string == "":
        return ""
    if isinstance(text_string, str):
        return " ".join(text_string.split())
    raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | remove_unbound_punct | python | def remove_unbound_punct(text_string):
'''
Removes all punctuation unattached from a non-whitespace or attached to another punctuation
character unexpectedly (e.g. ".;';") within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r''.join([r'[', PUNCT, r'][', PUNCT, r']+|\B[', PUNCT, r']+']), "",
text_string).split())
else:
raise InputError("string not passed as argument") | Removes all punctuation unattached from a non-whitespace or attached to another punctuation
character unexpectedly (e.g. ".;';") within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L292-L311 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace("&quot;", "'")
else:
raise InputError("string not passed as argument for text_string")
def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument")
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument")
def create_sentence_list(text_string):
'''
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return []
elif isinstance(text_string, str):
return SENTENCE_TOKENIZER.tokenize(text_string)
else:
raise InputError("non-string passed as argument for create_sentence_list")
def keyword_tokenize(text_string):
'''
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string) if word not in STOPWORDS and len(word) >= 3])
else:
raise InputError("string not passed as argument for text_string")
def lemmatize(text_string):
'''
Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return LEMMATIZER.lemmatize(text_string)
else:
raise InputError("string not passed as primary argument")
def lowercase(text_string):
'''
Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return text_string.lower()
else:
raise InputError("string not passed as argument for text_string")
def preprocess_text(text_string, function_list):
'''
Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
if isinstance(function_list, list):
for func in function_list:
try:
text_string = func(text_string)
except (NameError, TypeError):
raise FunctionError("invalid function passed as element of function_list")
except:
raise
return text_string
else:
raise InputError("list of functions not passed as argument for function_list")
else:
raise InputError("string not passed as argument for text_string")
def remove_esc_chars(text_string):
'''
Removes any escape character within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\\\w', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_numbers(text_string):
'''
Removes any digit value discovered within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_number_words(text_string):
'''
Removes any integer represented as a word within text_string and returns the new string as
type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in NUMBER_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_time_words(text_string):
'''
Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in TIME_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_urls(text_string):
'''
Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'http\S+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | remove_urls | python | def remove_urls(text_string):
'''
Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'http\S+', "", text_string).split())
else:
raise InputError("string not passed as argument") | Removes all URLs within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L313-L330 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace("&quot;", "'")
else:
raise InputError("string not passed as argument for text_string")
def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument")
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument")
def create_sentence_list(text_string):
'''
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return []
elif isinstance(text_string, str):
return SENTENCE_TOKENIZER.tokenize(text_string)
else:
raise InputError("non-string passed as argument for create_sentence_list")
def keyword_tokenize(text_string):
'''
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string) if word not in STOPWORDS and len(word) >= 3])
else:
raise InputError("string not passed as argument for text_string")
def lemmatize(text_string):
'''
Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return LEMMATIZER.lemmatize(text_string)
else:
raise InputError("string not passed as primary argument")
def lowercase(text_string):
'''
Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return text_string.lower()
else:
raise InputError("string not passed as argument for text_string")
def preprocess_text(text_string, function_list):
'''
Given each function within function_list, applies the order of functions put forward onto
text_string, returning the processed string as type str.
Keyword argument:
- function_list: list of functions available in preprocessing.text
- text_string: string instance
Exceptions raised:
- FunctionError: occurs should an invalid function be passed within the list of functions
- InputError: occurs should text_string be non-string, or function_list be non-list
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
if isinstance(function_list, list):
for func in function_list:
try:
text_string = func(text_string)
except (NameError, TypeError):
raise FunctionError("invalid function passed as element of function_list")
except:
raise
return text_string
else:
raise InputError("list of functions not passed as argument for function_list")
else:
raise InputError("string not passed as argument for text_string")
def remove_esc_chars(text_string):
'''
Removes any escape character within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\\\w', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_numbers(text_string):
'''
Removes any digit value discovered within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
else:
raise InputError("string not passed as argument")
def remove_number_words(text_string):
'''
Removes any integer represented as a word within text_string and returns the new string as
type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in NUMBER_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_time_words(text_string):
'''
Removes any word associated to time (day, week, month, etc.) within text_string and returns the
new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for word in TIME_WORDS:
text_string = re.sub(r'[\S]*\b'+word+r'[\S]*', "", text_string)
return " ".join(text_string.split())
else:
raise InputError("string not passed as argument")
def remove_unbound_punct(text_string):
'''
Removes all punctuation unattached from a non-whitespace or attached to another punctuation
character unexpectedly (e.g. ".;';") within text_string and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(re.sub(r''.join([r'[', PUNCT, r'][', PUNCT, r']+|\B[', PUNCT, r']+']), "",
text_string).split())
else:
raise InputError("string not passed as argument")
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument")
|
SpotlightData/preprocessing | preprocessing/text.py | remove_whitespace | python | def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument") | Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument | train | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L332-L349 | null | '''
Text pre-processing module:
'''
from preprocessing.errors import FunctionError, InputError
import preprocessing.spellcheck as spellcheck
import html
import json
from os import path
import re
import string
import nltk.data
nltk.data.path = [path.join(path.dirname(__file__), "data")]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
KEYWORD_TOKENIZER = RegexpTokenizer(r'\b[\w.\/,-]+\b|[-.,\/()]')
LEMMATIZER = WordNetLemmatizer()
LIGATURES = json.load(open(path.join(path.dirname(__file__), "data/latin_characters.json"), "r"))
NUMBER_WORDS = [NUMBER_WORD.replace("\n", "") for NUMBER_WORD in open(path.join(path.dirname(__file__), "data/word_numbers.txt"), "r").readlines()]
PUNCT = string.punctuation
STOPWORDS = stopwords.words("english")
SENTENCE_TOKENIZER = nltk.data.load("tokenizers/punkt/english.pickle")
TIME_WORDS = [TIME_WORD.replace("\n", "") for TIME_WORD in open(path.join(path.dirname(__file__), "data/word_time.txt"), "r").readlines()]
#functions
def convert_html_entities(text_string):
'''
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace("&quot;", "'")
else:
raise InputError("string not passed as argument for text_string")
def convert_ligatures(text_string):
'''
Coverts Latin character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
for i in range(0, len(LIGATURES)):
text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
return text_string
else:
raise InputError("none type or string not passed as an argument")
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument")
def create_sentence_list(text_string):
'''
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
returns said list as type list of str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return []
elif isinstance(text_string, str):
return SENTENCE_TOKENIZER.tokenize(text_string)
else:
raise InputError("non-string passed as argument for create_sentence_list")
def keyword_tokenize(text_string):
'''
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
length smaller than 3, and returns the new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string) if word not in STOPWORDS and len(word) >= 3])
else:
raise InputError("string not passed as argument for text_string")
def lemmatize(text_string):
'''
Returns base from of text_string using NLTK's WordNetLemmatizer as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return LEMMATIZER.lemmatize(text_string)
else:
raise InputError("string not passed as primary argument")
def lowercase(text_string):
'''
Converts text_string into lowercase and returns the converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return text_string.lower()
else:
raise InputError("string not passed as argument for text_string")
def preprocess_text(text_string, function_list):
    '''
    Given each function within function_list, applies the order of functions
    put forward onto text_string, returning the processed string as type str.
    Keyword argument:
    - function_list: list of functions available in preprocessing.text
    - text_string: string instance
    Exceptions raised:
    - FunctionError: occurs should an invalid function be passed within the
      list of functions
    - InputError: occurs should text_string be non-string, or function_list
      be non-list
    '''
    # Empty input short-circuits before function_list is even validated.
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument for text_string")
    if not isinstance(function_list, list):
        raise InputError("list of functions not passed as argument for function_list")
    processed = text_string
    for step in function_list:
        try:
            processed = step(processed)
        except (NameError, TypeError):
            # A non-callable (or otherwise unusable) entry is reported as a
            # FunctionError; any other exception propagates unchanged.
            raise FunctionError("invalid function passed as element of function_list")
    return processed
def remove_esc_chars(text_string):
    '''
    Removes any escape character within text_string and returns the new
    string as type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    # Strip literal backslash-plus-word-character pairs, then collapse the
    # remaining whitespace runs to single spaces.
    stripped = re.sub(r'\\\w', "", text_string)
    return " ".join(stripped.split())
def remove_numbers(text_string):
    '''
    Removes any digit value discovered within text_string and returns the new
    string as type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    # NOTE(review): the character class also consumes '.', '/' and ','
    # adjoining the digits (e.g. "12.5", "1/2", "1,000") — presumably
    # intentional for numeric formats; confirm before tightening.
    without_digits = re.sub(r'\b[\d.\/,]+', "", text_string)
    return " ".join(without_digits.split())
def remove_number_words(text_string):
    '''
    Removes any integer represented as a word within text_string and returns
    the new string as type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    cleaned = text_string
    # Each number word is removed together with any non-space characters
    # attached to it on either side.
    for number_word in NUMBER_WORDS:
        cleaned = re.sub(r'[\S]*\b' + number_word + r'[\S]*', "", cleaned)
    return " ".join(cleaned.split())
def remove_time_words(text_string):
    '''
    Removes any word associated to time (day, week, month, etc.) within
    text_string and returns the new string as type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    cleaned = text_string
    # Each time word is removed together with any non-space characters
    # attached to it on either side.
    for time_word in TIME_WORDS:
        cleaned = re.sub(r'[\S]*\b' + time_word + r'[\S]*', "", cleaned)
    return " ".join(cleaned.split())
def remove_unbound_punct(text_string):
    '''
    Removes all punctuation unattached from a non-whitespace or attached to
    another punctuation character unexpectedly (e.g. ".;';") within
    text_string and returns the new string as type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    # Two punctuation marks in a row, or punctuation not preceded by a word
    # boundary, are both removed.
    pattern = r'[{0}][{0}]+|\B[{0}]+'.format(PUNCT)
    return " ".join(re.sub(pattern, "", text_string).split())
def remove_urls(text_string):
    '''
    Removes all URLs within text_string and returns the new string as
    type str.
    Keyword argument:
    - text_string: string instance
    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    # Drop anything starting with "http" up to the next whitespace
    # (covers both http:// and https:// links).
    without_urls = re.sub(r'http\S+', "", text_string)
    return " ".join(without_urls.split())
|
mezz64/pyEmby | pyemby/device.py | EmbyDevice.media_artist | python | def media_artist(self):
try:
artists = self.session['NowPlayingItem']['Artists']
if len(artists) > 1:
return artists[0]
else:
return artists
except KeyError:
return None | Artist of current playing media (Music track only). | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/device.py#L121-L130 | null | class EmbyDevice(object):
""" Represents properties of an Emby Device. """
def __init__(self, session, server):
"""Initialize Emby device object."""
self.server = server
self.is_active = True
self.update_data(session)
def update_data(self, session):
""" Update session object. """
self.session = session
def set_active(self, active):
""" Mark device as on/off. """
self.is_active = active
@property
def session_raw(self):
""" Return raw session data. """
return self.session
@property
def session_id(self):
""" Return current session Id. """
try:
return self.session['Id']
except KeyError:
return None
@property
def unique_id(self):
""" Return device id."""
try:
return self.session['DeviceId']
except KeyError:
return None
@property
def name(self):
""" Return device name."""
try:
return self.session['DeviceName']
except KeyError:
return None
@property
def client(self):
""" Return client name. """
try:
return self.session['Client']
except KeyError:
return None
@property
def username(self):
""" Return device name."""
try:
return self.session['UserName']
except KeyError:
return None
@property
def media_title(self):
""" Return title currently playing."""
try:
return self.session['NowPlayingItem']['Name']
except KeyError:
return None
@property
def media_season(self):
"""Season of curent playing media (TV Show only)."""
try:
return self.session['NowPlayingItem']['ParentIndexNumber']
except KeyError:
return None
@property
def media_series_title(self):
"""The title of the series of current playing media (TV Show only)."""
try:
return self.session['NowPlayingItem']['SeriesName']
except KeyError:
return None
@property
def media_episode(self):
"""Episode of current playing media (TV Show only)."""
try:
return self.session['NowPlayingItem']['IndexNumber']
except KeyError:
return None
@property
def media_album_name(self):
"""Album name of current playing media (Music track only)."""
try:
return self.session['NowPlayingItem']['Album']
except KeyError:
return None
@property
@property
def media_album_artist(self):
"""Album artist of current playing media (Music track only)."""
try:
return self.session['NowPlayingItem']['AlbumArtist']
except KeyError:
return None
@property
def media_id(self):
""" Return title currently playing."""
try:
return self.session['NowPlayingItem']['Id']
except KeyError:
return None
@property
def media_type(self):
""" Return type currently playing."""
try:
return self.session['NowPlayingItem']['Type']
except KeyError:
return None
@property
def media_image_url_deprecated(self):
"""Image url of current playing media."""
if self.is_nowplaying:
base = self.server.construct_url(API_URL)
try:
image_id = self.session['NowPlayingItem']['ThumbItemId']
image_type = 'Thumb'
except KeyError:
try:
image_id = self.session[
'NowPlayingItem']['PrimaryImageItemId']
image_type = 'Primary'
except KeyError:
return None
url = '{0}/Items/{1}/Images/{2}?api_key={3}'.format(
base, image_id, image_type, self.server.api_key)
return url
else:
return None
@property
def media_image_url(self):
"""Image url of current playing media."""
if self.is_nowplaying:
base = self.server.construct_url(API_URL)
try:
image_id = self.session['NowPlayingItem']['ImageTags']['Thumb']
image_type = 'Thumb'
except KeyError:
try:
image_id = self.session[
'NowPlayingItem']['ImageTags']['Primary']
image_type = 'Primary'
except KeyError:
return None
url = '{0}/Items/{1}/Images/{2}?width=500&tag={3}&api_key={4}'.format(
base, self.media_id, image_type, image_id, self.server.api_key)
return url
else:
return None
@property
def media_position(self):
""" Return position currently playing."""
try:
return int(self.session['PlayState']['PositionTicks']) / 10000000
except KeyError:
return None
@property
def media_runtime(self):
""" Return total runtime length."""
try:
return int(
self.session['NowPlayingItem']['RunTimeTicks']) / 10000000
except KeyError:
return None
@property
def media_percent_played(self):
""" Return media percent played. """
try:
return (self.media_position / self.media_runtime) * 100
except TypeError:
return None
@property
def state(self):
""" Return current playstate of the device. """
if self.is_active:
if 'NowPlayingItem' in self.session:
if self.session['PlayState']['IsPaused']:
return STATE_PAUSED
else:
return STATE_PLAYING
else:
return STATE_IDLE
else:
return STATE_OFF
@property
def is_nowplaying(self):
""" Return true if an item is currently active. """
if self.state == 'Idle' or self.state == 'Off':
return False
else:
return True
@property
def supports_remote_control(self):
""" Return remote control status. """
return self.session['SupportsRemoteControl']
async def set_playstate(self, state, pos=0):
""" Send media commands to server. """
url = '{}/Sessions/{}/Playing/{}'.format(
self.server.construct_url(API_URL), self.session_id, state)
params = {'api_key': self.server.api_key}
if state == 'seek':
params['SeekPositionTicks'] = int(pos * 10000000)
params['static'] = 'true'
_LOGGER.debug('Playstate URL: %s', url)
post = await self.server.api_post(url, params)
if post is None:
_LOGGER.debug('Error sending command.')
else:
_LOGGER.debug('Post response: %s', post)
def media_play(self):
""" Send play command to device. """
return self.set_playstate('unpause')
def media_pause(self):
""" Send pause command to device. """
return self.set_playstate('pause')
def media_stop(self):
""" Send stop command to device. """
return self.set_playstate('stop')
def media_next(self):
""" Send next track command to device. """
return self.set_playstate('nexttrack')
def media_previous(self):
""" Send previous track command to device. """
return self.set_playstate('previoustrack')
def media_seek(self, position):
""" Send seek command to device. """
return self.set_playstate('seek', position)
|
mezz64/pyEmby | pyemby/device.py | EmbyDevice.media_image_url | python | def media_image_url(self):
if self.is_nowplaying:
base = self.server.construct_url(API_URL)
try:
image_id = self.session['NowPlayingItem']['ImageTags']['Thumb']
image_type = 'Thumb'
except KeyError:
try:
image_id = self.session[
'NowPlayingItem']['ImageTags']['Primary']
image_type = 'Primary'
except KeyError:
return None
url = '{0}/Items/{1}/Images/{2}?width=500&tag={3}&api_key={4}'.format(
base, self.media_id, image_type, image_id, self.server.api_key)
return url
else:
return None | Image url of current playing media. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/device.py#L178-L196 | null | class EmbyDevice(object):
""" Represents properties of an Emby Device. """
def __init__(self, session, server):
"""Initialize Emby device object."""
self.server = server
self.is_active = True
self.update_data(session)
def update_data(self, session):
""" Update session object. """
self.session = session
def set_active(self, active):
""" Mark device as on/off. """
self.is_active = active
@property
def session_raw(self):
""" Return raw session data. """
return self.session
@property
def session_id(self):
""" Return current session Id. """
try:
return self.session['Id']
except KeyError:
return None
@property
def unique_id(self):
""" Return device id."""
try:
return self.session['DeviceId']
except KeyError:
return None
@property
def name(self):
""" Return device name."""
try:
return self.session['DeviceName']
except KeyError:
return None
@property
def client(self):
""" Return client name. """
try:
return self.session['Client']
except KeyError:
return None
@property
def username(self):
""" Return device name."""
try:
return self.session['UserName']
except KeyError:
return None
@property
def media_title(self):
""" Return title currently playing."""
try:
return self.session['NowPlayingItem']['Name']
except KeyError:
return None
@property
def media_season(self):
"""Season of curent playing media (TV Show only)."""
try:
return self.session['NowPlayingItem']['ParentIndexNumber']
except KeyError:
return None
@property
def media_series_title(self):
"""The title of the series of current playing media (TV Show only)."""
try:
return self.session['NowPlayingItem']['SeriesName']
except KeyError:
return None
@property
def media_episode(self):
"""Episode of current playing media (TV Show only)."""
try:
return self.session['NowPlayingItem']['IndexNumber']
except KeyError:
return None
@property
def media_album_name(self):
"""Album name of current playing media (Music track only)."""
try:
return self.session['NowPlayingItem']['Album']
except KeyError:
return None
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
try:
artists = self.session['NowPlayingItem']['Artists']
if len(artists) > 1:
return artists[0]
else:
return artists
except KeyError:
return None
@property
def media_album_artist(self):
"""Album artist of current playing media (Music track only)."""
try:
return self.session['NowPlayingItem']['AlbumArtist']
except KeyError:
return None
@property
def media_id(self):
""" Return title currently playing."""
try:
return self.session['NowPlayingItem']['Id']
except KeyError:
return None
@property
def media_type(self):
""" Return type currently playing."""
try:
return self.session['NowPlayingItem']['Type']
except KeyError:
return None
@property
def media_image_url_deprecated(self):
"""Image url of current playing media."""
if self.is_nowplaying:
base = self.server.construct_url(API_URL)
try:
image_id = self.session['NowPlayingItem']['ThumbItemId']
image_type = 'Thumb'
except KeyError:
try:
image_id = self.session[
'NowPlayingItem']['PrimaryImageItemId']
image_type = 'Primary'
except KeyError:
return None
url = '{0}/Items/{1}/Images/{2}?api_key={3}'.format(
base, image_id, image_type, self.server.api_key)
return url
else:
return None
@property
@property
def media_position(self):
""" Return position currently playing."""
try:
return int(self.session['PlayState']['PositionTicks']) / 10000000
except KeyError:
return None
@property
def media_runtime(self):
""" Return total runtime length."""
try:
return int(
self.session['NowPlayingItem']['RunTimeTicks']) / 10000000
except KeyError:
return None
@property
def media_percent_played(self):
""" Return media percent played. """
try:
return (self.media_position / self.media_runtime) * 100
except TypeError:
return None
@property
def state(self):
""" Return current playstate of the device. """
if self.is_active:
if 'NowPlayingItem' in self.session:
if self.session['PlayState']['IsPaused']:
return STATE_PAUSED
else:
return STATE_PLAYING
else:
return STATE_IDLE
else:
return STATE_OFF
@property
def is_nowplaying(self):
""" Return true if an item is currently active. """
if self.state == 'Idle' or self.state == 'Off':
return False
else:
return True
@property
def supports_remote_control(self):
""" Return remote control status. """
return self.session['SupportsRemoteControl']
async def set_playstate(self, state, pos=0):
""" Send media commands to server. """
url = '{}/Sessions/{}/Playing/{}'.format(
self.server.construct_url(API_URL), self.session_id, state)
params = {'api_key': self.server.api_key}
if state == 'seek':
params['SeekPositionTicks'] = int(pos * 10000000)
params['static'] = 'true'
_LOGGER.debug('Playstate URL: %s', url)
post = await self.server.api_post(url, params)
if post is None:
_LOGGER.debug('Error sending command.')
else:
_LOGGER.debug('Post response: %s', post)
def media_play(self):
""" Send play command to device. """
return self.set_playstate('unpause')
def media_pause(self):
""" Send pause command to device. """
return self.set_playstate('pause')
def media_stop(self):
""" Send stop command to device. """
return self.set_playstate('stop')
def media_next(self):
""" Send next track command to device. """
return self.set_playstate('nexttrack')
def media_previous(self):
""" Send previous track command to device. """
return self.set_playstate('previoustrack')
def media_seek(self, position):
""" Send seek command to device. """
return self.set_playstate('seek', position)
|
mezz64/pyEmby | pyemby/device.py | EmbyDevice.state | python | def state(self):
if self.is_active:
if 'NowPlayingItem' in self.session:
if self.session['PlayState']['IsPaused']:
return STATE_PAUSED
else:
return STATE_PLAYING
else:
return STATE_IDLE
else:
return STATE_OFF | Return current playstate of the device. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/device.py#L224-L235 | null | class EmbyDevice(object):
""" Represents properties of an Emby Device. """
def __init__(self, session, server):
"""Initialize Emby device object."""
self.server = server
self.is_active = True
self.update_data(session)
def update_data(self, session):
""" Update session object. """
self.session = session
def set_active(self, active):
""" Mark device as on/off. """
self.is_active = active
@property
def session_raw(self):
""" Return raw session data. """
return self.session
@property
def session_id(self):
""" Return current session Id. """
try:
return self.session['Id']
except KeyError:
return None
@property
def unique_id(self):
""" Return device id."""
try:
return self.session['DeviceId']
except KeyError:
return None
@property
def name(self):
""" Return device name."""
try:
return self.session['DeviceName']
except KeyError:
return None
@property
def client(self):
""" Return client name. """
try:
return self.session['Client']
except KeyError:
return None
@property
def username(self):
""" Return device name."""
try:
return self.session['UserName']
except KeyError:
return None
@property
def media_title(self):
""" Return title currently playing."""
try:
return self.session['NowPlayingItem']['Name']
except KeyError:
return None
@property
def media_season(self):
"""Season of curent playing media (TV Show only)."""
try:
return self.session['NowPlayingItem']['ParentIndexNumber']
except KeyError:
return None
@property
def media_series_title(self):
"""The title of the series of current playing media (TV Show only)."""
try:
return self.session['NowPlayingItem']['SeriesName']
except KeyError:
return None
@property
def media_episode(self):
"""Episode of current playing media (TV Show only)."""
try:
return self.session['NowPlayingItem']['IndexNumber']
except KeyError:
return None
@property
def media_album_name(self):
"""Album name of current playing media (Music track only)."""
try:
return self.session['NowPlayingItem']['Album']
except KeyError:
return None
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
try:
artists = self.session['NowPlayingItem']['Artists']
if len(artists) > 1:
return artists[0]
else:
return artists
except KeyError:
return None
@property
def media_album_artist(self):
"""Album artist of current playing media (Music track only)."""
try:
return self.session['NowPlayingItem']['AlbumArtist']
except KeyError:
return None
@property
def media_id(self):
""" Return title currently playing."""
try:
return self.session['NowPlayingItem']['Id']
except KeyError:
return None
@property
def media_type(self):
""" Return type currently playing."""
try:
return self.session['NowPlayingItem']['Type']
except KeyError:
return None
@property
def media_image_url_deprecated(self):
"""Image url of current playing media."""
if self.is_nowplaying:
base = self.server.construct_url(API_URL)
try:
image_id = self.session['NowPlayingItem']['ThumbItemId']
image_type = 'Thumb'
except KeyError:
try:
image_id = self.session[
'NowPlayingItem']['PrimaryImageItemId']
image_type = 'Primary'
except KeyError:
return None
url = '{0}/Items/{1}/Images/{2}?api_key={3}'.format(
base, image_id, image_type, self.server.api_key)
return url
else:
return None
@property
def media_image_url(self):
"""Image url of current playing media."""
if self.is_nowplaying:
base = self.server.construct_url(API_URL)
try:
image_id = self.session['NowPlayingItem']['ImageTags']['Thumb']
image_type = 'Thumb'
except KeyError:
try:
image_id = self.session[
'NowPlayingItem']['ImageTags']['Primary']
image_type = 'Primary'
except KeyError:
return None
url = '{0}/Items/{1}/Images/{2}?width=500&tag={3}&api_key={4}'.format(
base, self.media_id, image_type, image_id, self.server.api_key)
return url
else:
return None
@property
def media_position(self):
""" Return position currently playing."""
try:
return int(self.session['PlayState']['PositionTicks']) / 10000000
except KeyError:
return None
@property
def media_runtime(self):
""" Return total runtime length."""
try:
return int(
self.session['NowPlayingItem']['RunTimeTicks']) / 10000000
except KeyError:
return None
@property
def media_percent_played(self):
""" Return media percent played. """
try:
return (self.media_position / self.media_runtime) * 100
except TypeError:
return None
@property
@property
def is_nowplaying(self):
""" Return true if an item is currently active. """
if self.state == 'Idle' or self.state == 'Off':
return False
else:
return True
@property
def supports_remote_control(self):
""" Return remote control status. """
return self.session['SupportsRemoteControl']
async def set_playstate(self, state, pos=0):
""" Send media commands to server. """
url = '{}/Sessions/{}/Playing/{}'.format(
self.server.construct_url(API_URL), self.session_id, state)
params = {'api_key': self.server.api_key}
if state == 'seek':
params['SeekPositionTicks'] = int(pos * 10000000)
params['static'] = 'true'
_LOGGER.debug('Playstate URL: %s', url)
post = await self.server.api_post(url, params)
if post is None:
_LOGGER.debug('Error sending command.')
else:
_LOGGER.debug('Post response: %s', post)
def media_play(self):
""" Send play command to device. """
return self.set_playstate('unpause')
def media_pause(self):
""" Send pause command to device. """
return self.set_playstate('pause')
def media_stop(self):
""" Send stop command to device. """
return self.set_playstate('stop')
def media_next(self):
""" Send next track command to device. """
return self.set_playstate('nexttrack')
def media_previous(self):
""" Send previous track command to device. """
return self.set_playstate('previoustrack')
def media_seek(self, position):
""" Send seek command to device. """
return self.set_playstate('seek', position)
|
mezz64/pyEmby | pyemby/device.py | EmbyDevice.set_playstate | python | async def set_playstate(self, state, pos=0):
url = '{}/Sessions/{}/Playing/{}'.format(
self.server.construct_url(API_URL), self.session_id, state)
params = {'api_key': self.server.api_key}
if state == 'seek':
params['SeekPositionTicks'] = int(pos * 10000000)
params['static'] = 'true'
_LOGGER.debug('Playstate URL: %s', url)
post = await self.server.api_post(url, params)
if post is None:
_LOGGER.debug('Error sending command.')
else:
_LOGGER.debug('Post response: %s', post) | Send media commands to server. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/device.py#L250-L266 | null | class EmbyDevice(object):
""" Represents properties of an Emby Device. """
def __init__(self, session, server):
"""Initialize Emby device object."""
self.server = server
self.is_active = True
self.update_data(session)
def update_data(self, session):
""" Update session object. """
self.session = session
def set_active(self, active):
""" Mark device as on/off. """
self.is_active = active
@property
def session_raw(self):
""" Return raw session data. """
return self.session
@property
def session_id(self):
""" Return current session Id. """
try:
return self.session['Id']
except KeyError:
return None
@property
def unique_id(self):
""" Return device id."""
try:
return self.session['DeviceId']
except KeyError:
return None
@property
def name(self):
""" Return device name."""
try:
return self.session['DeviceName']
except KeyError:
return None
@property
def client(self):
""" Return client name. """
try:
return self.session['Client']
except KeyError:
return None
@property
def username(self):
""" Return device name."""
try:
return self.session['UserName']
except KeyError:
return None
@property
def media_title(self):
""" Return title currently playing."""
try:
return self.session['NowPlayingItem']['Name']
except KeyError:
return None
@property
def media_season(self):
"""Season of curent playing media (TV Show only)."""
try:
return self.session['NowPlayingItem']['ParentIndexNumber']
except KeyError:
return None
@property
def media_series_title(self):
"""The title of the series of current playing media (TV Show only)."""
try:
return self.session['NowPlayingItem']['SeriesName']
except KeyError:
return None
@property
def media_episode(self):
"""Episode of current playing media (TV Show only)."""
try:
return self.session['NowPlayingItem']['IndexNumber']
except KeyError:
return None
@property
def media_album_name(self):
"""Album name of current playing media (Music track only)."""
try:
return self.session['NowPlayingItem']['Album']
except KeyError:
return None
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
try:
artists = self.session['NowPlayingItem']['Artists']
if len(artists) > 1:
return artists[0]
else:
return artists
except KeyError:
return None
@property
def media_album_artist(self):
"""Album artist of current playing media (Music track only)."""
try:
return self.session['NowPlayingItem']['AlbumArtist']
except KeyError:
return None
@property
def media_id(self):
""" Return title currently playing."""
try:
return self.session['NowPlayingItem']['Id']
except KeyError:
return None
@property
def media_type(self):
""" Return type currently playing."""
try:
return self.session['NowPlayingItem']['Type']
except KeyError:
return None
@property
def media_image_url_deprecated(self):
"""Image url of current playing media."""
if self.is_nowplaying:
base = self.server.construct_url(API_URL)
try:
image_id = self.session['NowPlayingItem']['ThumbItemId']
image_type = 'Thumb'
except KeyError:
try:
image_id = self.session[
'NowPlayingItem']['PrimaryImageItemId']
image_type = 'Primary'
except KeyError:
return None
url = '{0}/Items/{1}/Images/{2}?api_key={3}'.format(
base, image_id, image_type, self.server.api_key)
return url
else:
return None
@property
def media_image_url(self):
"""Image url of current playing media."""
if self.is_nowplaying:
base = self.server.construct_url(API_URL)
try:
image_id = self.session['NowPlayingItem']['ImageTags']['Thumb']
image_type = 'Thumb'
except KeyError:
try:
image_id = self.session[
'NowPlayingItem']['ImageTags']['Primary']
image_type = 'Primary'
except KeyError:
return None
url = '{0}/Items/{1}/Images/{2}?width=500&tag={3}&api_key={4}'.format(
base, self.media_id, image_type, image_id, self.server.api_key)
return url
else:
return None
@property
def media_position(self):
""" Return position currently playing."""
try:
return int(self.session['PlayState']['PositionTicks']) / 10000000
except KeyError:
return None
@property
def media_runtime(self):
""" Return total runtime length."""
try:
return int(
self.session['NowPlayingItem']['RunTimeTicks']) / 10000000
except KeyError:
return None
@property
def media_percent_played(self):
""" Return media percent played. """
try:
return (self.media_position / self.media_runtime) * 100
except TypeError:
return None
@property
def state(self):
""" Return current playstate of the device. """
if self.is_active:
if 'NowPlayingItem' in self.session:
if self.session['PlayState']['IsPaused']:
return STATE_PAUSED
else:
return STATE_PLAYING
else:
return STATE_IDLE
else:
return STATE_OFF
@property
def is_nowplaying(self):
""" Return true if an item is currently active. """
if self.state == 'Idle' or self.state == 'Off':
return False
else:
return True
@property
def supports_remote_control(self):
""" Return remote control status. """
return self.session['SupportsRemoteControl']
def media_play(self):
""" Send play command to device. """
return self.set_playstate('unpause')
def media_pause(self):
""" Send pause command to device. """
return self.set_playstate('pause')
def media_stop(self):
""" Send stop command to device. """
return self.set_playstate('stop')
def media_next(self):
""" Send next track command to device. """
return self.set_playstate('nexttrack')
def media_previous(self):
""" Send previous track command to device. """
return self.set_playstate('previoustrack')
def media_seek(self, position):
""" Send seek command to device. """
return self.set_playstate('seek', position)
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.add_new_devices_callback | python | def add_new_devices_callback(self, callback):
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback) | Register as callback for when new devices are added. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L114-L117 | null | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
    async def stop(self):
        """Async method for stopping connectivity with the emby server.

        Raises the shutdown flag first so the reconnect loop in
        socket_connection() exits instead of retrying, then closes the
        websocket and, when we own the event loop, stops it.
        """
        self._shutdown = True
        if self.wsck:
            _LOGGER.info('Closing Emby server websocket.')
            await self.wsck.close()
            self.wsck = None
        if self._own_loop:
            _LOGGER.info("Shutting down Emby server loop...")
            # Threadsafe variant so stopping is safe even when invoked
            # from outside the loop's own thread.
            self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
    async def register(self):
        """Register library device id and get initial device list.

        Fetches /Sessions as a connectivity check; on success caches the
        payload, builds the initial device map, and finally schedules the
        websocket listener on the event loop.
        """
        url = '{}/Sessions'.format(self.construct_url(API_URL))
        params = {'api_key': self._api_key}
        reg = await self.api_request(url, params)
        if reg is None:
            # api_request already logged the underlying failure.
            self._registered = False
            _LOGGER.error('Unable to register emby client.')
        else:
            self._registered = True
            _LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
            self._sessions = reg
            # Build initial device list.
            self.update_device_list(self._sessions)
        # Scheduled either way: socket_connection() bails out early when
        # self._registered is False.
        asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
    def update_device_list(self, sessions):
        """ Update device list.

        Syncs the cached device map against *sessions* (a /Sessions
        payload): wraps unseen devices in EmbyDevice, pushes fresh data
        into known ones, and marks devices missing from the payload as
        inactive, firing the registered callbacks accordingly.
        """
        if sessions is None:
            _LOGGER.error('Error updating Emby devices.')
            return
        new_devices = []
        active_devices = []
        dev_update = False
        for device in sessions:
            dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
            try:
                _LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
                              dev_name, device['NowPlayingItem']['Type'],
                              device['NowPlayingItem']['IsThemeMedia'])
            except KeyError:
                # Nothing playing on this session; the line above is
                # debug-only, so a missing key is harmless.
                pass
            active_devices.append(dev_name)
            # Sessions matching our own api id are never tracked.
            if dev_name not in self._devices and \
                    device['DeviceId'] != str(self._api_id):
                _LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
                              dev_name)
                new = EmbyDevice(device, self)
                self._devices[dev_name] = new
                new_devices.append(new)
            elif device['DeviceId'] != str(self._api_id):
                # Before we send in new data check for changes to state
                # to decide if we need to fire the update callback
                if not self._devices[dev_name].is_active:
                    # Device wasn't active on the last update
                    # We need to fire a device callback to let subs now
                    dev_update = True
                do_update = self.update_check(
                    self._devices[dev_name], device)
                self._devices[dev_name].update_data(device)
                self._devices[dev_name].set_active(True)
                if dev_update:
                    self._do_new_devices_callback(0)
                    dev_update = False
                if do_update:
                    self._do_update_callback(dev_name)
        # Need to check for new inactive devices and flag
        for dev_id in self._devices:
            if dev_id not in active_devices:
                # Device no longer active
                if self._devices[dev_id].is_active:
                    self._devices[dev_id].set_active(False)
                    self._do_update_callback(dev_id)
                    self._do_stale_devices_callback(dev_id)
        # Call device callback if new devices were found.
        if new_devices:
            self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
    def get_latest_items(self, user_id, limit=3, is_played='false',
                         include_item_types='episode'):
        """ Get latest items by scheduling the worker method.

        Fire-and-forget wrapper around async_get_latest_items for use
        off the event loop; the fetched payload is not delivered back to
        the caller.
        """
        if not self._registered:
            _LOGGER.debug('Client not registered, cannot get items.')
            return
        def return_result(future):
            """ Return result.

            NOTE(review): values returned from a done-callback are
            discarded; the practical effect of future.result() here is
            to re-raise any exception from the fetch. Confirm intent.
            """
            return future.result()
        run_coro = asyncio.ensure_future(self.async_get_latest_items(
            user_id, limit, is_played, include_item_types),
                                         loop=self._event_loop)
        run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer._do_new_devices_callback | python | def _do_new_devices_callback(self, msg):
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg) | Call registered callback functions. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L119-L123 | null | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
def update_device_list(self, sessions):
""" Update device list. """
if sessions is None:
_LOGGER.error('Error updating Emby devices.')
return
new_devices = []
active_devices = []
dev_update = False
for device in sessions:
dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
try:
_LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
dev_name, device['NowPlayingItem']['Type'],
device['NowPlayingItem']['IsThemeMedia'])
except KeyError:
pass
active_devices.append(dev_name)
if dev_name not in self._devices and \
device['DeviceId'] != str(self._api_id):
_LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
dev_name)
new = EmbyDevice(device, self)
self._devices[dev_name] = new
new_devices.append(new)
elif device['DeviceId'] != str(self._api_id):
# Before we send in new data check for changes to state
# to decide if we need to fire the update callback
if not self._devices[dev_name].is_active:
# Device wasn't active on the last update
# We need to fire a device callback to let subs now
dev_update = True
do_update = self.update_check(
self._devices[dev_name], device)
self._devices[dev_name].update_data(device)
self._devices[dev_name].set_active(True)
if dev_update:
self._do_new_devices_callback(0)
dev_update = False
if do_update:
self._do_update_callback(dev_name)
# Need to check for new inactive devices and flag
for dev_id in self._devices:
if dev_id not in active_devices:
# Device no longer active
if self._devices[dev_id].is_active:
self._devices[dev_id].set_active(False)
self._do_update_callback(dev_id)
self._do_stale_devices_callback(dev_id)
# Call device callback if new devices were found.
if new_devices:
self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.add_stale_devices_callback | python | def add_stale_devices_callback(self, callback):
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback) | Register as callback for when stale devices exist. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L125-L128 | null | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
def update_device_list(self, sessions):
""" Update device list. """
if sessions is None:
_LOGGER.error('Error updating Emby devices.')
return
new_devices = []
active_devices = []
dev_update = False
for device in sessions:
dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
try:
_LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
dev_name, device['NowPlayingItem']['Type'],
device['NowPlayingItem']['IsThemeMedia'])
except KeyError:
pass
active_devices.append(dev_name)
if dev_name not in self._devices and \
device['DeviceId'] != str(self._api_id):
_LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
dev_name)
new = EmbyDevice(device, self)
self._devices[dev_name] = new
new_devices.append(new)
elif device['DeviceId'] != str(self._api_id):
# Before we send in new data check for changes to state
# to decide if we need to fire the update callback
if not self._devices[dev_name].is_active:
# Device wasn't active on the last update
# We need to fire a device callback to let subs now
dev_update = True
do_update = self.update_check(
self._devices[dev_name], device)
self._devices[dev_name].update_data(device)
self._devices[dev_name].set_active(True)
if dev_update:
self._do_new_devices_callback(0)
dev_update = False
if do_update:
self._do_update_callback(dev_name)
# Need to check for new inactive devices and flag
for dev_id in self._devices:
if dev_id not in active_devices:
# Device no longer active
if self._devices[dev_id].is_active:
self._devices[dev_id].set_active(False)
self._do_update_callback(dev_id)
self._do_stale_devices_callback(dev_id)
# Call device callback if new devices were found.
if new_devices:
self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer._do_stale_devices_callback | python | def _do_stale_devices_callback(self, msg):
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg) | Call registered callback functions. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L130-L134 | null | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
def update_device_list(self, sessions):
""" Update device list. """
if sessions is None:
_LOGGER.error('Error updating Emby devices.')
return
new_devices = []
active_devices = []
dev_update = False
for device in sessions:
dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
try:
_LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
dev_name, device['NowPlayingItem']['Type'],
device['NowPlayingItem']['IsThemeMedia'])
except KeyError:
pass
active_devices.append(dev_name)
if dev_name not in self._devices and \
device['DeviceId'] != str(self._api_id):
_LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
dev_name)
new = EmbyDevice(device, self)
self._devices[dev_name] = new
new_devices.append(new)
elif device['DeviceId'] != str(self._api_id):
# Before we send in new data check for changes to state
# to decide if we need to fire the update callback
if not self._devices[dev_name].is_active:
# Device wasn't active on the last update
# We need to fire a device callback to let subs now
dev_update = True
do_update = self.update_check(
self._devices[dev_name], device)
self._devices[dev_name].update_data(device)
self._devices[dev_name].set_active(True)
if dev_update:
self._do_new_devices_callback(0)
dev_update = False
if do_update:
self._do_update_callback(dev_name)
# Need to check for new inactive devices and flag
for dev_id in self._devices:
if dev_id not in active_devices:
# Device no longer active
if self._devices[dev_id].is_active:
self._devices[dev_id].set_active(False)
self._do_update_callback(dev_id)
self._do_stale_devices_callback(dev_id)
# Call device callback if new devices were found.
if new_devices:
self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.add_update_callback | python | def add_update_callback(self, callback, device):
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device) | Register as callback for when a matching device changes. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L136-L139 | null | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
def update_device_list(self, sessions):
""" Update device list. """
if sessions is None:
_LOGGER.error('Error updating Emby devices.')
return
new_devices = []
active_devices = []
dev_update = False
for device in sessions:
dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
try:
_LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
dev_name, device['NowPlayingItem']['Type'],
device['NowPlayingItem']['IsThemeMedia'])
except KeyError:
pass
active_devices.append(dev_name)
if dev_name not in self._devices and \
device['DeviceId'] != str(self._api_id):
_LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
dev_name)
new = EmbyDevice(device, self)
self._devices[dev_name] = new
new_devices.append(new)
elif device['DeviceId'] != str(self._api_id):
# Before we send in new data check for changes to state
# to decide if we need to fire the update callback
if not self._devices[dev_name].is_active:
# Device wasn't active on the last update
# We need to fire a device callback to let subs now
dev_update = True
do_update = self.update_check(
self._devices[dev_name], device)
self._devices[dev_name].update_data(device)
self._devices[dev_name].set_active(True)
if dev_update:
self._do_new_devices_callback(0)
dev_update = False
if do_update:
self._do_update_callback(dev_name)
# Need to check for new inactive devices and flag
for dev_id in self._devices:
if dev_id not in active_devices:
# Device no longer active
if self._devices[dev_id].is_active:
self._devices[dev_id].set_active(False)
self._do_update_callback(dev_id)
self._do_stale_devices_callback(dev_id)
# Call device callback if new devices were found.
if new_devices:
self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.remove_update_callback | python | def remove_update_callback(self, callback, device):
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device) | Remove a registered update callback. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L141-L146 | null | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
    def update_device_list(self, sessions):
        """ Update device list.

        Reconciles the server's session list with the locally tracked
        ``self._devices``: wraps newly seen devices in :class:`EmbyDevice`,
        pushes fresh session data into known ones, and marks devices
        missing from the payload as inactive.  Fires the registered
        new-device, per-device update and stale-device callbacks as those
        transitions are detected.

        :param sessions: Decoded ``/Sessions`` payload (list of session
            dicts), or None when the fetch failed.
        """
        if sessions is None:
            _LOGGER.error('Error updating Emby devices.')
            return

        new_devices = []
        active_devices = []
        dev_update = False
        for device in sessions:
            # Key devices by "<DeviceId>.<Client>" so the same hardware
            # running two clients tracks as two devices.
            dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])

            try:
                _LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
                              dev_name, device['NowPlayingItem']['Type'],
                              device['NowPlayingItem']['IsThemeMedia'])
            except KeyError:
                # Idle sessions have no NowPlayingItem; nothing to log.
                pass

            active_devices.append(dev_name)
            # Sessions reporting this library's own api_id are our own
            # client registration and are never tracked as devices.
            if dev_name not in self._devices and \
                    device['DeviceId'] != str(self._api_id):
                _LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
                              dev_name)
                new = EmbyDevice(device, self)
                self._devices[dev_name] = new
                new_devices.append(new)
            elif device['DeviceId'] != str(self._api_id):
                # Before we send in new data check for changes to state
                # to decide if we need to fire the update callback
                if not self._devices[dev_name].is_active:
                    # Device wasn't active on the last update
                    # We need to fire a device callback to let subs now
                    dev_update = True

                # Decide *before* update_data mutates the stored session,
                # since update_check compares old vs new snapshots.
                do_update = self.update_check(
                    self._devices[dev_name], device)
                self._devices[dev_name].update_data(device)
                self._devices[dev_name].set_active(True)
                if dev_update:
                    # NOTE(review): an inactive->active transition fires the
                    # *new devices* callback (arg 0), not the per-device
                    # update one — presumably so subscribers re-scan the
                    # device list; confirm intent.
                    self._do_new_devices_callback(0)
                    dev_update = False
                if do_update:
                    self._do_update_callback(dev_name)

        # Need to check for new inactive devices and flag
        for dev_id in self._devices:
            if dev_id not in active_devices:
                # Device no longer active
                if self._devices[dev_id].is_active:
                    self._devices[dev_id].set_active(False)
                    self._do_update_callback(dev_id)
                    self._do_stale_devices_callback(dev_id)

        # Call device callback if new devices were found.
        if new_devices:
            self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer._do_update_callback | python | def _do_update_callback(self, msg):
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg) | Call registered callback functions. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L148-L154 | null | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
def update_device_list(self, sessions):
""" Update device list. """
if sessions is None:
_LOGGER.error('Error updating Emby devices.')
return
new_devices = []
active_devices = []
dev_update = False
for device in sessions:
dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
try:
_LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
dev_name, device['NowPlayingItem']['Type'],
device['NowPlayingItem']['IsThemeMedia'])
except KeyError:
pass
active_devices.append(dev_name)
if dev_name not in self._devices and \
device['DeviceId'] != str(self._api_id):
_LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
dev_name)
new = EmbyDevice(device, self)
self._devices[dev_name] = new
new_devices.append(new)
elif device['DeviceId'] != str(self._api_id):
# Before we send in new data check for changes to state
# to decide if we need to fire the update callback
if not self._devices[dev_name].is_active:
# Device wasn't active on the last update
# We need to fire a device callback to let subs now
dev_update = True
do_update = self.update_check(
self._devices[dev_name], device)
self._devices[dev_name].update_data(device)
self._devices[dev_name].set_active(True)
if dev_update:
self._do_new_devices_callback(0)
dev_update = False
if do_update:
self._do_update_callback(dev_name)
# Need to check for new inactive devices and flag
for dev_id in self._devices:
if dev_id not in active_devices:
# Device no longer active
if self._devices[dev_id].is_active:
self._devices[dev_id].set_active(False)
self._do_update_callback(dev_id)
self._do_stale_devices_callback(dev_id)
# Call device callback if new devices were found.
if new_devices:
self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.start | python | def start(self):
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.") | Public method for initiating connectivity with the emby server. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L156-L164 | [
"async def register(self):\n \"\"\"Register library device id and get initial device list. \"\"\"\n url = '{}/Sessions'.format(self.construct_url(API_URL))\n params = {'api_key': self._api_key}\n\n reg = await self.api_request(url, params)\n if reg is None:\n self._registered = False\n _LOGGER.error('Unable to register emby client.')\n else:\n self._registered = True\n _LOGGER.info('Emby client registered!, Id: %s', self.unique_id)\n self._sessions = reg\n\n # Build initial device list.\n self.update_device_list(self._sessions)\n\n asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)\n"
] | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
def update_device_list(self, sessions):
""" Update device list. """
if sessions is None:
_LOGGER.error('Error updating Emby devices.')
return
new_devices = []
active_devices = []
dev_update = False
for device in sessions:
dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
try:
_LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
dev_name, device['NowPlayingItem']['Type'],
device['NowPlayingItem']['IsThemeMedia'])
except KeyError:
pass
active_devices.append(dev_name)
if dev_name not in self._devices and \
device['DeviceId'] != str(self._api_id):
_LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
dev_name)
new = EmbyDevice(device, self)
self._devices[dev_name] = new
new_devices.append(new)
elif device['DeviceId'] != str(self._api_id):
# Before we send in new data check for changes to state
# to decide if we need to fire the update callback
if not self._devices[dev_name].is_active:
# Device wasn't active on the last update
# We need to fire a device callback to let subs now
dev_update = True
do_update = self.update_check(
self._devices[dev_name], device)
self._devices[dev_name].update_data(device)
self._devices[dev_name].set_active(True)
if dev_update:
self._do_new_devices_callback(0)
dev_update = False
if do_update:
self._do_update_callback(dev_name)
# Need to check for new inactive devices and flag
for dev_id in self._devices:
if dev_id not in active_devices:
# Device no longer active
if self._devices[dev_id].is_active:
self._devices[dev_id].set_active(False)
self._do_update_callback(dev_id)
self._do_stale_devices_callback(dev_id)
# Call device callback if new devices were found.
if new_devices:
self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.stop | python | async def stop(self):
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop) | Async method for stopping connectivity with the emby server. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L166-L177 | null | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
def update_device_list(self, sessions):
""" Update device list. """
if sessions is None:
_LOGGER.error('Error updating Emby devices.')
return
new_devices = []
active_devices = []
dev_update = False
for device in sessions:
dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
try:
_LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
dev_name, device['NowPlayingItem']['Type'],
device['NowPlayingItem']['IsThemeMedia'])
except KeyError:
pass
active_devices.append(dev_name)
if dev_name not in self._devices and \
device['DeviceId'] != str(self._api_id):
_LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
dev_name)
new = EmbyDevice(device, self)
self._devices[dev_name] = new
new_devices.append(new)
elif device['DeviceId'] != str(self._api_id):
# Before we send in new data check for changes to state
# to decide if we need to fire the update callback
if not self._devices[dev_name].is_active:
# Device wasn't active on the last update
# We need to fire a device callback to let subs now
dev_update = True
do_update = self.update_check(
self._devices[dev_name], device)
self._devices[dev_name].update_data(device)
self._devices[dev_name].set_active(True)
if dev_update:
self._do_new_devices_callback(0)
dev_update = False
if do_update:
self._do_update_callback(dev_name)
# Need to check for new inactive devices and flag
for dev_id in self._devices:
if dev_id not in active_devices:
# Device no longer active
if self._devices[dev_id].is_active:
self._devices[dev_id].set_active(False)
self._do_update_callback(dev_id)
self._do_stale_devices_callback(dev_id)
# Call device callback if new devices were found.
if new_devices:
self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.construct_url | python | def construct_url(self, style):
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None | Return http/https or ws/wss url. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L179-L192 | null | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
def update_device_list(self, sessions):
""" Update device list. """
if sessions is None:
_LOGGER.error('Error updating Emby devices.')
return
new_devices = []
active_devices = []
dev_update = False
for device in sessions:
dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
try:
_LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
dev_name, device['NowPlayingItem']['Type'],
device['NowPlayingItem']['IsThemeMedia'])
except KeyError:
pass
active_devices.append(dev_name)
if dev_name not in self._devices and \
device['DeviceId'] != str(self._api_id):
_LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
dev_name)
new = EmbyDevice(device, self)
self._devices[dev_name] = new
new_devices.append(new)
elif device['DeviceId'] != str(self._api_id):
# Before we send in new data check for changes to state
# to decide if we need to fire the update callback
if not self._devices[dev_name].is_active:
# Device wasn't active on the last update
# We need to fire a device callback to let subs now
dev_update = True
do_update = self.update_check(
self._devices[dev_name], device)
self._devices[dev_name].update_data(device)
self._devices[dev_name].set_active(True)
if dev_update:
self._do_new_devices_callback(0)
dev_update = False
if do_update:
self._do_update_callback(dev_name)
# Need to check for new inactive devices and flag
for dev_id in self._devices:
if dev_id not in active_devices:
# Device no longer active
if self._devices[dev_id].is_active:
self._devices[dev_id].set_active(False)
self._do_update_callback(dev_id)
self._do_stale_devices_callback(dev_id)
# Call device callback if new devices were found.
if new_devices:
self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.register | python | async def register(self):
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop) | Register library device id and get initial device list. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L194-L211 | [
"def construct_url(self, style):\n \"\"\" Return http/https or ws/wss url. \"\"\"\n if style is API_URL:\n if self._ssl:\n return 'https://{}:{}'.format(self._host, self._port)\n else:\n return 'http://{}:{}'.format(self._host, self._port)\n elif style is SOCKET_URL:\n if self._ssl:\n return 'wss://{}:{}'.format(self._host, self._port)\n else:\n return 'ws://{}:{}'.format(self._host, self._port)\n else:\n return None\n",
"async def api_request(self, url, params):\n \"\"\"Make api fetch request.\"\"\"\n request = None\n try:\n with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):\n request = await self._api_session.get(\n url, params=params)\n if request.status != 200:\n _LOGGER.error('Error fetching Emby data: %s', request.status)\n return None\n\n request_json = await request.json()\n if 'error' in request_json:\n _LOGGER.error('Error converting Emby data to json: %s: %s',\n request_json['error']['code'],\n request_json['error']['message'])\n return None\n return request_json\n except (aiohttp.ClientError, asyncio.TimeoutError,\n ConnectionRefusedError) as err:\n _LOGGER.error('Error fetching Emby data: %s', err)\n return None\n"
] | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
def update_device_list(self, sessions):
""" Update device list. """
if sessions is None:
_LOGGER.error('Error updating Emby devices.')
return
new_devices = []
active_devices = []
dev_update = False
for device in sessions:
dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
try:
_LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
dev_name, device['NowPlayingItem']['Type'],
device['NowPlayingItem']['IsThemeMedia'])
except KeyError:
pass
active_devices.append(dev_name)
if dev_name not in self._devices and \
device['DeviceId'] != str(self._api_id):
_LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
dev_name)
new = EmbyDevice(device, self)
self._devices[dev_name] = new
new_devices.append(new)
elif device['DeviceId'] != str(self._api_id):
# Before we send in new data check for changes to state
# to decide if we need to fire the update callback
if not self._devices[dev_name].is_active:
# Device wasn't active on the last update
# We need to fire a device callback to let subs now
dev_update = True
do_update = self.update_check(
self._devices[dev_name], device)
self._devices[dev_name].update_data(device)
self._devices[dev_name].set_active(True)
if dev_update:
self._do_new_devices_callback(0)
dev_update = False
if do_update:
self._do_update_callback(dev_name)
# Need to check for new inactive devices and flag
for dev_id in self._devices:
if dev_id not in active_devices:
# Device no longer active
if self._devices[dev_id].is_active:
self._devices[dev_id].set_active(False)
self._do_update_callback(dev_id)
self._do_stale_devices_callback(dev_id)
# Call device callback if new devices were found.
if new_devices:
self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
                     include_item_types='episode'):
    """Schedule the async latest-items fetch on the event loop.

    Fire-and-forget wrapper around async_get_latest_items(); the result
    is surfaced through the task's done-callback.
    """
    if not self._registered:
        _LOGGER.debug('Client not registered, cannot get items.')
        return

    def _unwrap(future):
        """Return the finished task's result."""
        return future.result()

    task = asyncio.ensure_future(
        self.async_get_latest_items(
            user_id, limit, is_played, include_item_types),
        loop=self._event_loop)
    task.add_done_callback(_unwrap)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
                                 include_item_types='episode'):
    """Return the N most recent library additions for a user.

    Queries /Users/{id}/Items/Latest; returns the decoded JSON list,
    or None (implicitly) when unregistered or the request fails.
    """
    if not self._registered:
        _LOGGER.debug('Client not registered, cannot get items.')
        return
    endpoint = '{0}/Users/{1}/Items/Latest'.format(
        self.construct_url(API_URL), user_id)
    query = {'api_key': self._api_key,
             'IncludeItemTypes': include_item_types,
             'Limit': limit,
             'IsPlayed': is_played}
    fetched = await self.api_request(endpoint, query)
    if fetched is not None:
        return fetched
    _LOGGER.debug('Unable to fetch items.')
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.api_post | python | async def api_post(self, url, params):
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None | Make api post request. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L213-L230 | null | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
    """Initialize base class.

    :param host: Emby server hostname or IP.
    :param api_key: Emby API key used on every request.
    :param port: Server port (8096 is the Emby HTTP default).
    :param ssl: Use https/wss when True.
    :param loop: Optional existing asyncio event loop; a private loop
        is created (and later run by ``start()``) when omitted.
    """
    self._host = host
    self._api_key = api_key
    self._port = port
    self._ssl = ssl
    self._sessions = None
    self._devices = {}
    _LOGGER.debug("pyEmby %s initializing new server at: %s",
                  __version__, host)
    if loop is None:
        _LOGGER.info("Creating our own event loop.")
        self._event_loop = asyncio.new_event_loop()
        # _own_loop tells start()/stop() whether we drive the loop.
        self._own_loop = True
    else:
        _LOGGER.info("Latching onto an existing event loop.")
        self._event_loop = loop
        self._own_loop = False
    asyncio.set_event_loop(self._event_loop)
    # Enable for asyncio debug logging
    # self._event_loop.set_debug(True)
    # MAC-derived node id doubles as this client's Emby DeviceId.
    self._api_id = uuid.getnode()
    headers = DEFAULT_HEADERS.copy()
    headers.update({'x-emby-authorization':
                    'MediaBrowser Client="pyEmby",'
                    'Device="HomeAssistant",'
                    'DeviceId="{}",'
                    'Version="{}"'.format(
                        self._api_id, __version__)})
    # NOTE(review): certificate verification is disabled unconditionally,
    # even when ssl=True -- presumably to tolerate self-signed certs.
    conn = aiohttp.TCPConnector(verify_ssl=False)
    self._api_session = aiohttp.ClientSession(
        connector=conn, headers=headers, loop=self._event_loop)
    self.wsck = None
    # Callbacks
    self._new_devices_callbacks = []
    self._stale_devices_callbacks = []
    self._update_callbacks = []
    self._shutdown = False
    self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
    """Build the server base URL for the requested transport.

    ``style`` selects between the REST endpoint (API_URL ->
    http/https) and the websocket endpoint (SOCKET_URL -> ws/wss);
    any other value yields None.
    """
    if style is API_URL:
        scheme = 'https' if self._ssl else 'http'
    elif style is SOCKET_URL:
        scheme = 'wss' if self._ssl else 'ws'
    else:
        return None
    return '{}://{}:{}'.format(scheme, self._host, self._port)
async def register(self):
    """Register library device id and get initial device list.

    A successful GET of /Sessions is treated as registration; on
    success the websocket listener is started, on failure the client
    stays unregistered (callers re-check ``self._registered``).
    """
    url = '{}/Sessions'.format(self.construct_url(API_URL))
    params = {'api_key': self._api_key}
    reg = await self.api_request(url, params)
    if reg is None:
        self._registered = False
        _LOGGER.error('Unable to register emby client.')
    else:
        self._registered = True
        _LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
        self._sessions = reg
        # Build initial device list.
        self.update_device_list(self._sessions)
        # Kick off the long-running websocket consumer.
        asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_request(self, url, params):
    """Make api fetch request.

    GETs ``url`` with ``params`` under DEFAULT_TIMEOUT and returns the
    decoded JSON body, or None on any HTTP/network/JSON-level error
    (errors are logged, never raised to the caller).
    """
    request = None
    try:
        with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
            request = await self._api_session.get(
                url, params=params)
        if request.status != 200:
            _LOGGER.error('Error fetching Emby data: %s', request.status)
            return None
        request_json = await request.json()
        # Emby reports some failures inside a 200 body.
        if 'error' in request_json:
            _LOGGER.error('Error converting Emby data to json: %s: %s',
                          request_json['error']['code'],
                          request_json['error']['message'])
            return None
        return request_json
    except (aiohttp.ClientError, asyncio.TimeoutError,
            ConnectionRefusedError) as err:
        _LOGGER.error('Error fetching Emby data: %s', err)
        return None
async def socket_connection(self):
    """Open and maintain the websocket connection to the Emby server.

    Runs until ``stop()`` sets the shutdown flag: connects, subscribes
    to periodic session updates, then consumes messages forever.  Any
    connection-level failure is converted to/raised as one of the
    handled exceptions and triggers a delayed reconnect.
    """
    if not self._registered:
        _LOGGER.error('Client not registered, cannot start socket.')
        return
    url = '{}?DeviceID={}&api_key={}'.format(
        self.construct_url(SOCKET_URL), self._api_id, self._api_key)
    fail_count = 0
    while True:
        _LOGGER.debug('Attempting Socket Connection.')
        try:
            with async_timeout.timeout(DEFAULT_TIMEOUT,
                                       loop=self._event_loop):
                self.wsck = await self._api_session.ws_connect(url)
            # Enable sever session updates:
            try:
                msg = await self.wsck.send_str(
                    '{"MessageType":"SessionsStart", "Data": "0,1500"}')
            except Exception as err:
                # Catch all for now; re-raise as ValueError so the outer
                # handler treats it like any other connection failure.
                _LOGGER.error('Failure setting session updates: %s', err)
                raise ValueError('Session updates error.')
            _LOGGER.debug('Socket Connected!')
            fail_count = 0
            while True:
                msg = await self.wsck.receive()
                if msg.type == aiohttp.WSMsgType.text:
                    # Process data
                    self.process_msg(msg.data)
                elif msg.type == aiohttp.WSMsgType.closed:
                    raise ValueError('Websocket was closed.')
                elif msg.type == aiohttp.WSMsgType.error:
                    _LOGGER.debug(
                        'Websocket encountered an error: %s', msg)
                    raise ValueError('Websocket error.')
        except (aiohttp.ClientError, asyncio.TimeoutError,
                aiohttp.WSServerHandshakeError,
                ConnectionRefusedError, OSError, ValueError) as err:
            if not self._shutdown:
                fail_count += 1
                # NOTE: the logged backoff grows with fail_count but the
                # actual delay is a fixed 15s (pre-existing mismatch).
                _LOGGER.debug('Websocket unintentionally closed.'
                              ' Trying reconnect in %ss. Error: %s',
                              (fail_count * 5) + 5, err)
                # BUG FIX: asyncio.sleep()'s second positional argument
                # is the *result* value, not an event loop; the original
                # passed self._event_loop there by mistake.
                await asyncio.sleep(15)
                continue
            else:
                break
def process_msg(self, msg):
    """Process messages from the event stream."""
    payload = json.loads(msg)
    kind = payload['MessageType']
    data = payload['Data']
    _LOGGER.debug('New websocket message recieved of type: %s', kind)
    if kind != 'Sessions':
        # May process other message types in the future.
        # Other known types are:
        #   - PlaybackStarted
        #   - PlaybackStopped
        #   - SessionEnded
        return
    self._sessions = data
    # Check for new devices and update as needed.
    self.update_device_list(self._sessions)
def update_device_list(self, sessions):
    """ Update device list.

    Reconciles the server's session list with the cached
    ``self._devices`` dict: wraps unseen sessions in EmbyDevice,
    refreshes data on known ones, and marks devices missing from the
    payload as inactive.  Fires new-device / update / stale callbacks
    as a side effect.
    """
    if sessions is None:
        _LOGGER.error('Error updating Emby devices.')
        return
    new_devices = []
    active_devices = []
    dev_update = False
    for device in sessions:
        # Key is "<DeviceId>.<Client>" so one physical device running
        # two client apps is tracked as two entries.
        dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
        try:
            _LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
                          dev_name, device['NowPlayingItem']['Type'],
                          device['NowPlayingItem']['IsThemeMedia'])
        except KeyError:
            # Idle sessions carry no NowPlayingItem; nothing to log.
            pass
        active_devices.append(dev_name)
        # Skip our own client registration (DeviceId == our api id).
        if dev_name not in self._devices and \
                device['DeviceId'] != str(self._api_id):
            _LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
                          dev_name)
            new = EmbyDevice(device, self)
            self._devices[dev_name] = new
            new_devices.append(new)
        elif device['DeviceId'] != str(self._api_id):
            # Before we send in new data check for changes to state
            # to decide if we need to fire the update callback
            if not self._devices[dev_name].is_active:
                # Device wasn't active on the last update
                # We need to fire a device callback to let subs now
                dev_update = True
            do_update = self.update_check(
                self._devices[dev_name], device)
            self._devices[dev_name].update_data(device)
            self._devices[dev_name].set_active(True)
            if dev_update:
                self._do_new_devices_callback(0)
                dev_update = False
            if do_update:
                self._do_update_callback(dev_name)
    # Need to check for new inactive devices and flag
    for dev_id in self._devices:
        if dev_id not in active_devices:
            # Device no longer active
            if self._devices[dev_id].is_active:
                self._devices[dev_id].set_active(False)
                self._do_update_callback(dev_id)
                self._do_stale_devices_callback(dev_id)
    # Call device callback if new devices were found.
    if new_devices:
        self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.api_request | python | async def api_request(self, url, params):
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None | Make api fetch request. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L232-L253 | null | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
def update_device_list(self, sessions):
""" Update device list. """
if sessions is None:
_LOGGER.error('Error updating Emby devices.')
return
new_devices = []
active_devices = []
dev_update = False
for device in sessions:
dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
try:
_LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
dev_name, device['NowPlayingItem']['Type'],
device['NowPlayingItem']['IsThemeMedia'])
except KeyError:
pass
active_devices.append(dev_name)
if dev_name not in self._devices and \
device['DeviceId'] != str(self._api_id):
_LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
dev_name)
new = EmbyDevice(device, self)
self._devices[dev_name] = new
new_devices.append(new)
elif device['DeviceId'] != str(self._api_id):
# Before we send in new data check for changes to state
# to decide if we need to fire the update callback
if not self._devices[dev_name].is_active:
# Device wasn't active on the last update
# We need to fire a device callback to let subs now
dev_update = True
do_update = self.update_check(
self._devices[dev_name], device)
self._devices[dev_name].update_data(device)
self._devices[dev_name].set_active(True)
if dev_update:
self._do_new_devices_callback(0)
dev_update = False
if do_update:
self._do_update_callback(dev_name)
# Need to check for new inactive devices and flag
for dev_id in self._devices:
if dev_id not in active_devices:
# Device no longer active
if self._devices[dev_id].is_active:
self._devices[dev_id].set_active(False)
self._do_update_callback(dev_id)
self._do_stale_devices_callback(dev_id)
# Call device callback if new devices were found.
if new_devices:
self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.socket_connection | python | async def socket_connection(self):
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break | Open websocket connection. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L255-L307 | [
"def construct_url(self, style):\n \"\"\" Return http/https or ws/wss url. \"\"\"\n if style is API_URL:\n if self._ssl:\n return 'https://{}:{}'.format(self._host, self._port)\n else:\n return 'http://{}:{}'.format(self._host, self._port)\n elif style is SOCKET_URL:\n if self._ssl:\n return 'wss://{}:{}'.format(self._host, self._port)\n else:\n return 'ws://{}:{}'.format(self._host, self._port)\n else:\n return None\n",
"def process_msg(self, msg):\n \"\"\"Process messages from the event stream.\"\"\"\n jmsg = json.loads(msg)\n msgtype = jmsg['MessageType']\n msgdata = jmsg['Data']\n\n _LOGGER.debug('New websocket message recieved of type: %s', msgtype)\n if msgtype == 'Sessions':\n self._sessions = msgdata\n # Check for new devices and update as needed.\n self.update_device_list(self._sessions)\n \"\"\"\n May process other message types in the future.\n Other known types are:\n - PlaybackStarted\n - PlaybackStopped\n - SessionEnded\n \"\"\"\n"
] | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
    """Make api post request.

    :param url: Full request URL.
    :param params: Query-parameter dict (typically includes the api key).
    :return: Response body text on success, otherwise ``None``.
        NOTE(review): only HTTP 204 counts as success here — confirm
        no Emby POST endpoint used by this library returns 200.
    """
    post = None
    try:
        with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
            post = await self._api_session.post(
                url, params=params)

            if post.status != 204:
                _LOGGER.error('Error posting Emby data: %s', post.status)
                return None

            post_result = await post.text()
            return post_result

    # Network failures and timeouts are reported as a None result, not
    # raised to the caller.
    except (aiohttp.ClientError, asyncio.TimeoutError,
            ConnectionRefusedError) as err:
        _LOGGER.error('Error posting Emby data: %s', err)
        return None
async def api_request(self, url, params):
    """Make api fetch request.

    :param url: Full request URL.
    :param params: Query-parameter dict.
    :return: Decoded JSON payload, or ``None`` on a non-200 status, a
        network error/timeout, or an Emby-level ``error`` key in the
        payload.
    """
    request = None
    try:
        with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
            request = await self._api_session.get(
                url, params=params)

            if request.status != 200:
                _LOGGER.error('Error fetching Emby data: %s', request.status)
                return None

            request_json = await request.json()

            # Emby can return 200 with an embedded error object.
            if 'error' in request_json:
                _LOGGER.error('Error converting Emby data to json: %s: %s',
                              request_json['error']['code'],
                              request_json['error']['message'])
                return None
            return request_json

    # Network failures and timeouts are reported as a None result, not
    # raised to the caller.
    except (aiohttp.ClientError, asyncio.TimeoutError,
            ConnectionRefusedError) as err:
        _LOGGER.error('Error fetching Emby data: %s', err)
        return None
def process_msg(self, msg):
    """Process messages from the event stream.

    Only ``Sessions`` messages are acted on: their payload (a full
    session list) replaces the cached sessions and triggers a device
    list refresh.  All other message types are currently ignored.

    :param msg: Raw JSON text received from the websocket.
    """
    jmsg = json.loads(msg)
    msgtype = jmsg['MessageType']
    msgdata = jmsg['Data']
    _LOGGER.debug('New websocket message recieved of type: %s', msgtype)

    if msgtype == 'Sessions':
        self._sessions = msgdata
        # Check for new devices and update as needed.
        self.update_device_list(self._sessions)

    # May process other message types in the future.
    # Other known types are:
    #     - PlaybackStarted
    #     - PlaybackStopped
    #     - SessionEnded
    # (This note was previously a dangling triple-quoted string literal,
    # i.e. a no-op expression statement; converted to real comments.)
def update_device_list(self, sessions):
    """ Update device list.

    Diffs *sessions* against the cached device map: wraps unseen
    sessions in new ``EmbyDevice`` objects, refreshes data on known
    ones, and flags devices absent from *sessions* as inactive/stale.
    Fires the new-device, update, and stale-device callbacks as needed.

    :param sessions: List of session dicts from the ``/Sessions`` API
        or the websocket ``Sessions`` message; ``None`` logs an error
        and returns.
    """
    if sessions is None:
        _LOGGER.error('Error updating Emby devices.')
        return

    new_devices = []
    active_devices = []
    dev_update = False
    for device in sessions:
        # Device key: unique per physical device + client app pair.
        dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])

        try:
            _LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
                          dev_name, device['NowPlayingItem']['Type'],
                          device['NowPlayingItem']['IsThemeMedia'])
        except KeyError:
            # Idle sessions carry no NowPlayingItem; nothing to log.
            pass

        active_devices.append(dev_name)
        # Sessions whose DeviceId matches our own api id are this
        # library itself and are never tracked.
        if dev_name not in self._devices and \
                device['DeviceId'] != str(self._api_id):
            _LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
                          dev_name)
            new = EmbyDevice(device, self)
            self._devices[dev_name] = new
            new_devices.append(new)
        elif device['DeviceId'] != str(self._api_id):
            # Before we send in new data check for changes to state
            # to decide if we need to fire the update callback
            if not self._devices[dev_name].is_active:
                # Device wasn't active on the last update.
                # We need to fire a device callback to let subscribers know.
                dev_update = True

            # Decide *before* update_data overwrites the old state.
            do_update = self.update_check(
                self._devices[dev_name], device)
            self._devices[dev_name].update_data(device)
            self._devices[dev_name].set_active(True)
            if dev_update:
                self._do_new_devices_callback(0)
                dev_update = False
            if do_update:
                self._do_update_callback(dev_name)

    # Need to check for newly inactive devices and flag them.
    for dev_id in self._devices:
        if dev_id not in active_devices:
            # Device no longer active
            if self._devices[dev_id].is_active:
                self._devices[dev_id].set_active(False)
                self._do_update_callback(dev_id)
                self._do_stale_devices_callback(dev_id)

    # Call device callback if new devices were found.
    if new_devices:
        self._do_new_devices_callback(0)
def update_check(self, existing, new):
    """ Check device state to see if we need to fire the callback.

    True if either state is 'Playing'
    False if both states are: 'Paused', 'Idle', or 'Off'
    True on any state transition.
    Theme media (intro music etc.) never triggers a callback.
    """
    previous_state = existing.state

    raw = existing.session_raw
    if 'NowPlayingItem' in raw:
        old_theme = raw['NowPlayingItem'].get('IsThemeMedia', False)
    else:
        old_theme = False

    if 'NowPlayingItem' in new:
        if new['PlayState']['IsPaused']:
            current_state = STATE_PAUSED
        else:
            current_state = STATE_PLAYING
        new_theme = new['NowPlayingItem'].get('IsThemeMedia', False)
    else:
        current_state = STATE_IDLE
        new_theme = False

    # Theme media on either side suppresses the callback entirely.
    if old_theme or new_theme:
        return False
    if STATE_PLAYING in (previous_state, current_state):
        return True
    return previous_state != current_state
def get_latest_items(self, user_id, limit=3, is_played='false',
                     include_item_types='episode'):
    """ Get latest items by scheduling the worker method.

    Fire-and-forget wrapper around ``async_get_latest_items``.
    NOTE(review): the done-callback's return value goes nowhere, so
    callers of this sync entry point never receive the fetched items —
    confirm whether that is intended.

    :param user_id: Emby user id to query.
    :param limit: Maximum number of items to request.
    :param is_played: 'true'/'false' string filter for played state.
    :param include_item_types: Emby item-type filter (e.g. 'episode').
    """
    if not self._registered:
        _LOGGER.debug('Client not registered, cannot get items.')
        return

    def return_result(future):
        """ Return result. """
        return future.result()

    run_coro = asyncio.ensure_future(self.async_get_latest_items(
        user_id, limit, is_played, include_item_types),
                                     loop=self._event_loop)
    run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
                                 include_item_types='episode'):
    """ Return XX most recent movie or episode additions to library

    :param user_id: Emby user id to query.
    :param limit: Maximum number of items to request.
    :param is_played: 'true'/'false' string filter for played state.
    :param include_item_types: Emby item-type filter (e.g. 'episode').
    :return: Decoded JSON items on success; ``None`` (implicitly) when
        the client is not registered or the request fails.
    """
    if not self._registered:
        _LOGGER.debug('Client not registered, cannot get items.')
        return

    url = '{0}/Users/{1}/Items/Latest'.format(
        self.construct_url(API_URL), user_id)
    params = {'api_key': self._api_key,
              'IncludeItemTypes': include_item_types,
              'Limit': limit,
              'IsPlayed': is_played}

    items = await self.api_request(url, params)
    if items is None:
        _LOGGER.debug('Unable to fetch items.')
    else:
        return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.process_msg | python | def process_msg(self, msg):
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
""" | Process messages from the event stream. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L309-L326 | [
"def update_device_list(self, sessions):\n \"\"\" Update device list. \"\"\"\n if sessions is None:\n _LOGGER.error('Error updating Emby devices.')\n return\n\n new_devices = []\n active_devices = []\n dev_update = False\n for device in sessions:\n dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])\n\n try:\n _LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',\n dev_name, device['NowPlayingItem']['Type'],\n device['NowPlayingItem']['IsThemeMedia'])\n except KeyError:\n pass\n\n active_devices.append(dev_name)\n if dev_name not in self._devices and \\\n device['DeviceId'] != str(self._api_id):\n _LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',\n dev_name)\n new = EmbyDevice(device, self)\n self._devices[dev_name] = new\n new_devices.append(new)\n elif device['DeviceId'] != str(self._api_id):\n # Before we send in new data check for changes to state\n # to decide if we need to fire the update callback\n if not self._devices[dev_name].is_active:\n # Device wasn't active on the last update\n # We need to fire a device callback to let subs now\n dev_update = True\n\n do_update = self.update_check(\n self._devices[dev_name], device)\n self._devices[dev_name].update_data(device)\n self._devices[dev_name].set_active(True)\n if dev_update:\n self._do_new_devices_callback(0)\n dev_update = False\n if do_update:\n self._do_update_callback(dev_name)\n\n # Need to check for new inactive devices and flag\n for dev_id in self._devices:\n if dev_id not in active_devices:\n # Device no longer active\n if self._devices[dev_id].is_active:\n self._devices[dev_id].set_active(False)\n self._do_update_callback(dev_id)\n self._do_stale_devices_callback(dev_id)\n\n # Call device callback if new devices were found.\n if new_devices:\n self._do_new_devices_callback(0)\n"
] | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
    """Initialize base class.

    :param host: Emby server hostname or IP.
    :param api_key: Emby API key used to authenticate requests.
    :param port: Emby HTTP(S) port (default 8096).
    :param ssl: Use https/wss when True.
    :param loop: Existing asyncio event loop to attach to; when None a
        private loop is created and managed by this instance.
    """
    self._host = host
    self._api_key = api_key
    self._port = port
    self._ssl = ssl
    self._sessions = None
    self._devices = {}

    _LOGGER.debug("pyEmby %s initializing new server at: %s",
                  __version__, host)

    if loop is None:
        _LOGGER.info("Creating our own event loop.")
        self._event_loop = asyncio.new_event_loop()
        self._own_loop = True
    else:
        _LOGGER.info("Latching onto an existing event loop.")
        self._event_loop = loop
        self._own_loop = False

    asyncio.set_event_loop(self._event_loop)
    # Enable for asyncio debug logging
    # self._event_loop.set_debug(True)

    # MAC-derived value used as this client's DeviceId with the server.
    self._api_id = uuid.getnode()
    headers = DEFAULT_HEADERS.copy()
    headers.update({'x-emby-authorization':
                    'MediaBrowser Client="pyEmby",'
                    'Device="HomeAssistant",'
                    'DeviceId="{}",'
                    'Version="{}"'.format(
                        self._api_id, __version__)})

    # NOTE(review): certificate verification is disabled for every
    # connection — confirm this is intentional (self-signed setups).
    conn = aiohttp.TCPConnector(verify_ssl=False)
    self._api_session = aiohttp.ClientSession(
        connector=conn, headers=headers, loop=self._event_loop)
    self.wsck = None

    # Callbacks
    self._new_devices_callbacks = []
    self._stale_devices_callbacks = []
    self._update_callbacks = []

    self._shutdown = False
    self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
    """ Open websocket connection.

    Connects to the Emby websocket, enables periodic session updates,
    and dispatches incoming text messages to ``process_msg``.  Any
    failure tears the socket down and retries until ``stop()`` sets the
    shutdown flag.
    """
    if not self._registered:
        _LOGGER.error('Client not registered, cannot start socket.')
        return

    url = '{}?DeviceID={}&api_key={}'.format(
        self.construct_url(SOCKET_URL), self._api_id, self._api_key)

    fail_count = 0
    while True:
        _LOGGER.debug('Attempting Socket Connection.')
        try:
            with async_timeout.timeout(DEFAULT_TIMEOUT,
                                       loop=self._event_loop):
                self.wsck = await self._api_session.ws_connect(url)

            # Enable server session updates:
            try:
                # send_str returns None, so the previous `msg =`
                # assignment here was dropped as dead.
                await self.wsck.send_str(
                    '{"MessageType":"SessionsStart", "Data": "0,1500"}')
            except Exception as err:
                # Catch all for now
                _LOGGER.error('Failure setting session updates: %s', err)
                raise ValueError('Session updates error.')

            _LOGGER.debug('Socket Connected!')
            fail_count = 0
            while True:
                msg = await self.wsck.receive()
                if msg.type == aiohttp.WSMsgType.text:
                    # Process data
                    self.process_msg(msg.data)
                elif msg.type == aiohttp.WSMsgType.closed:
                    raise ValueError('Websocket was closed.')
                elif msg.type == aiohttp.WSMsgType.error:
                    _LOGGER.debug(
                        'Websocket encountered an error: %s', msg)
                    raise ValueError('Websocket error.')

        except (aiohttp.ClientError, asyncio.TimeoutError,
                aiohttp.WSServerHandshakeError,
                ConnectionRefusedError, OSError, ValueError) as err:
            if not self._shutdown:
                fail_count += 1
                _LOGGER.debug('Websocket unintentionally closed.'
                              ' Trying reconnect in %ss. Error: %s',
                              (fail_count * 5) + 5, err)
                # Bug fix: asyncio.sleep(delay, x) treats its second
                # positional argument as the coroutine's *result*, not
                # a loop, so passing self._event_loop there was a
                # signature misuse.
                # NOTE(review): the logged delay ((fail_count*5)+5 s)
                # still differs from the fixed 15 s sleep below —
                # confirm which backoff is intended.
                await asyncio.sleep(15)
                continue
            else:
                break
def update_device_list(self, sessions):
""" Update device list. """
if sessions is None:
_LOGGER.error('Error updating Emby devices.')
return
new_devices = []
active_devices = []
dev_update = False
for device in sessions:
dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
try:
_LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
dev_name, device['NowPlayingItem']['Type'],
device['NowPlayingItem']['IsThemeMedia'])
except KeyError:
pass
active_devices.append(dev_name)
if dev_name not in self._devices and \
device['DeviceId'] != str(self._api_id):
_LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
dev_name)
new = EmbyDevice(device, self)
self._devices[dev_name] = new
new_devices.append(new)
elif device['DeviceId'] != str(self._api_id):
# Before we send in new data check for changes to state
# to decide if we need to fire the update callback
if not self._devices[dev_name].is_active:
# Device wasn't active on the last update
# We need to fire a device callback to let subs now
dev_update = True
do_update = self.update_check(
self._devices[dev_name], device)
self._devices[dev_name].update_data(device)
self._devices[dev_name].set_active(True)
if dev_update:
self._do_new_devices_callback(0)
dev_update = False
if do_update:
self._do_update_callback(dev_name)
# Need to check for new inactive devices and flag
for dev_id in self._devices:
if dev_id not in active_devices:
# Device no longer active
if self._devices[dev_id].is_active:
self._devices[dev_id].set_active(False)
self._do_update_callback(dev_id)
self._do_stale_devices_callback(dev_id)
# Call device callback if new devices were found.
if new_devices:
self._do_new_devices_callback(0)
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.update_device_list | python | def update_device_list(self, sessions):
if sessions is None:
_LOGGER.error('Error updating Emby devices.')
return
new_devices = []
active_devices = []
dev_update = False
for device in sessions:
dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
try:
_LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
dev_name, device['NowPlayingItem']['Type'],
device['NowPlayingItem']['IsThemeMedia'])
except KeyError:
pass
active_devices.append(dev_name)
if dev_name not in self._devices and \
device['DeviceId'] != str(self._api_id):
_LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
dev_name)
new = EmbyDevice(device, self)
self._devices[dev_name] = new
new_devices.append(new)
elif device['DeviceId'] != str(self._api_id):
# Before we send in new data check for changes to state
# to decide if we need to fire the update callback
if not self._devices[dev_name].is_active:
# Device wasn't active on the last update
# We need to fire a device callback to let subs now
dev_update = True
do_update = self.update_check(
self._devices[dev_name], device)
self._devices[dev_name].update_data(device)
self._devices[dev_name].set_active(True)
if dev_update:
self._do_new_devices_callback(0)
dev_update = False
if do_update:
self._do_update_callback(dev_name)
# Need to check for new inactive devices and flag
for dev_id in self._devices:
if dev_id not in active_devices:
# Device no longer active
if self._devices[dev_id].is_active:
self._devices[dev_id].set_active(False)
self._do_update_callback(dev_id)
self._do_stale_devices_callback(dev_id)
# Call device callback if new devices were found.
if new_devices:
self._do_new_devices_callback(0) | Update device list. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L328-L384 | [
"def _do_new_devices_callback(self, msg):\n \"\"\"Call registered callback functions.\"\"\"\n for callback in self._new_devices_callbacks:\n _LOGGER.debug('Devices callback %s', callback)\n self._event_loop.call_soon(callback, msg)\n",
"def _do_stale_devices_callback(self, msg):\n \"\"\"Call registered callback functions.\"\"\"\n for callback in self._stale_devices_callbacks:\n _LOGGER.debug('Stale Devices callback %s', callback)\n self._event_loop.call_soon(callback, msg)\n",
"def _do_update_callback(self, msg):\n \"\"\"Call registered callback functions.\"\"\"\n for callback, device in self._update_callbacks:\n if device == msg:\n _LOGGER.debug('Update callback %s for device %s by %s',\n callback, device, msg)\n self._event_loop.call_soon(callback, msg)\n"
] | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
def update_check(self, existing, new):
""" Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition.
"""
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/server.py | EmbyServer.update_check | python | def update_check(self, existing, new):
old_state = existing.state
if 'NowPlayingItem' in existing.session_raw:
try:
old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
except KeyError:
old_theme = False
else:
old_theme = False
if 'NowPlayingItem' in new:
if new['PlayState']['IsPaused']:
new_state = STATE_PAUSED
else:
new_state = STATE_PLAYING
try:
new_theme = new['NowPlayingItem']['IsThemeMedia']
except KeyError:
new_theme = False
else:
new_state = STATE_IDLE
new_theme = False
if old_theme or new_theme:
return False
elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
return True
elif old_state != new_state:
return True
else:
return False | Check device state to see if we need to fire the callback.
True if either state is 'Playing'
False if both states are: 'Paused', 'Idle', or 'Off'
True on any state transition. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L386-L424 | null | class EmbyServer(object):
"""Emby test."""
def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
"""Initialize base class."""
self._host = host
self._api_key = api_key
self._port = port
self._ssl = ssl
self._sessions = None
self._devices = {}
_LOGGER.debug("pyEmby %s initializing new server at: %s",
__version__, host)
if loop is None:
_LOGGER.info("Creating our own event loop.")
self._event_loop = asyncio.new_event_loop()
self._own_loop = True
else:
_LOGGER.info("Latching onto an existing event loop.")
self._event_loop = loop
self._own_loop = False
asyncio.set_event_loop(self._event_loop)
# Enable for asyncio debug logging
# self._event_loop.set_debug(True)
self._api_id = uuid.getnode()
headers = DEFAULT_HEADERS.copy()
headers.update({'x-emby-authorization':
'MediaBrowser Client="pyEmby",'
'Device="HomeAssistant",'
'DeviceId="{}",'
'Version="{}"'.format(
self._api_id, __version__)})
conn = aiohttp.TCPConnector(verify_ssl=False)
self._api_session = aiohttp.ClientSession(
connector=conn, headers=headers, loop=self._event_loop)
self.wsck = None
# Callbacks
self._new_devices_callbacks = []
self._stale_devices_callbacks = []
self._update_callbacks = []
self._shutdown = False
self._registered = False
@property
def unique_id(self):
"""Return unique ID for connection to Emby."""
return self._api_id
@property
def api_key(self):
""" Return api key. """
return self._api_key
@property
@deprecated_name('get_sessions')
def sessions(self):
""" Return sessions json. """
return self._sessions
@property
def devices(self):
""" Return devices dictionary. """
return self._devices
def add_new_devices_callback(self, callback):
"""Register as callback for when new devices are added. """
self._new_devices_callbacks.append(callback)
_LOGGER.debug('Added new devices callback to %s', callback)
def _do_new_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._new_devices_callbacks:
_LOGGER.debug('Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_stale_devices_callback(self, callback):
"""Register as callback for when stale devices exist. """
self._stale_devices_callbacks.append(callback)
_LOGGER.debug('Added stale devices callback to %s', callback)
def _do_stale_devices_callback(self, msg):
"""Call registered callback functions."""
for callback in self._stale_devices_callbacks:
_LOGGER.debug('Stale Devices callback %s', callback)
self._event_loop.call_soon(callback, msg)
def add_update_callback(self, callback, device):
"""Register as callback for when a matching device changes."""
self._update_callbacks.append([callback, device])
_LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if [callback, device] in self._update_callbacks:
self._update_callbacks.remove([callback, device])
_LOGGER.debug('Removed update callback %s for %s',
callback, device)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, device in self._update_callbacks:
if device == msg:
_LOGGER.debug('Update callback %s for device %s by %s',
callback, device, msg)
self._event_loop.call_soon(callback, msg)
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.")
async def stop(self):
"""Async method for stopping connectivity with the emby server."""
self._shutdown = True
if self.wsck:
_LOGGER.info('Closing Emby server websocket.')
await self.wsck.close()
self.wsck = None
if self._own_loop:
_LOGGER.info("Shutting down Emby server loop...")
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
def construct_url(self, style):
""" Return http/https or ws/wss url. """
if style is API_URL:
if self._ssl:
return 'https://{}:{}'.format(self._host, self._port)
else:
return 'http://{}:{}'.format(self._host, self._port)
elif style is SOCKET_URL:
if self._ssl:
return 'wss://{}:{}'.format(self._host, self._port)
else:
return 'ws://{}:{}'.format(self._host, self._port)
else:
return None
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)
async def api_post(self, url, params):
"""Make api post request."""
post = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
post = await self._api_session.post(
url, params=params)
if post.status != 204:
_LOGGER.error('Error posting Emby data: %s', post.status)
return None
post_result = await post.text()
return post_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error posting Emby data: %s', err)
return None
async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None
async def socket_connection(self):
""" Open websocket connection. """
if not self._registered:
_LOGGER.error('Client not registered, cannot start socket.')
return
url = '{}?DeviceID={}&api_key={}'.format(
self.construct_url(SOCKET_URL), self._api_id, self._api_key)
fail_count = 0
while True:
_LOGGER.debug('Attempting Socket Connection.')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT,
loop=self._event_loop):
self.wsck = await self._api_session.ws_connect(url)
# Enable sever session updates:
try:
msg = await self.wsck.send_str(
'{"MessageType":"SessionsStart", "Data": "0,1500"}')
except Exception as err:
# Catch all for now
_LOGGER.error('Failure setting session updates: %s', err)
raise ValueError('Session updates error.')
_LOGGER.debug('Socket Connected!')
fail_count = 0
while True:
msg = await self.wsck.receive()
if msg.type == aiohttp.WSMsgType.text:
# Process data
self.process_msg(msg.data)
elif msg.type == aiohttp.WSMsgType.closed:
raise ValueError('Websocket was closed.')
elif msg.type == aiohttp.WSMsgType.error:
_LOGGER.debug(
'Websocket encountered an error: %s', msg)
raise ValueError('Websocket error.')
except (aiohttp.ClientError, asyncio.TimeoutError,
aiohttp.WSServerHandshakeError,
ConnectionRefusedError, OSError, ValueError) as err:
if not self._shutdown:
fail_count += 1
_LOGGER.debug('Websocket unintentionally closed.'
' Trying reconnect in %ss. Error: %s',
(fail_count * 5) + 5, err)
await asyncio.sleep(15, self._event_loop)
continue
else:
break
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message recieved of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
"""
def update_device_list(self, sessions):
""" Update device list. """
if sessions is None:
_LOGGER.error('Error updating Emby devices.')
return
new_devices = []
active_devices = []
dev_update = False
for device in sessions:
dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
try:
_LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
dev_name, device['NowPlayingItem']['Type'],
device['NowPlayingItem']['IsThemeMedia'])
except KeyError:
pass
active_devices.append(dev_name)
if dev_name not in self._devices and \
device['DeviceId'] != str(self._api_id):
_LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
dev_name)
new = EmbyDevice(device, self)
self._devices[dev_name] = new
new_devices.append(new)
elif device['DeviceId'] != str(self._api_id):
# Before we send in new data check for changes to state
# to decide if we need to fire the update callback
if not self._devices[dev_name].is_active:
# Device wasn't active on the last update
# We need to fire a device callback to let subs now
dev_update = True
do_update = self.update_check(
self._devices[dev_name], device)
self._devices[dev_name].update_data(device)
self._devices[dev_name].set_active(True)
if dev_update:
self._do_new_devices_callback(0)
dev_update = False
if do_update:
self._do_update_callback(dev_name)
# Need to check for new inactive devices and flag
for dev_id in self._devices:
if dev_id not in active_devices:
# Device no longer active
if self._devices[dev_id].is_active:
self._devices[dev_id].set_active(False)
self._do_update_callback(dev_id)
self._do_stale_devices_callback(dev_id)
# Call device callback if new devices were found.
if new_devices:
self._do_new_devices_callback(0)
def get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Get latest items by scheduling the worker method. """
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
def return_result(future):
""" Return result. """
return future.result()
run_coro = asyncio.ensure_future(self.async_get_latest_items(
user_id, limit, is_played, include_item_types),
loop=self._event_loop)
run_coro.add_done_callback(return_result)
async def async_get_latest_items(self, user_id, limit=3, is_played='false',
include_item_types='episode'):
""" Return XX most recent movie or episode additions to library"""
if not self._registered:
_LOGGER.debug('Client not registered, cannot get items.')
return
url = '{0}/Users/{1}/Items/Latest'.format(
self.construct_url(API_URL), user_id)
params = {'api_key': self._api_key,
'IncludeItemTypes': include_item_types,
'Limit': limit,
'IsPlayed': is_played}
items = await self.api_request(url, params)
if items is None:
_LOGGER.debug('Unable to fetch items.')
else:
return items
|
mezz64/pyEmby | pyemby/helpers.py | deprecated_name | python | def deprecated_name(name):
def decorator(func):
"""Decorator function."""
def func_wrapper(self):
"""Wrapper for original function."""
if hasattr(self, name):
# Return the old property
return getattr(self, name)
else:
return func(self)
return func_wrapper
return decorator | Allow old method names for backwards compatability. | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/helpers.py#L10-L22 | null | """
pyemby.helpers
~~~~~~~~~~~~~~~~~~~~
Function helpers.
Copyright (c) 2017-2019 John Mihalic <https://github.com/mezz64>
Licensed under the MIT license.
"""
|
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpSession.recurse | python | def recurse(self, root_path, dir_cb, listing_cb, max_listing_size=0,
max_depth=MAX_REMOTE_RECURSION_DEPTH):
q = deque([(root_path, 0)])
collected = []
def push_file(path, file_path, entry):
collected.append((file_path, entry))
if max_listing_size > 0 and \
len(collected) >= max_listing_size:
listing_cb(path, collected)
# Clear contents on the list. We delete it this way so that
# we're only -modifying- the list rather than replacing it (a
# requirement of a closure).
del collected[:]
while q:
(path, current_depth) = q.popleft()
entries = self.listdir(path)
for entry in entries:
filename = stringify(entry.name)
file_path = ('%s/%s' % (path, filename))
if entry.is_symlink:
push_file(path, file_path, entry)
elif entry.is_directory:
if filename == '.' or filename == '..':
continue
if dir_cb is not None:
dir_cb(path, file_path, entry)
new_depth = current_depth + 1
if max_depth is None or new_depth <= max_depth:
q.append((file_path, new_depth))
elif entry.is_regular:
if listing_cb is not None:
push_file(path, file_path, entry)
if listing_cb is not None and (max_listing_size == 0 or
len(collected) > 0):
listing_cb(path, collected) | Recursively iterate a directory. Invoke callbacks for directories
and entries (both are optional, but it doesn't make sense unless one is
provided). "max_listing_size" will allow for the file-listing to be
chunked into manageable pieces. "max_depth" limited how deep recursion
goes. This can be used to make it easy to simply read a single
directory in chunks. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L517-L568 | [
"def stringify(s):\n if issubclass(s.__class__, (bytes, bytearray)):\n return s.decode('ascii')\n else:\n return s\n",
"def listdir(self, path, get_directories=True, get_files=True):\n return _sftp_listdir(self.__sftp_session_int, \n path, \n get_directories, \n get_files)\n",
"def remote_dir_cb(parent_path, full_path, entry):\n path_relations[full_path] = parent_path\n\n if parent_path not in parents:\n parents[parent_path] = [full_path]\n else:\n parents[parent_path].append(full_path)\n",
"def remote_listing_cb(parent_path, listing):\n for (file_path, entry) in listing:\n self.__log.debug(\"REMOTE: Unlinking: %s\" % (file_path))\n self.unlink(file_path)\n",
"def push_file(path, file_path, entry):\n collected.append((file_path, entry))\n if max_listing_size > 0 and \\\n len(collected) >= max_listing_size:\n listing_cb(path, collected)\n\n # Clear contents on the list. We delete it this way so that \n # we're only -modifying- the list rather than replacing it (a \n # requirement of a closure).\n del collected[:]\n"
] | class SftpSession(object):
def __init__(self, ssh_session):
self.__ssh_session_int = getattr(ssh_session,
'session_id',
ssh_session)
self.__log = logging.getLogger('SSH_SESSION(%s)' %
(self.__ssh_session_int))
def __enter__(self):
return self.open()
def open(self):
self.__sftp_session_int = _sftp_new(self.__ssh_session_int)
_sftp_init(self.__sftp_session_int)
return self
def __exit__(self, e_type, e_value, e_tb):
self.close()
def close(self):
_sftp_free(self.__sftp_session_int)
def stat(self, file_path):
return _sftp_stat(self.__sftp_session_int, file_path)
def rename(self, filepath_old, filepath_new):
return _sftp_rename(self.__sftp_session_int, filepath_old, filepath_new)
def chmod(self, file_path, mode):
return _sftp_chmod(self.__sftp_session_int, file_path, mode)
def chown(self, file_path, uid, gid):
return _sftp_chown(self.__sftp_session_int, file_path, uid, gid)
def exists(self, path):
return _sftp_exists(self.__sftp_session_int, path)
def mkdir(self, path, mode=0o755):
return _sftp_mkdir(self.__sftp_session_int, path, mode)
def rmdir(self, path):
return _sftp_rmdir(self.__sftp_session_int, path)
def lstat(self, file_path):
return _sftp_lstat(self.__sftp_session_int, file_path)
def unlink(self, file_path):
return _sftp_unlink(self.__sftp_session_int, file_path)
def readlink(self, file_path):
return _sftp_readlink(self.__sftp_session_int, file_path)
def symlink(self, to, from_):
return _sftp_symlink(self.__sftp_session_int, to, from_)
def setstat(self, file_path, entry_attributes):
return _sftp_setstat(self.__sftp_session_int, file_path, entry_attributes)
def listdir(self, path, get_directories=True, get_files=True):
return _sftp_listdir(self.__sftp_session_int,
path,
get_directories,
get_files)
def rmtree(self, path):
self.__log.debug("REMOTE: Doing recursive remove: %s" % (path))
# Collect names and heirarchy of subdirectories. Also, delete files
# that we encounter.
# If we don't put our root path in here, the recursive removal (at the
# end, below) will fail once it get's back to the top.
path_relations = { path: None }
parents = {}
def remote_dir_cb(parent_path, full_path, entry):
path_relations[full_path] = parent_path
if parent_path not in parents:
parents[parent_path] = [full_path]
else:
parents[parent_path].append(full_path)
def remote_listing_cb(parent_path, listing):
for (file_path, entry) in listing:
self.__log.debug("REMOTE: Unlinking: %s" % (file_path))
self.unlink(file_path)
self.recurse(path,
remote_dir_cb,
remote_listing_cb,
MAX_MIRROR_LISTING_CHUNK_SIZE)
# Now, delete the directories. Descend to leaves and work our way back.
self.__log.debug("REMOTE: Removing (%d) directories/subdirectories." %
(len(path_relations)))
def remove_directory(node_path, depth=0):
if depth > MAX_REMOTE_RECURSION_DEPTH:
raise SftpError("Remote rmtree recursed too deeply. Either "
"the directories run very deep, or we've "
"encountered a cycle (probably via hard-"
"links).")
if node_path in parents:
while parents[node_path]:
remove_directory(parents[node_path][0], depth + 1)
del parents[node_path][0]
self.__log.debug("REMOTE: Removing directory: %s" % (node_path))
self.rmdir(node_path)
# All children subdirectories have been deleted. Delete our parent
# record.
try:
del parents[node_path]
except KeyError:
pass
# Delete the mapping from us to our parent.
del path_relations[node_path]
remove_directory(path)
def recurse(self, root_path, dir_cb, listing_cb, max_listing_size=0,
max_depth=MAX_REMOTE_RECURSION_DEPTH):
"""Recursively iterate a directory. Invoke callbacks for directories
and entries (both are optional, but it doesn't make sense unless one is
provided). "max_listing_size" will allow for the file-listing to be
chunked into manageable pieces. "max_depth" limited how deep recursion
goes. This can be used to make it easy to simply read a single
directory in chunks.
"""
q = deque([(root_path, 0)])
collected = []
def push_file(path, file_path, entry):
collected.append((file_path, entry))
if max_listing_size > 0 and \
len(collected) >= max_listing_size:
listing_cb(path, collected)
# Clear contents on the list. We delete it this way so that
# we're only -modifying- the list rather than replacing it (a
# requirement of a closure).
del collected[:]
while q:
(path, current_depth) = q.popleft()
entries = self.listdir(path)
for entry in entries:
filename = stringify(entry.name)
file_path = ('%s/%s' % (path, filename))
if entry.is_symlink:
push_file(path, file_path, entry)
elif entry.is_directory:
if filename == '.' or filename == '..':
continue
if dir_cb is not None:
dir_cb(path, file_path, entry)
new_depth = current_depth + 1
if max_depth is None or new_depth <= max_depth:
q.append((file_path, new_depth))
elif entry.is_regular:
if listing_cb is not None:
push_file(path, file_path, entry)
if listing_cb is not None and (max_listing_size == 0 or
len(collected) > 0):
listing_cb(path, collected)
def write_to_local(self, filepath_from, filepath_to, mtime_dt=None):
"""Open a remote file and write it locally."""
self.__log.debug("Writing R[%s] -> L[%s]." % (filepath_from,
filepath_to))
with SftpFile(self, filepath_from, 'r') as sf_from:
with open(filepath_to, 'wb') as file_to:
while 1:
part = sf_from.read(MAX_MIRROR_WRITE_CHUNK_SIZE)
file_to.write(part)
if len(part) < MAX_MIRROR_WRITE_CHUNK_SIZE:
break
if mtime_dt is None:
mtime_dt = datetime.now()
mtime_epoch = mktime(mtime_dt.timetuple())
utime(filepath_to, (mtime_epoch, mtime_epoch))
def write_to_remote(self, filepath_from, filepath_to, mtime_dt=None):
"""Open a local file and write it remotely."""
self.__log.debug("Writing L[%s] -> R[%s]." % (filepath_from,
filepath_to))
with open(filepath_from, 'rb') as file_from:
with SftpFile(self, filepath_to, 'w') as sf_to:
while 1:
part = file_from.read(MAX_MIRROR_WRITE_CHUNK_SIZE)
sf_to.write(part)
if len(part) < MAX_MIRROR_WRITE_CHUNK_SIZE:
break
if mtime_dt is None:
mtime_dt = datetime.now()
self.utimes_dt(filepath_to, mtime_dt, mtime_dt)
def utimes(self, file_path, atime_epoch, mtime_epoch):
_sftp_utimes(self.__sftp_session_int,
file_path,
atime_epoch,
mtime_epoch)
def utimes_dt(self, file_path, atime_dt, mtime_dt):
_sftp_utimes_dt(self.__sftp_session_int, file_path, atime_dt, mtime_dt)
@property
def session_id(self):
return self.__sftp_session_int
|
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpSession.write_to_local | python | def write_to_local(self, filepath_from, filepath_to, mtime_dt=None):
self.__log.debug("Writing R[%s] -> L[%s]." % (filepath_from,
filepath_to))
with SftpFile(self, filepath_from, 'r') as sf_from:
with open(filepath_to, 'wb') as file_to:
while 1:
part = sf_from.read(MAX_MIRROR_WRITE_CHUNK_SIZE)
file_to.write(part)
if len(part) < MAX_MIRROR_WRITE_CHUNK_SIZE:
break
if mtime_dt is None:
mtime_dt = datetime.now()
mtime_epoch = mktime(mtime_dt.timetuple())
utime(filepath_to, (mtime_epoch, mtime_epoch)) | Open a remote file and write it locally. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L570-L589 | null | class SftpSession(object):
def __init__(self, ssh_session):
self.__ssh_session_int = getattr(ssh_session,
'session_id',
ssh_session)
self.__log = logging.getLogger('SSH_SESSION(%s)' %
(self.__ssh_session_int))
def __enter__(self):
return self.open()
def open(self):
self.__sftp_session_int = _sftp_new(self.__ssh_session_int)
_sftp_init(self.__sftp_session_int)
return self
def __exit__(self, e_type, e_value, e_tb):
self.close()
def close(self):
_sftp_free(self.__sftp_session_int)
def stat(self, file_path):
return _sftp_stat(self.__sftp_session_int, file_path)
def rename(self, filepath_old, filepath_new):
return _sftp_rename(self.__sftp_session_int, filepath_old, filepath_new)
def chmod(self, file_path, mode):
return _sftp_chmod(self.__sftp_session_int, file_path, mode)
def chown(self, file_path, uid, gid):
return _sftp_chown(self.__sftp_session_int, file_path, uid, gid)
def exists(self, path):
return _sftp_exists(self.__sftp_session_int, path)
def mkdir(self, path, mode=0o755):
return _sftp_mkdir(self.__sftp_session_int, path, mode)
def rmdir(self, path):
return _sftp_rmdir(self.__sftp_session_int, path)
def lstat(self, file_path):
return _sftp_lstat(self.__sftp_session_int, file_path)
def unlink(self, file_path):
return _sftp_unlink(self.__sftp_session_int, file_path)
def readlink(self, file_path):
return _sftp_readlink(self.__sftp_session_int, file_path)
def symlink(self, to, from_):
return _sftp_symlink(self.__sftp_session_int, to, from_)
def setstat(self, file_path, entry_attributes):
return _sftp_setstat(self.__sftp_session_int, file_path, entry_attributes)
def listdir(self, path, get_directories=True, get_files=True):
return _sftp_listdir(self.__sftp_session_int,
path,
get_directories,
get_files)
def rmtree(self, path):
self.__log.debug("REMOTE: Doing recursive remove: %s" % (path))
# Collect names and heirarchy of subdirectories. Also, delete files
# that we encounter.
# If we don't put our root path in here, the recursive removal (at the
# end, below) will fail once it get's back to the top.
path_relations = { path: None }
parents = {}
def remote_dir_cb(parent_path, full_path, entry):
path_relations[full_path] = parent_path
if parent_path not in parents:
parents[parent_path] = [full_path]
else:
parents[parent_path].append(full_path)
def remote_listing_cb(parent_path, listing):
for (file_path, entry) in listing:
self.__log.debug("REMOTE: Unlinking: %s" % (file_path))
self.unlink(file_path)
self.recurse(path,
remote_dir_cb,
remote_listing_cb,
MAX_MIRROR_LISTING_CHUNK_SIZE)
# Now, delete the directories. Descend to leaves and work our way back.
self.__log.debug("REMOTE: Removing (%d) directories/subdirectories." %
(len(path_relations)))
def remove_directory(node_path, depth=0):
if depth > MAX_REMOTE_RECURSION_DEPTH:
raise SftpError("Remote rmtree recursed too deeply. Either "
"the directories run very deep, or we've "
"encountered a cycle (probably via hard-"
"links).")
if node_path in parents:
while parents[node_path]:
remove_directory(parents[node_path][0], depth + 1)
del parents[node_path][0]
self.__log.debug("REMOTE: Removing directory: %s" % (node_path))
self.rmdir(node_path)
# All children subdirectories have been deleted. Delete our parent
# record.
try:
del parents[node_path]
except KeyError:
pass
# Delete the mapping from us to our parent.
del path_relations[node_path]
remove_directory(path)
def recurse(self, root_path, dir_cb, listing_cb, max_listing_size=0,
max_depth=MAX_REMOTE_RECURSION_DEPTH):
"""Recursively iterate a directory. Invoke callbacks for directories
and entries (both are optional, but it doesn't make sense unless one is
provided). "max_listing_size" will allow for the file-listing to be
chunked into manageable pieces. "max_depth" limited how deep recursion
goes. This can be used to make it easy to simply read a single
directory in chunks.
"""
q = deque([(root_path, 0)])
collected = []
def push_file(path, file_path, entry):
collected.append((file_path, entry))
if max_listing_size > 0 and \
len(collected) >= max_listing_size:
listing_cb(path, collected)
# Clear contents on the list. We delete it this way so that
# we're only -modifying- the list rather than replacing it (a
# requirement of a closure).
del collected[:]
while q:
(path, current_depth) = q.popleft()
entries = self.listdir(path)
for entry in entries:
filename = stringify(entry.name)
file_path = ('%s/%s' % (path, filename))
if entry.is_symlink:
push_file(path, file_path, entry)
elif entry.is_directory:
if filename == '.' or filename == '..':
continue
if dir_cb is not None:
dir_cb(path, file_path, entry)
new_depth = current_depth + 1
if max_depth is None or new_depth <= max_depth:
q.append((file_path, new_depth))
elif entry.is_regular:
if listing_cb is not None:
push_file(path, file_path, entry)
if listing_cb is not None and (max_listing_size == 0 or
len(collected) > 0):
listing_cb(path, collected)
def write_to_remote(self, filepath_from, filepath_to, mtime_dt=None):
"""Open a local file and write it remotely."""
self.__log.debug("Writing L[%s] -> R[%s]." % (filepath_from,
filepath_to))
with open(filepath_from, 'rb') as file_from:
with SftpFile(self, filepath_to, 'w') as sf_to:
while 1:
part = file_from.read(MAX_MIRROR_WRITE_CHUNK_SIZE)
sf_to.write(part)
if len(part) < MAX_MIRROR_WRITE_CHUNK_SIZE:
break
if mtime_dt is None:
mtime_dt = datetime.now()
self.utimes_dt(filepath_to, mtime_dt, mtime_dt)
def utimes(self, file_path, atime_epoch, mtime_epoch):
_sftp_utimes(self.__sftp_session_int,
file_path,
atime_epoch,
mtime_epoch)
def utimes_dt(self, file_path, atime_dt, mtime_dt):
_sftp_utimes_dt(self.__sftp_session_int, file_path, atime_dt, mtime_dt)
@property
def session_id(self):
return self.__sftp_session_int
|
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpSession.write_to_remote | python | def write_to_remote(self, filepath_from, filepath_to, mtime_dt=None):
self.__log.debug("Writing L[%s] -> R[%s]." % (filepath_from,
filepath_to))
with open(filepath_from, 'rb') as file_from:
with SftpFile(self, filepath_to, 'w') as sf_to:
while 1:
part = file_from.read(MAX_MIRROR_WRITE_CHUNK_SIZE)
sf_to.write(part)
if len(part) < MAX_MIRROR_WRITE_CHUNK_SIZE:
break
if mtime_dt is None:
mtime_dt = datetime.now()
self.utimes_dt(filepath_to, mtime_dt, mtime_dt) | Open a local file and write it remotely. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L591-L609 | [
"def utimes_dt(self, file_path, atime_dt, mtime_dt):\n _sftp_utimes_dt(self.__sftp_session_int, file_path, atime_dt, mtime_dt)\n"
] | class SftpSession(object):
def __init__(self, ssh_session):
self.__ssh_session_int = getattr(ssh_session,
'session_id',
ssh_session)
self.__log = logging.getLogger('SSH_SESSION(%s)' %
(self.__ssh_session_int))
def __enter__(self):
return self.open()
def open(self):
self.__sftp_session_int = _sftp_new(self.__ssh_session_int)
_sftp_init(self.__sftp_session_int)
return self
def __exit__(self, e_type, e_value, e_tb):
self.close()
def close(self):
_sftp_free(self.__sftp_session_int)
def stat(self, file_path):
return _sftp_stat(self.__sftp_session_int, file_path)
def rename(self, filepath_old, filepath_new):
return _sftp_rename(self.__sftp_session_int, filepath_old, filepath_new)
def chmod(self, file_path, mode):
return _sftp_chmod(self.__sftp_session_int, file_path, mode)
def chown(self, file_path, uid, gid):
return _sftp_chown(self.__sftp_session_int, file_path, uid, gid)
def exists(self, path):
return _sftp_exists(self.__sftp_session_int, path)
def mkdir(self, path, mode=0o755):
return _sftp_mkdir(self.__sftp_session_int, path, mode)
def rmdir(self, path):
return _sftp_rmdir(self.__sftp_session_int, path)
def lstat(self, file_path):
return _sftp_lstat(self.__sftp_session_int, file_path)
def unlink(self, file_path):
return _sftp_unlink(self.__sftp_session_int, file_path)
def readlink(self, file_path):
return _sftp_readlink(self.__sftp_session_int, file_path)
def symlink(self, to, from_):
return _sftp_symlink(self.__sftp_session_int, to, from_)
def setstat(self, file_path, entry_attributes):
return _sftp_setstat(self.__sftp_session_int, file_path, entry_attributes)
def listdir(self, path, get_directories=True, get_files=True):
return _sftp_listdir(self.__sftp_session_int,
path,
get_directories,
get_files)
def rmtree(self, path):
self.__log.debug("REMOTE: Doing recursive remove: %s" % (path))
# Collect names and heirarchy of subdirectories. Also, delete files
# that we encounter.
# If we don't put our root path in here, the recursive removal (at the
# end, below) will fail once it get's back to the top.
path_relations = { path: None }
parents = {}
def remote_dir_cb(parent_path, full_path, entry):
path_relations[full_path] = parent_path
if parent_path not in parents:
parents[parent_path] = [full_path]
else:
parents[parent_path].append(full_path)
def remote_listing_cb(parent_path, listing):
for (file_path, entry) in listing:
self.__log.debug("REMOTE: Unlinking: %s" % (file_path))
self.unlink(file_path)
self.recurse(path,
remote_dir_cb,
remote_listing_cb,
MAX_MIRROR_LISTING_CHUNK_SIZE)
# Now, delete the directories. Descend to leaves and work our way back.
self.__log.debug("REMOTE: Removing (%d) directories/subdirectories." %
(len(path_relations)))
def remove_directory(node_path, depth=0):
if depth > MAX_REMOTE_RECURSION_DEPTH:
raise SftpError("Remote rmtree recursed too deeply. Either "
"the directories run very deep, or we've "
"encountered a cycle (probably via hard-"
"links).")
if node_path in parents:
while parents[node_path]:
remove_directory(parents[node_path][0], depth + 1)
del parents[node_path][0]
self.__log.debug("REMOTE: Removing directory: %s" % (node_path))
self.rmdir(node_path)
# All children subdirectories have been deleted. Delete our parent
# record.
try:
del parents[node_path]
except KeyError:
pass
# Delete the mapping from us to our parent.
del path_relations[node_path]
remove_directory(path)
def recurse(self, root_path, dir_cb, listing_cb, max_listing_size=0,
max_depth=MAX_REMOTE_RECURSION_DEPTH):
"""Recursively iterate a directory. Invoke callbacks for directories
and entries (both are optional, but it doesn't make sense unless one is
provided). "max_listing_size" will allow for the file-listing to be
chunked into manageable pieces. "max_depth" limited how deep recursion
goes. This can be used to make it easy to simply read a single
directory in chunks.
"""
q = deque([(root_path, 0)])
collected = []
def push_file(path, file_path, entry):
collected.append((file_path, entry))
if max_listing_size > 0 and \
len(collected) >= max_listing_size:
listing_cb(path, collected)
# Clear contents of the list. We delete it this way so that
# we're only -modifying- the list rather than replacing it (a
# requirement of a closure).
del collected[:]
while q:
(path, current_depth) = q.popleft()
entries = self.listdir(path)
for entry in entries:
filename = stringify(entry.name)
file_path = ('%s/%s' % (path, filename))
if entry.is_symlink:
push_file(path, file_path, entry)
elif entry.is_directory:
if filename == '.' or filename == '..':
continue
if dir_cb is not None:
dir_cb(path, file_path, entry)
new_depth = current_depth + 1
if max_depth is None or new_depth <= max_depth:
q.append((file_path, new_depth))
elif entry.is_regular:
if listing_cb is not None:
push_file(path, file_path, entry)
if listing_cb is not None and (max_listing_size == 0 or
len(collected) > 0):
listing_cb(path, collected)
def write_to_local(self, filepath_from, filepath_to, mtime_dt=None):
"""Open a remote file and write it locally."""
self.__log.debug("Writing R[%s] -> L[%s]." % (filepath_from,
filepath_to))
with SftpFile(self, filepath_from, 'r') as sf_from:
with open(filepath_to, 'wb') as file_to:
while 1:
part = sf_from.read(MAX_MIRROR_WRITE_CHUNK_SIZE)
file_to.write(part)
if len(part) < MAX_MIRROR_WRITE_CHUNK_SIZE:
break
if mtime_dt is None:
mtime_dt = datetime.now()
mtime_epoch = mktime(mtime_dt.timetuple())
utime(filepath_to, (mtime_epoch, mtime_epoch))
def utimes(self, file_path, atime_epoch, mtime_epoch):
_sftp_utimes(self.__sftp_session_int,
file_path,
atime_epoch,
mtime_epoch)
def utimes_dt(self, file_path, atime_dt, mtime_dt):
_sftp_utimes_dt(self.__sftp_session_int, file_path, atime_dt, mtime_dt)
@property
def session_id(self):
return self.__sftp_session_int
|
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpFile.__at_om_to_im | python | def __at_om_to_im(self, om):
original_om = om
if om[0] == 'U':
om = om[1:]
is_um = True
else:
is_um = False
if om == 'r':
return (original_om, O_RDONLY, False, is_um)
elif om == 'w':
return (original_om, O_WRONLY | O_CREAT | O_TRUNC, False, is_um)
elif om == 'a':
return (original_om, O_WRONLY | O_CREAT, False, is_um)
elif om == 'r+':
return (original_om, O_RDWR | O_CREAT, False, is_um)
elif om == 'w+':
return (original_om, O_RDWR | O_CREAT | O_TRUNC, False, is_um)
elif om == 'a+':
return (original_om, O_RDWR | O_CREAT, True, is_um)
else:
raise Exception("Outer access mode [%s] is invalid." %
(original_om)) | Convert an "outer" access mode to an "inner" access mode.
Returns a tuple of:
(<system access mode>, <is append>, <is universal newlines>). | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L664-L693 | null | class SftpFile(object):
def __init__(self, sftp_session, filepath, access_type_om='r',
create_mode=DEFAULT_CREATE_MODE):
at_im = self.__at_om_to_im(access_type_om)
self.__sftp_session_int = getattr(sftp_session,
'session_id',
sftp_session)
self.__filepath = filepath
self.__access_type = at_im
self.__create_mode = create_mode
def __repr__(self):
return ('<SFTP_FILE [%s] \"%s\">' %
(self.__access_type[0], self.__filepath))
def __at_om_to_im(self, om):
"""Convert an "outer" access mode to an "inner" access mode.
Returns a tuple of:
(<system access mode>, <is append>, <is universal newlines>).
"""
original_om = om
if om[0] == 'U':
om = om[1:]
is_um = True
else:
is_um = False
if om == 'r':
return (original_om, O_RDONLY, False, is_um)
elif om == 'w':
return (original_om, O_WRONLY | O_CREAT | O_TRUNC, False, is_um)
elif om == 'a':
return (original_om, O_WRONLY | O_CREAT, False, is_um)
elif om == 'r+':
return (original_om, O_RDWR | O_CREAT, False, is_um)
elif om == 'w+':
return (original_om, O_RDWR | O_CREAT | O_TRUNC, False, is_um)
elif om == 'a+':
return (original_om, O_RDWR | O_CREAT, True, is_um)
else:
raise Exception("Outer access mode [%s] is invalid." %
(original_om))
def __enter__(self):
return self.open()
def open(self):
"""This is the only way to open a file resource."""
self.__sf = _sftp_open(self.__sftp_session_int,
self.__filepath,
self.access_type_int,
self.__create_mode)
if self.access_type_is_append is True:
self.seek(self.filesize)
return SftpFileObject(self)
def __exit__(self, e_type, e_value, e_tb):
self.close()
def close(self):
_sftp_close(self.__sf)
def write(self, buffer_):
return _sftp_write(self.__sf, buffer_)
def seek(self, position):
return _sftp_seek(self.__sf, position)
def read(self, size):
"""Read a length of bytes. Return empty on EOF."""
return _sftp_read(self.__sf, size)
def fstat(self):
return _sftp_fstat(self.__sf)
def rewind(self):
return _sftp_rewind(self.__sf)
@property
def sf(self):
return self.__sf
@property
def position(self):
return _sftp_tell(self.__sf)
@property
def filesize(self):
return self.fstat().size
@property
def filepath(self):
return self.__filepath
@property
def access_type_str(self):
return self.__access_type[0]
@property
def access_type_int(self):
return self.__access_type[1]
@property
def access_type_is_append(self):
return self.__access_type[2]
@property
def access_type_has_universal_nl(self):
return self.__access_type[3]
|
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpFile.open | python | def open(self):
self.__sf = _sftp_open(self.__sftp_session_int,
self.__filepath,
self.access_type_int,
self.__create_mode)
if self.access_type_is_append is True:
self.seek(self.filesize)
return SftpFileObject(self) | This is the only way to open a file resource. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L698-L709 | [
"def _sftp_open(sftp_session_int, filepath, access_type, mode):\n logging.debug(\"Opening file: %s\" % (filepath))\n\n sf = c_sftp_open(sftp_session_int, \n bytify(filepath), \n access_type, \n mode)\n\n if sf is None:\n type_ = sftp_get_error(sftp_session_int)\n if type_ >= 0:\n raise SftpError(\"Could not open file [%s]: %s\" % \n (filepath, sftp_get_error_string(type_)))\n else:\n raise SftpError(\"Could not open file [%s]. There was an \"\n \"unspecified error.\" % (filepath))\n\n logging.debug(\"File [%s] opened as [%s].\" % (filepath, sf))\n return sf\n",
"def seek(self, position):\n return _sftp_seek(self.__sf, position)\n"
] | class SftpFile(object):
def __init__(self, sftp_session, filepath, access_type_om='r',
create_mode=DEFAULT_CREATE_MODE):
at_im = self.__at_om_to_im(access_type_om)
self.__sftp_session_int = getattr(sftp_session,
'session_id',
sftp_session)
self.__filepath = filepath
self.__access_type = at_im
self.__create_mode = create_mode
def __repr__(self):
return ('<SFTP_FILE [%s] \"%s\">' %
(self.__access_type[0], self.__filepath))
def __at_om_to_im(self, om):
"""Convert an "outer" access mode to an "inner" access mode.
Returns a tuple of:
(<system access mode>, <is append>, <is universal newlines>).
"""
original_om = om
if om[0] == 'U':
om = om[1:]
is_um = True
else:
is_um = False
if om == 'r':
return (original_om, O_RDONLY, False, is_um)
elif om == 'w':
return (original_om, O_WRONLY | O_CREAT | O_TRUNC, False, is_um)
elif om == 'a':
return (original_om, O_WRONLY | O_CREAT, False, is_um)
elif om == 'r+':
return (original_om, O_RDWR | O_CREAT, False, is_um)
elif om == 'w+':
return (original_om, O_RDWR | O_CREAT | O_TRUNC, False, is_um)
elif om == 'a+':
return (original_om, O_RDWR | O_CREAT, True, is_um)
else:
raise Exception("Outer access mode [%s] is invalid." %
(original_om))
def __enter__(self):
return self.open()
def __exit__(self, e_type, e_value, e_tb):
self.close()
def close(self):
_sftp_close(self.__sf)
def write(self, buffer_):
return _sftp_write(self.__sf, buffer_)
def seek(self, position):
return _sftp_seek(self.__sf, position)
def read(self, size):
"""Read a length of bytes. Return empty on EOF."""
return _sftp_read(self.__sf, size)
def fstat(self):
return _sftp_fstat(self.__sf)
def rewind(self):
return _sftp_rewind(self.__sf)
@property
def sf(self):
return self.__sf
@property
def position(self):
return _sftp_tell(self.__sf)
@property
def filesize(self):
return self.fstat().size
@property
def filepath(self):
return self.__filepath
@property
def access_type_str(self):
return self.__access_type[0]
@property
def access_type_int(self):
return self.__access_type[1]
@property
def access_type_is_append(self):
return self.__access_type[2]
@property
def access_type_has_universal_nl(self):
return self.__access_type[3]
|
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpFileObject.read | python | def read(self, size=None):
if size is not None:
return self.__sf.read(size)
block_size = self.__class__.__block_size
b = bytearray()
received_bytes = 0
while 1:
partial = self.__sf.read(block_size)
# self.__log.debug("Reading (%d) bytes. (%d) bytes returned." %
# (block_size, len(partial)))
b.extend(partial)
received_bytes += len(partial)
if len(partial) < block_size:
self.__log.debug("End of file.")
break
self.__log.debug("Read (%d) bytes for total-file." % (received_bytes))
return b | Read a length of bytes. Return empty on EOF. If 'size' is omitted,
return whole file. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L886-L912 | null | class SftpFileObject(object):
"""A file-like object interface for SFTP resources."""
__block_size = 8192
def __init__(self, sf):
self.__sf = sf
self.__buffer = StreamBuffer()
self.__offset = 0
self.__buffer_offset = 0
self.__newlines = {}
self.__eof = False
self.__log = logging.getLogger('FILE(%s)' % (sf))
def __repr__(self):
return ('<SFTP_FILE_OBJ [%s] \"%s\">' %
(self.mode, self.name.replace('"', '\\"')))
def write(self, buffer_):
# self.__log.debug("Writing (%d) bytes." % (len(buffer_)))
self.__sf.write(buffer_)
def close(self):
"""Close the resource."""
self.__sf.close()
def seek(self, offset, whence=SEEK_SET):
"""Reposition the file pointer."""
if whence == SEEK_SET:
self.__sf.seek(offset)
elif whence == SEEK_CUR:
self.__sf.seek(self.tell() + offset)
elif whence == SEEK_END:
self.__sf.seek(self.__sf.filesize - offset)
def tell(self):
"""Report the current position."""
return self.__sf.position
def flush(self):
"""Flush data. This is a no-op in our context."""
pass
def isatty(self):
"""Only return True if connected to a TTY device."""
return False
def __iter__(self):
return self
def __next__(self):
"""Iterate through lines of text."""
next_line = self.readline()
if next_line == '':
self.__log.debug("No more lines (EOF).")
raise StopIteration()
return next_line
# For Python 2.x compatibility.
next = __next__
def readline(self, size=None):
"""Read a single line of text with EOF."""
# TODO: Add support for Unicode.
(line, nl) = self.__buffer.read_until_nl(self.__retrieve_data)
if self.__sf.access_type_has_universal_nl and nl is not None:
self.__newlines[nl] = True
return line
def __retrieve_data(self):
"""Read more data from the file."""
if self.__eof is True:
return b''
logging.debug("Reading another block.")
block = self.read(self.__block_size)
if block == b'':
self.__log.debug("We've encountered the EOF.")
self.__eof = True
return block
def readlines(self, sizehint=None):
self.__log.debug("Reading all lines.")
collected = []
total = 0
for line in iter(self):
collected.append(line)
total += len(line)
if sizehint is not None and total > sizehint:
break
self.__log.debug("Read whole file as (%d) lines." % (len(collected)))
return ''.join(collected)
@property
def closed(self):
raise False
@property
def encoding(self):
return None
@property
def mode(self):
return self.__sf.access_type_str
@property
def name(self):
return self.__sf.filepath
@property
def newlines(self):
if self.__sf.access_type_has_universal_nl is False:
raise AttributeError("Universal newlines are unavailable since "
"not requested.")
return tuple(self.__newlines.keys())
@property
def raw(self):
return self.__sf
|
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpFileObject.seek | python | def seek(self, offset, whence=SEEK_SET):
if whence == SEEK_SET:
self.__sf.seek(offset)
elif whence == SEEK_CUR:
self.__sf.seek(self.tell() + offset)
elif whence == SEEK_END:
self.__sf.seek(self.__sf.filesize - offset) | Reposition the file pointer. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L919-L927 | [
"def tell(self):\n \"\"\"Report the current position.\"\"\"\n\n return self.__sf.position\n"
] | class SftpFileObject(object):
"""A file-like object interface for SFTP resources."""
__block_size = 8192
def __init__(self, sf):
self.__sf = sf
self.__buffer = StreamBuffer()
self.__offset = 0
self.__buffer_offset = 0
self.__newlines = {}
self.__eof = False
self.__log = logging.getLogger('FILE(%s)' % (sf))
def __repr__(self):
return ('<SFTP_FILE_OBJ [%s] \"%s\">' %
(self.mode, self.name.replace('"', '\\"')))
def write(self, buffer_):
# self.__log.debug("Writing (%d) bytes." % (len(buffer_)))
self.__sf.write(buffer_)
def read(self, size=None):
"""Read a length of bytes. Return empty on EOF. If 'size' is omitted,
return whole file.
"""
if size is not None:
return self.__sf.read(size)
block_size = self.__class__.__block_size
b = bytearray()
received_bytes = 0
while 1:
partial = self.__sf.read(block_size)
# self.__log.debug("Reading (%d) bytes. (%d) bytes returned." %
# (block_size, len(partial)))
b.extend(partial)
received_bytes += len(partial)
if len(partial) < block_size:
self.__log.debug("End of file.")
break
self.__log.debug("Read (%d) bytes for total-file." % (received_bytes))
return b
def close(self):
"""Close the resource."""
self.__sf.close()
def tell(self):
"""Report the current position."""
return self.__sf.position
def flush(self):
"""Flush data. This is a no-op in our context."""
pass
def isatty(self):
"""Only return True if connected to a TTY device."""
return False
def __iter__(self):
return self
def __next__(self):
"""Iterate through lines of text."""
next_line = self.readline()
if next_line == '':
self.__log.debug("No more lines (EOF).")
raise StopIteration()
return next_line
# For Python 2.x compatibility.
next = __next__
def readline(self, size=None):
"""Read a single line of text with EOF."""
# TODO: Add support for Unicode.
(line, nl) = self.__buffer.read_until_nl(self.__retrieve_data)
if self.__sf.access_type_has_universal_nl and nl is not None:
self.__newlines[nl] = True
return line
def __retrieve_data(self):
"""Read more data from the file."""
if self.__eof is True:
return b''
logging.debug("Reading another block.")
block = self.read(self.__block_size)
if block == b'':
self.__log.debug("We've encountered the EOF.")
self.__eof = True
return block
def readlines(self, sizehint=None):
self.__log.debug("Reading all lines.")
collected = []
total = 0
for line in iter(self):
collected.append(line)
total += len(line)
if sizehint is not None and total > sizehint:
break
self.__log.debug("Read whole file as (%d) lines." % (len(collected)))
return ''.join(collected)
@property
def closed(self):
raise False
@property
def encoding(self):
return None
@property
def mode(self):
return self.__sf.access_type_str
@property
def name(self):
return self.__sf.filepath
@property
def newlines(self):
if self.__sf.access_type_has_universal_nl is False:
raise AttributeError("Universal newlines are unavailable since "
"not requested.")
return tuple(self.__newlines.keys())
@property
def raw(self):
return self.__sf
|
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpFileObject.readline | python | def readline(self, size=None):
# TODO: Add support for Unicode.
(line, nl) = self.__buffer.read_until_nl(self.__retrieve_data)
if self.__sf.access_type_has_universal_nl and nl is not None:
self.__newlines[nl] = True
return line | Read a single line of text with EOF. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L960-L969 | null | class SftpFileObject(object):
"""A file-like object interface for SFTP resources."""
__block_size = 8192
def __init__(self, sf):
self.__sf = sf
self.__buffer = StreamBuffer()
self.__offset = 0
self.__buffer_offset = 0
self.__newlines = {}
self.__eof = False
self.__log = logging.getLogger('FILE(%s)' % (sf))
def __repr__(self):
return ('<SFTP_FILE_OBJ [%s] \"%s\">' %
(self.mode, self.name.replace('"', '\\"')))
def write(self, buffer_):
# self.__log.debug("Writing (%d) bytes." % (len(buffer_)))
self.__sf.write(buffer_)
def read(self, size=None):
"""Read a length of bytes. Return empty on EOF. If 'size' is omitted,
return whole file.
"""
if size is not None:
return self.__sf.read(size)
block_size = self.__class__.__block_size
b = bytearray()
received_bytes = 0
while 1:
partial = self.__sf.read(block_size)
# self.__log.debug("Reading (%d) bytes. (%d) bytes returned." %
# (block_size, len(partial)))
b.extend(partial)
received_bytes += len(partial)
if len(partial) < block_size:
self.__log.debug("End of file.")
break
self.__log.debug("Read (%d) bytes for total-file." % (received_bytes))
return b
def close(self):
"""Close the resource."""
self.__sf.close()
def seek(self, offset, whence=SEEK_SET):
"""Reposition the file pointer."""
if whence == SEEK_SET:
self.__sf.seek(offset)
elif whence == SEEK_CUR:
self.__sf.seek(self.tell() + offset)
elif whence == SEEK_END:
self.__sf.seek(self.__sf.filesize - offset)
def tell(self):
"""Report the current position."""
return self.__sf.position
def flush(self):
"""Flush data. This is a no-op in our context."""
pass
def isatty(self):
"""Only return True if connected to a TTY device."""
return False
def __iter__(self):
return self
def __next__(self):
"""Iterate through lines of text."""
next_line = self.readline()
if next_line == '':
self.__log.debug("No more lines (EOF).")
raise StopIteration()
return next_line
# For Python 2.x compatibility.
next = __next__
def __retrieve_data(self):
"""Read more data from the file."""
if self.__eof is True:
return b''
logging.debug("Reading another block.")
block = self.read(self.__block_size)
if block == b'':
self.__log.debug("We've encountered the EOF.")
self.__eof = True
return block
def readlines(self, sizehint=None):
self.__log.debug("Reading all lines.")
collected = []
total = 0
for line in iter(self):
collected.append(line)
total += len(line)
if sizehint is not None and total > sizehint:
break
self.__log.debug("Read whole file as (%d) lines." % (len(collected)))
return ''.join(collected)
@property
def closed(self):
raise False
@property
def encoding(self):
return None
@property
def mode(self):
return self.__sf.access_type_str
@property
def name(self):
return self.__sf.filepath
@property
def newlines(self):
if self.__sf.access_type_has_universal_nl is False:
raise AttributeError("Universal newlines are unavailable since "
"not requested.")
return tuple(self.__newlines.keys())
@property
def raw(self):
return self.__sf
|
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpFileObject.__retrieve_data | python | def __retrieve_data(self):
if self.__eof is True:
return b''
logging.debug("Reading another block.")
block = self.read(self.__block_size)
if block == b'':
self.__log.debug("We've encountered the EOF.")
self.__eof = True
return block | Read more data from the file. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L971-L983 | null | class SftpFileObject(object):
"""A file-like object interface for SFTP resources."""
__block_size = 8192
def __init__(self, sf):
self.__sf = sf
self.__buffer = StreamBuffer()
self.__offset = 0
self.__buffer_offset = 0
self.__newlines = {}
self.__eof = False
self.__log = logging.getLogger('FILE(%s)' % (sf))
def __repr__(self):
return ('<SFTP_FILE_OBJ [%s] \"%s\">' %
(self.mode, self.name.replace('"', '\\"')))
def write(self, buffer_):
# self.__log.debug("Writing (%d) bytes." % (len(buffer_)))
self.__sf.write(buffer_)
def read(self, size=None):
"""Read a length of bytes. Return empty on EOF. If 'size' is omitted,
return whole file.
"""
if size is not None:
return self.__sf.read(size)
block_size = self.__class__.__block_size
b = bytearray()
received_bytes = 0
while 1:
partial = self.__sf.read(block_size)
# self.__log.debug("Reading (%d) bytes. (%d) bytes returned." %
# (block_size, len(partial)))
b.extend(partial)
received_bytes += len(partial)
if len(partial) < block_size:
self.__log.debug("End of file.")
break
self.__log.debug("Read (%d) bytes for total-file." % (received_bytes))
return b
def close(self):
"""Close the resource."""
self.__sf.close()
def seek(self, offset, whence=SEEK_SET):
"""Reposition the file pointer."""
if whence == SEEK_SET:
self.__sf.seek(offset)
elif whence == SEEK_CUR:
self.__sf.seek(self.tell() + offset)
elif whence == SEEK_END:
self.__sf.seek(self.__sf.filesize - offset)
def tell(self):
"""Report the current position."""
return self.__sf.position
def flush(self):
"""Flush data. This is a no-op in our context."""
pass
def isatty(self):
"""Only return True if connected to a TTY device."""
return False
def __iter__(self):
return self
def __next__(self):
"""Iterate through lines of text."""
next_line = self.readline()
if next_line == '':
self.__log.debug("No more lines (EOF).")
raise StopIteration()
return next_line
# For Python 2.x compatibility.
next = __next__
def readline(self, size=None):
"""Read a single line of text with EOF."""
# TODO: Add support for Unicode.
(line, nl) = self.__buffer.read_until_nl(self.__retrieve_data)
if self.__sf.access_type_has_universal_nl and nl is not None:
self.__newlines[nl] = True
return line
def readlines(self, sizehint=None):
self.__log.debug("Reading all lines.")
collected = []
total = 0
for line in iter(self):
collected.append(line)
total += len(line)
if sizehint is not None and total > sizehint:
break
self.__log.debug("Read whole file as (%d) lines." % (len(collected)))
return ''.join(collected)
@property
def closed(self):
raise False
@property
def encoding(self):
return None
@property
def mode(self):
return self.__sf.access_type_str
@property
def name(self):
return self.__sf.filepath
@property
def newlines(self):
if self.__sf.access_type_has_universal_nl is False:
raise AttributeError("Universal newlines are unavailable since "
"not requested.")
return tuple(self.__newlines.keys())
@property
def raw(self):
return self.__sf
|
dsoprea/PySecure | pysecure/sftp_mirror.py | SftpMirror.mirror | python | def mirror(self, handler, path_from, path_to, log_files=False):
q = deque([''])
while q:
path = q.popleft()
full_from = ('%s/%s' % (path_from, path)) if path else path_from
full_to = ('%s/%s' % (path_to, path)) if path else path_to
subdirs = handler(full_from, full_to, log_files)
for subdir in subdirs:
q.append(('%s/%s' % (path, subdir)) if path else subdir) | Recursively mirror the contents of "path_from" into "path_to".
"handler" should be self.mirror_to_local_no_recursion or
self.mirror_to_remote_no_recursion to represent which way the files are
moving. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/sftp_mirror.py#L26-L42 | null | class SftpMirror(object):
def __init__(self, sftp, allow_creates=True, allow_deletes=True,
create_cb=None, delete_cb=None):
self.__sftp_session = sftp
self.__allow_creates = allow_creates
self.__allow_deletes = allow_deletes
self.__create_cb = create_cb
self.__delete_cb = delete_cb
self.__log = logging.getLogger('SftpMirror')
def mirror(self, handler, path_from, path_to, log_files=False):
"""Recursively mirror the contents of "path_from" into "path_to".
"handler" should be self.mirror_to_local_no_recursion or
self.mirror_to_remote_no_recursion to represent which way the files are
moving.
"""
q = deque([''])
while q:
path = q.popleft()
full_from = ('%s/%s' % (path_from, path)) if path else path_from
full_to = ('%s/%s' % (path_to, path)) if path else path_to
subdirs = handler(full_from, full_to, log_files)
for subdir in subdirs:
q.append(('%s/%s' % (path, subdir)) if path else subdir)
def __get_local_files(self, path):
self.__log.debug("Checking local files.")
local_dirs = set()
def local_dir_cb(parent_path, full_path, filename):
local_dirs.add(filename)
local_entities = set()
local_files = set()
local_attributes = {}
def local_listing_cb(parent_path, listing):
for entry in listing:
(filename, mtime, size, flags) = entry
entity = (filename, mtime, size, flags[1])
local_entities.add(entity)
local_files.add(filename)
local_attributes[filename] = (datetime.fromtimestamp(mtime),
flags)
local_recurse(path,
local_dir_cb,
local_listing_cb,
MAX_MIRROR_LISTING_CHUNK_SIZE,
0)
self.__log.debug("TO:\n(%d) directories\n(%d) files found." %
(len(local_dirs), len(local_files)))
return (local_dirs, local_entities, local_files, local_attributes)
def __get_remote_files(self, path):
self.__log.debug("Checking remote files.")
# TODO: Decode all read paths/files from ASCII: (str).decode('ascii')
remote_dirs = set()
def remote_dir_cb(parent_path, full_path, entry):
remote_dirs.add(stringify(entry.name))
remote_entities = set()
remote_files = set()
remote_attributes = {}
def remote_listing_cb(parent_path, listing):
for (file_path, entry) in listing:
entity = (stringify(entry.name), entry.modified_time,
entry.size, entry.is_symlink)
remote_entities.add(entity)
remote_files.add(stringify(entry.name))
flags = (entry.is_regular, entry.is_symlink, entry.is_special)
remote_attributes[stringify(entry.name)] = \
(entry.modified_time_dt, flags)
self.__sftp_session.recurse(path,
remote_dir_cb,
remote_listing_cb,
MAX_MIRROR_LISTING_CHUNK_SIZE,
0)
self.__log.debug("FROM:\n(%d) directories\n(%d) files found." %
(len(remote_dirs), len(remote_files)))
return (remote_dirs, remote_entities, remote_files, remote_attributes)
def __get_deltas(self, from_tuple, to_tuple, log_files=False):
(to_dirs, to_entities, to_files, to_attributes) = to_tuple
(from_dirs, from_entities, from_files, from_attributes) = from_tuple
self.__log.debug("Checking deltas.")
# Now, calculate the differences.
new_dirs = from_dirs - to_dirs
if log_files is True:
for new_dir in new_dirs:
logging.debug("Will CREATE directory: %s" % (new_dir))
deleted_dirs = to_dirs - from_dirs
if log_files is True:
for deleted_dir in deleted_dirs:
logging.debug("Will DELETE directory: %s" % (deleted_dir))
# Get the files from FROM that aren't identical to existing TO
# entries. These will be copied.
new_entities = from_entities - to_entities
if log_files is True:
for new_entity in new_entities:
logging.debug("Will CREATE file: %s" % (new_entity[0]))
# Get the files from TO that aren't identical to existing FROM
# entries. These will be deleted.
deleted_entities = to_entities - from_entities
if log_files is True:
for deleted_entity in deleted_entities:
logging.debug("Will DELETE file: %s" % (deleted_entity[0]))
self.__log.debug("DELTA:\n(%d) new directories\n(%d) deleted "
"directories\n(%d) new files\n(%d) deleted "
"files" %
(len(new_dirs), len(deleted_dirs),
len(new_entities), len(deleted_entities)))
return (new_dirs, deleted_dirs, new_entities, deleted_entities)
def __fix_deltas_at_target(self, context, ops):
(from_tuple, path_from, path_to, delta_tuple) = context
(new_dirs, deleted_dirs, new_entities, deleted_entities) = delta_tuple
(unlink_, rmtree_, mkdir_, copy_, symlink_) = ops
self.__log.debug("Removing (%d) directories." % (len(deleted_dirs)))
# Delete all FROM-deleted non-directory entries, regardless of type.
if self.__allow_deletes is True:
for (name, mtime, size, is_link) in deleted_entities:
file_path = ('%s/%s' % (path_to, name))
self.__log.debug("UPDATE: Removing TO file-path: %s" %
(file_path))
if self.__delete_cb is None or \
self.__delete_cb(file_path, (mtime, size, is_link)) is True:
unlink_(file_path)
# Delete all FROM-deleted directories. We do this after the
# individual files are created so that, if all of the files from the
# directory are to be removed, we can show progress for each file
# rather than blocking on a tree-delete just to error-out on the
# unlink()'s, later.
if self.__allow_deletes is True:
for name in deleted_dirs:
final_path = ('%s/%s' % (path_to, name))
self.__log.debug("UPDATE: Removing TO directory: %s" %
(final_path))
if self.__delete_cb is None or \
self.__delete_cb(final_path, None) is True:
rmtree_(final_path)
# Create new directories.
if self.__allow_creates is True:
for name in new_dirs:
final_path = ('%s/%s' % (path_to, name))
self.__log.debug("UPDATE: Creating TO directory: %s" %
(final_path))
if self.__create_cb is None or \
self.__create_cb(final_path, None) is True:
mkdir_(final_path)
(from_dirs, from_entities, from_files, from_attributes) = from_tuple
# Write new/changed files. Handle all but "unknown" file types.
if self.__allow_creates is True:
for (name, mtime, size, is_link) in new_entities:
attr = from_attributes[name]
(mtime_dt, (is_regular, is_symlink, is_special)) = attr
filepath_from = ('%s/%s' % (path_from, name))
filepath_to = ('%s/%s' % (path_to, name))
if self.__create_cb is not None and \
self.__create_cb(filepath_to, (mtime, size, is_link)) is False:
continue
if is_regular:
self.__log.debug("UPDATE: Creating regular TO file-path: "
"%s" % (filepath_to))
copy_(filepath_from,
filepath_to,
mtime_dt)
elif is_symlink:
linked_to = self.__sftp_session.readlink(filepath_from)
self.__log.debug("UPDATE: Creating symlink at [%s] to [%s]." %
(filepath_to, linked_to))
# filepath_to: The physical file.
# linked_to: The target.
symlink_(linked_to, filepath_to)
elif is_special:
# SSH can't indulge us for devices, etc..
self.__log.warn("Skipping 'special' file at origin: %s" %
(filepath_from))
return list(from_dirs)
def __get_local_ops(self):
return (unlink,
rmtree,
mkdir,
self.__sftp_session.write_to_local,
symlink)
def __get_remote_ops(self):
return (self.__sftp_session.unlink,
self.__sftp_session.rmtree,
self.__sftp_session.mkdir,
self.__sftp_session.write_to_remote,
self.__sftp_session.symlink)
def mirror_to_local_no_recursion(self, path_from, path_to,
log_files=False):
"""Mirror a directory without descending into directories. Return a
list of subdirectory names (do not include full path). We will unlink
existing files without determining if they're just going to be
rewritten and then truncating them because it is our belief, based on
what little we could find, that unlinking is, usually, quicker than
truncating.
"""
self.__log.debug("Ensuring local target directory exists: %s" %
(path_to))
try:
mkdir(path_to)
except OSError:
already_exists = True
self.__log.debug("Local target already exists.")
else:
already_exists = False
self.__log.debug("Local target created.")
from_tuple = self.__get_remote_files(path_from)
to_tuple = self.__get_local_files(path_to)
delta_tuple = self.__get_deltas(from_tuple, to_tuple, log_files)
context = (from_tuple, path_from, path_to, delta_tuple)
ops = self.__get_local_ops()
return self.__fix_deltas_at_target(context, ops)
def mirror_to_remote_no_recursion(self, path_from, path_to,
log_files=False):
self.__log.debug("Ensuring remote target directory exists: %s" %
(path_to))
try:
self.__sftp_session.mkdir(path_to)
except SftpAlreadyExistsError:
already_exists = True
self.__log.debug("Remote target already exists.")
else:
already_exists = False
self.__log.debug("Remote target created.")
from_tuple = self.__get_local_files(path_from)
to_tuple = self.__get_remote_files(path_to)
delta_tuple = self.__get_deltas(from_tuple, to_tuple, log_files)
context = (from_tuple, path_from, path_to, delta_tuple)
ops = self.__get_remote_ops()
return self.__fix_deltas_at_target(context, ops)
|
dsoprea/PySecure | pysecure/sftp_mirror.py | SftpMirror.mirror_to_local_no_recursion | python | def mirror_to_local_no_recursion(self, path_from, path_to,
log_files=False):
self.__log.debug("Ensuring local target directory exists: %s" %
(path_to))
try:
mkdir(path_to)
except OSError:
already_exists = True
self.__log.debug("Local target already exists.")
else:
already_exists = False
self.__log.debug("Local target created.")
from_tuple = self.__get_remote_files(path_from)
to_tuple = self.__get_local_files(path_to)
delta_tuple = self.__get_deltas(from_tuple, to_tuple, log_files)
context = (from_tuple, path_from, path_to, delta_tuple)
ops = self.__get_local_ops()
return self.__fix_deltas_at_target(context, ops) | Mirror a directory without descending into directories. Return a
list of subdirectory names (do not include full path). We will unlink
existing files without determining if they're just going to be
rewritten and then truncating them because it is our belief, based on
what little we could find, that unlinking is, usually, quicker than
truncating. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/sftp_mirror.py#L253-L282 | [
"def __get_local_files(self, path):\n self.__log.debug(\"Checking local files.\")\n\n local_dirs = set()\n def local_dir_cb(parent_path, full_path, filename):\n local_dirs.add(filename)\n\n local_entities = set()\n local_files = set()\n local_attributes = {}\n def local_listing_cb(parent_path, listing):\n for entry in listing:\n (filename, mtime, size, flags) = entry\n\n entity = (filename, mtime, size, flags[1])\n local_entities.add(entity)\n local_files.add(filename)\n local_attributes[filename] = (datetime.fromtimestamp(mtime), \n flags)\n\n local_recurse(path, \n local_dir_cb, \n local_listing_cb, \n MAX_MIRROR_LISTING_CHUNK_SIZE, \n 0)\n\n self.__log.debug(\"TO:\\n(%d) directories\\n(%d) files found.\" % \n (len(local_dirs), len(local_files)))\n\n return (local_dirs, local_entities, local_files, local_attributes)\n",
" def __get_remote_files(self, path):\n self.__log.debug(\"Checking remote files.\")\n\n# TODO: Decode all read paths/files from ASCII: (str).decode('ascii')\n\n remote_dirs = set()\n def remote_dir_cb(parent_path, full_path, entry):\n remote_dirs.add(stringify(entry.name))\n\n remote_entities = set()\n remote_files = set()\n remote_attributes = {}\n def remote_listing_cb(parent_path, listing):\n for (file_path, entry) in listing:\n entity = (stringify(entry.name), entry.modified_time, \n entry.size, entry.is_symlink)\n\n remote_entities.add(entity)\n remote_files.add(stringify(entry.name))\n\n flags = (entry.is_regular, entry.is_symlink, entry.is_special)\n remote_attributes[stringify(entry.name)] = \\\n (entry.modified_time_dt, flags)\n\n self.__sftp_session.recurse(path,\n remote_dir_cb, \n remote_listing_cb, \n MAX_MIRROR_LISTING_CHUNK_SIZE,\n 0)\n\n self.__log.debug(\"FROM:\\n(%d) directories\\n(%d) files found.\" % \n (len(remote_dirs), len(remote_files)))\n\n return (remote_dirs, remote_entities, remote_files, remote_attributes)\n",
"def __get_deltas(self, from_tuple, to_tuple, log_files=False):\n (to_dirs, to_entities, to_files, to_attributes) = to_tuple\n (from_dirs, from_entities, from_files, from_attributes) = from_tuple\n\n self.__log.debug(\"Checking deltas.\")\n\n # Now, calculate the differences.\n\n new_dirs = from_dirs - to_dirs\n\n if log_files is True:\n for new_dir in new_dirs:\n logging.debug(\"Will CREATE directory: %s\" % (new_dir))\n\n deleted_dirs = to_dirs - from_dirs\n\n if log_files is True:\n for deleted_dir in deleted_dirs:\n logging.debug(\"Will DELETE directory: %s\" % (deleted_dir))\n\n # Get the files from FROM that aren't identical to existing TO \n # entries. These will be copied.\n new_entities = from_entities - to_entities\n\n if log_files is True:\n for new_entity in new_entities:\n logging.debug(\"Will CREATE file: %s\" % (new_entity[0]))\n\n # Get the files from TO that aren't identical to existing FROM\n # entries. These will be deleted.\n deleted_entities = to_entities - from_entities\n\n if log_files is True:\n for deleted_entity in deleted_entities:\n logging.debug(\"Will DELETE file: %s\" % (deleted_entity[0]))\n\n self.__log.debug(\"DELTA:\\n(%d) new directories\\n(%d) deleted \"\n \"directories\\n(%d) new files\\n(%d) deleted \"\n \"files\" % \n (len(new_dirs), len(deleted_dirs), \n len(new_entities), len(deleted_entities)))\n\n return (new_dirs, deleted_dirs, new_entities, deleted_entities)\n",
"def __fix_deltas_at_target(self, context, ops):\n (from_tuple, path_from, path_to, delta_tuple) = context\n (new_dirs, deleted_dirs, new_entities, deleted_entities) = delta_tuple\n (unlink_, rmtree_, mkdir_, copy_, symlink_) = ops\n\n self.__log.debug(\"Removing (%d) directories.\" % (len(deleted_dirs)))\n\n # Delete all FROM-deleted non-directory entries, regardless of type.\n\n if self.__allow_deletes is True:\n for (name, mtime, size, is_link) in deleted_entities:\n file_path = ('%s/%s' % (path_to, name))\n self.__log.debug(\"UPDATE: Removing TO file-path: %s\" % \n (file_path))\n\n if self.__delete_cb is None or \\\n self.__delete_cb(file_path, (mtime, size, is_link)) is True:\n unlink_(file_path)\n\n # Delete all FROM-deleted directories. We do this after the \n # individual files are created so that, if all of the files from the\n # directory are to be removed, we can show progress for each file \n # rather than blocking on a tree-delete just to error-out on the \n # unlink()'s, later.\n if self.__allow_deletes is True:\n for name in deleted_dirs:\n final_path = ('%s/%s' % (path_to, name))\n self.__log.debug(\"UPDATE: Removing TO directory: %s\" % \n (final_path))\n\n if self.__delete_cb is None or \\\n self.__delete_cb(final_path, None) is True:\n rmtree_(final_path)\n\n # Create new directories.\n if self.__allow_creates is True:\n for name in new_dirs:\n final_path = ('%s/%s' % (path_to, name))\n self.__log.debug(\"UPDATE: Creating TO directory: %s\" % \n (final_path))\n\n if self.__create_cb is None or \\\n self.__create_cb(final_path, None) is True:\n mkdir_(final_path)\n\n (from_dirs, from_entities, from_files, from_attributes) = from_tuple\n\n # Write new/changed files. 
Handle all but \"unknown\" file types.\n if self.__allow_creates is True:\n for (name, mtime, size, is_link) in new_entities:\n attr = from_attributes[name]\n (mtime_dt, (is_regular, is_symlink, is_special)) = attr\n\n filepath_from = ('%s/%s' % (path_from, name))\n filepath_to = ('%s/%s' % (path_to, name))\n\n if self.__create_cb is not None and \\\n self.__create_cb(filepath_to, (mtime, size, is_link)) is False:\n continue\n\n if is_regular:\n self.__log.debug(\"UPDATE: Creating regular TO file-path: \"\n \"%s\" % (filepath_to))\n\n copy_(filepath_from, \n filepath_to, \n mtime_dt)\n\n elif is_symlink:\n linked_to = self.__sftp_session.readlink(filepath_from)\n\n self.__log.debug(\"UPDATE: Creating symlink at [%s] to [%s].\" % \n (filepath_to, linked_to))\n\n # filepath_to: The physical file.\n # linked_to: The target.\n symlink_(linked_to, filepath_to)\n\n elif is_special:\n # SSH can't indulge us for devices, etc..\n self.__log.warn(\"Skipping 'special' file at origin: %s\" % \n (filepath_from))\n\n return list(from_dirs)\n",
"def __get_local_ops(self):\n return (unlink, \n rmtree, \n mkdir, \n self.__sftp_session.write_to_local, \n symlink)\n"
] | class SftpMirror(object):
def __init__(self, sftp, allow_creates=True, allow_deletes=True,
create_cb=None, delete_cb=None):
self.__sftp_session = sftp
self.__allow_creates = allow_creates
self.__allow_deletes = allow_deletes
self.__create_cb = create_cb
self.__delete_cb = delete_cb
self.__log = logging.getLogger('SftpMirror')
def mirror(self, handler, path_from, path_to, log_files=False):
"""Recursively mirror the contents of "path_from" into "path_to".
"handler" should be self.mirror_to_local_no_recursion or
self.mirror_to_remote_no_recursion to represent which way the files are
moving.
"""
q = deque([''])
while q:
path = q.popleft()
full_from = ('%s/%s' % (path_from, path)) if path else path_from
full_to = ('%s/%s' % (path_to, path)) if path else path_to
subdirs = handler(full_from, full_to, log_files)
for subdir in subdirs:
q.append(('%s/%s' % (path, subdir)) if path else subdir)
def __get_local_files(self, path):
self.__log.debug("Checking local files.")
local_dirs = set()
def local_dir_cb(parent_path, full_path, filename):
local_dirs.add(filename)
local_entities = set()
local_files = set()
local_attributes = {}
def local_listing_cb(parent_path, listing):
for entry in listing:
(filename, mtime, size, flags) = entry
entity = (filename, mtime, size, flags[1])
local_entities.add(entity)
local_files.add(filename)
local_attributes[filename] = (datetime.fromtimestamp(mtime),
flags)
local_recurse(path,
local_dir_cb,
local_listing_cb,
MAX_MIRROR_LISTING_CHUNK_SIZE,
0)
self.__log.debug("TO:\n(%d) directories\n(%d) files found." %
(len(local_dirs), len(local_files)))
return (local_dirs, local_entities, local_files, local_attributes)
def __get_remote_files(self, path):
self.__log.debug("Checking remote files.")
# TODO: Decode all read paths/files from ASCII: (str).decode('ascii')
remote_dirs = set()
def remote_dir_cb(parent_path, full_path, entry):
remote_dirs.add(stringify(entry.name))
remote_entities = set()
remote_files = set()
remote_attributes = {}
def remote_listing_cb(parent_path, listing):
for (file_path, entry) in listing:
entity = (stringify(entry.name), entry.modified_time,
entry.size, entry.is_symlink)
remote_entities.add(entity)
remote_files.add(stringify(entry.name))
flags = (entry.is_regular, entry.is_symlink, entry.is_special)
remote_attributes[stringify(entry.name)] = \
(entry.modified_time_dt, flags)
self.__sftp_session.recurse(path,
remote_dir_cb,
remote_listing_cb,
MAX_MIRROR_LISTING_CHUNK_SIZE,
0)
self.__log.debug("FROM:\n(%d) directories\n(%d) files found." %
(len(remote_dirs), len(remote_files)))
return (remote_dirs, remote_entities, remote_files, remote_attributes)
def __get_deltas(self, from_tuple, to_tuple, log_files=False):
(to_dirs, to_entities, to_files, to_attributes) = to_tuple
(from_dirs, from_entities, from_files, from_attributes) = from_tuple
self.__log.debug("Checking deltas.")
# Now, calculate the differences.
new_dirs = from_dirs - to_dirs
if log_files is True:
for new_dir in new_dirs:
logging.debug("Will CREATE directory: %s" % (new_dir))
deleted_dirs = to_dirs - from_dirs
if log_files is True:
for deleted_dir in deleted_dirs:
logging.debug("Will DELETE directory: %s" % (deleted_dir))
# Get the files from FROM that aren't identical to existing TO
# entries. These will be copied.
new_entities = from_entities - to_entities
if log_files is True:
for new_entity in new_entities:
logging.debug("Will CREATE file: %s" % (new_entity[0]))
# Get the files from TO that aren't identical to existing FROM
# entries. These will be deleted.
deleted_entities = to_entities - from_entities
if log_files is True:
for deleted_entity in deleted_entities:
logging.debug("Will DELETE file: %s" % (deleted_entity[0]))
self.__log.debug("DELTA:\n(%d) new directories\n(%d) deleted "
"directories\n(%d) new files\n(%d) deleted "
"files" %
(len(new_dirs), len(deleted_dirs),
len(new_entities), len(deleted_entities)))
return (new_dirs, deleted_dirs, new_entities, deleted_entities)
def __fix_deltas_at_target(self, context, ops):
(from_tuple, path_from, path_to, delta_tuple) = context
(new_dirs, deleted_dirs, new_entities, deleted_entities) = delta_tuple
(unlink_, rmtree_, mkdir_, copy_, symlink_) = ops
self.__log.debug("Removing (%d) directories." % (len(deleted_dirs)))
# Delete all FROM-deleted non-directory entries, regardless of type.
if self.__allow_deletes is True:
for (name, mtime, size, is_link) in deleted_entities:
file_path = ('%s/%s' % (path_to, name))
self.__log.debug("UPDATE: Removing TO file-path: %s" %
(file_path))
if self.__delete_cb is None or \
self.__delete_cb(file_path, (mtime, size, is_link)) is True:
unlink_(file_path)
# Delete all FROM-deleted directories. We do this after the
# individual files are created so that, if all of the files from the
# directory are to be removed, we can show progress for each file
# rather than blocking on a tree-delete just to error-out on the
# unlink()'s, later.
if self.__allow_deletes is True:
for name in deleted_dirs:
final_path = ('%s/%s' % (path_to, name))
self.__log.debug("UPDATE: Removing TO directory: %s" %
(final_path))
if self.__delete_cb is None or \
self.__delete_cb(final_path, None) is True:
rmtree_(final_path)
# Create new directories.
if self.__allow_creates is True:
for name in new_dirs:
final_path = ('%s/%s' % (path_to, name))
self.__log.debug("UPDATE: Creating TO directory: %s" %
(final_path))
if self.__create_cb is None or \
self.__create_cb(final_path, None) is True:
mkdir_(final_path)
(from_dirs, from_entities, from_files, from_attributes) = from_tuple
# Write new/changed files. Handle all but "unknown" file types.
if self.__allow_creates is True:
for (name, mtime, size, is_link) in new_entities:
attr = from_attributes[name]
(mtime_dt, (is_regular, is_symlink, is_special)) = attr
filepath_from = ('%s/%s' % (path_from, name))
filepath_to = ('%s/%s' % (path_to, name))
if self.__create_cb is not None and \
self.__create_cb(filepath_to, (mtime, size, is_link)) is False:
continue
if is_regular:
self.__log.debug("UPDATE: Creating regular TO file-path: "
"%s" % (filepath_to))
copy_(filepath_from,
filepath_to,
mtime_dt)
elif is_symlink:
linked_to = self.__sftp_session.readlink(filepath_from)
self.__log.debug("UPDATE: Creating symlink at [%s] to [%s]." %
(filepath_to, linked_to))
# filepath_to: The physical file.
# linked_to: The target.
symlink_(linked_to, filepath_to)
elif is_special:
# SSH can't indulge us for devices, etc..
self.__log.warn("Skipping 'special' file at origin: %s" %
(filepath_from))
return list(from_dirs)
def __get_local_ops(self):
return (unlink,
rmtree,
mkdir,
self.__sftp_session.write_to_local,
symlink)
def __get_remote_ops(self):
return (self.__sftp_session.unlink,
self.__sftp_session.rmtree,
self.__sftp_session.mkdir,
self.__sftp_session.write_to_remote,
self.__sftp_session.symlink)
def mirror_to_remote_no_recursion(self, path_from, path_to,
log_files=False):
self.__log.debug("Ensuring remote target directory exists: %s" %
(path_to))
try:
self.__sftp_session.mkdir(path_to)
except SftpAlreadyExistsError:
already_exists = True
self.__log.debug("Remote target already exists.")
else:
already_exists = False
self.__log.debug("Remote target created.")
from_tuple = self.__get_local_files(path_from)
to_tuple = self.__get_remote_files(path_to)
delta_tuple = self.__get_deltas(from_tuple, to_tuple, log_files)
context = (from_tuple, path_from, path_to, delta_tuple)
ops = self.__get_remote_ops()
return self.__fix_deltas_at_target(context, ops)
|
dsoprea/PySecure | pysecure/easy.py | connect_ssh_with_cb | python | def connect_ssh_with_cb(ssh_cb, user, host, auth_cb, allow_new=True,
verbosity=0):
with connect_ssh(user, host, auth_cb, allow_new=True, verbosity=0) as ssh:
ssh_cb(ssh) | A "managed" SSH session. When the session is ready, we'll invoke the
"ssh_cb" callback. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/easy.py#L20-L27 | [
"def __ssh_cb(self, ssh):\n host_remote = 'localhost'\n port_remote = 80\n host_source = 'localhost'\n port_local = 1111\n data = b\"GET / HTTP/1.1\\nHost: localhost\\n\\n\"\n\n with SshChannel(ssh) as sc:\n sc.open_forward(host_remote, \n port_remote, \n host_source, \n port_local)\n\n sc.write(data)\n\n received = sc.read(1024)\n",
" def __ssh_cb(self, ssh):\n def build_body(status_code, status_string, content):\n replacements = { 'scode': status_code,\n 'sstring': status_string,\n 'length': len(content),\n 'content': content }\n\n return \"\"\"HTTP/1.1 %(scode)d %(sstring)s\nContent-Type: text/html\nContent-Length: %(length)d\n\n%(content)s\"\"\" % replacements\n\n response_helloworld = build_body(200, 'OK', \"\"\"<html>\n<head>\n<title>Hello, World!</title>\n</head>\n<body>\n<h1>Hello, World!</h1>\n</body>\n</html>\n\"\"\")\n\n response_notfound = build_body(404, 'Not found', \"\"\"<html>\n<head>\n<title>Not Found</title>\n</head>\n<body>\n<h1>Resource not found.</h1>\n</body>\n</html>\n\"\"\")\n\n response_error = build_body(500, 'Server error', \"\"\"<html>\n<head>\n<title>Server Error</title>\n</head>\n<body>\n<h1>There was a server failure.</h1>\n</body>\n</html>\n\"\"\")\n\n server_address = None\n server_port = 8080\n accept_timeout_ms = 60000\n\n print(\"Setting listen.\")\n port = ssh.forward_listen(server_address, server_port)\n\n print(\"Waiting for connection.\")\n\n with ssh.forward_accept(accept_timeout_ms) as sc:\n while 1:\n buffer_ = sc.read(2048)\n print(buffer_)\n if buffer_ == b'':\n continue\n\n try:\n nl_index = buffer_.index(b'\\n')\n except ValueError:\n print(\"Error with:\\n%s\" % (len(buffer_)))\n payload = response_error\n else:\n request_line = buffer_[:nl_index]\n\n if request_line[:6] == b'GET / ':\n print(\"Responding: %s\" % (request_line))\n payload = response_helloworld\n else:\n print(\"Ignoring: %s\" % (request_line))\n payload = response_notfound\n\n sc.write(payload)\n print(\"Sent answer.\")\n",
"def __ssh_cb(self, ssh):\n print(\"Is blocking: %s\" % (ssh.is_blocking()))\n\n server_address = None\n server_port = 8080\n accept_timeout_ms = 60000\n\n port = ssh.forward_listen(server_address, server_port)\n with ssh.forward_accept(accept_timeout_ms) as sc:\n print(\"Waiting for X11 connection.\")\n x11_channel = sc.accept_x11(60000)\n\n print(\"Requesting.\")\n x11_channel.request_x11()\n\n print(\"Looping.\")\n while 1:\n sleep(.1)\n",
" def __ssh_cb(self, ssh):\n data = ssh.execute('lsb_release -a')\n# print(data)\n\n data = ssh.execute('whoami')\n",
" def __ssh_cb(self, ssh):\n rsp = RemoteShellProcessor(ssh)\n\n def shell_context_cb(sc, welcome):\n# print('-' * 50 + '\\n' + \n# welcome + '\\n' + \n# '-' * 50)\n\n output = rsp.do_command('whoami')\n# print(output)\n\n# output = rsp.do_command('cat /proc/uptime')\n# print(output)\n\n# Doesn't work. See bug report at libssh.\n# print(\"Setting environment.\")\n# sc.request_env('aa', 'bb')\n# sc.request_env('LANG', 'en_US.UTF-8')\n\n rsp.shell(shell_context_cb)\n",
"def __ssh_cb(self, ssh):\n print(\"Disconnect message: %s\" % (ssh.get_disconnect_message(),))\n"
] | import logging
import contextlib
from pysecure.adapters.ssha import SshSession, SshConnect, SshSystem, \
PublicKeyHash, ssh_pki_import_privkey_file
from pysecure.adapters.sftpa import SftpSession, SftpFile
@contextlib.contextmanager
def connect_ssh(user, host, auth_cb, allow_new=True, *args, **kwargs):
with SshSystem():
with SshSession(user=user, host=host, *args, **kwargs) as ssh:
with SshConnect(ssh):
logging.debug("Ready to authenticate.")
ssh.is_server_known(allow_new=allow_new)
auth_cb(ssh)
yield ssh
@contextlib.contextmanager
def _connect_sftp(ssh, *args, **kwargs):
"""A "managed" SFTP session. When the SSH session and an additional SFTP
session are ready, invoke the sftp_cb callback.
"""
with SftpSession(ssh) as sftp:
yield (ssh, sftp)
# TODO(dustin): Deprecate this call.
def connect_sftp_with_cb(sftp_cb, *args, **kwargs):
"""A "managed" SFTP session. When the SSH session and an additional SFTP
session are ready, invoke the sftp_cb callback.
"""
with _connect_sftp(*args, **kwargs) as (ssh, sftp):
sftp_cb(ssh, sftp)
def get_key_auth_cb(key_filepath):
"""This is just a convenience function for key-based login."""
def auth_cb(ssh):
key = ssh_pki_import_privkey_file(key_filepath)
ssh.userauth_publickey(key)
return auth_cb
def get_password_auth_cb(password):
"""This is just a convenience function for password-based login."""
def auth_cb(ssh):
ssh.userauth_password(password)
return auth_cb
class EasySsh(object):
"""This class allows a connection to be opened and closed at two separate
points (as opposed to the callback methods, above).
"""
def __init__(self, user, host, auth_cb, allow_new=True, **session_args):
self.__user = user
self.__host = host
self.__auth_cb = auth_cb
self.__allow_new = allow_new
self.__session_args = session_args
self.__log = logging.getLogger('EasySsh')
self.__ssh_session = None
self.__ssh_opened = False
self.__sftp_session = None
self.__sftp_opened = False
def __del__(self):
if self.__ssh_opened is True:
self.close_ssh()
def open_ssh(self):
self.__log.debug("Opening SSH.")
if self.__ssh_opened is True:
raise Exception("Can not open SFTP session that is already open.")
# TODO: This might be required to only be run once, globally.
self.__system = SshSystem()
self.__system.open()
self.__ssh_session = SshSession(user=self.__user, host=self.__host,
**self.__session_args)
self.__ssh_session.open()
self.__connect = SshConnect(self.__ssh_session)
self.__connect.open()
self.__ssh_session.is_server_known(allow_new=self.__allow_new)
self.__auth_cb(self.__ssh_session)
self.__ssh_opened = True
def close_ssh(self):
self.__log.debug("Closing SSH.")
if self.__ssh_opened is False:
raise Exception("Can not close SSH session that is not currently "
"opened.")
if self.__sftp_opened is True:
self.close_sftp()
self.__connect.close()
self.__ssh_session.close()
self.__system.close()
self.__ssh_session = None
self.__ssh_opened = False
def open_sftp(self):
self.__log.debug("Opening SFTP.")
if self.__sftp_opened is True:
raise Exception("Can not open SFTP session that is already open.")
self.__sftp_session = SftpSession(self.__ssh_session)
self.__sftp_session.open()
self.__sftp_opened = True
def close_sftp(self):
self.__log.debug("Closing SFTP.")
if self.__sftp_opened is False:
raise Exception("Can not close SFTP session that is not currently "
"opened.")
self.__sftp_session.close()
self.__sftp_session = None
self.__sftp_opened = False
@property
def ssh(self):
if self.__ssh_opened is False:
raise Exception("Can not return an SSH session. A session is not "
"open.")
return self.__ssh_session
@property
def sftp(self):
if self.__sftp_opened is False:
raise Exception("Can not return an SFTP session. A session is not "
"open.")
return self.__sftp_session
|
dsoprea/PySecure | pysecure/easy.py | connect_sftp_with_cb | python | def connect_sftp_with_cb(sftp_cb, *args, **kwargs):
with _connect_sftp(*args, **kwargs) as (ssh, sftp):
sftp_cb(ssh, sftp) | A "managed" SFTP session. When the SSH session and an additional SFTP
session are ready, invoke the sftp_cb callback. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/easy.py#L39-L45 | [
"def sftp_cb(ssh, sftp):\n print(\"Name Size Perms Owner\\tGroup\\n\")\n for attributes in sftp.listdir('.'):\n print(\"%-40s %10d %.8o %s(%d)\\t%s(%d)\" % \n (attributes.name[0:40], attributes.size, \n attributes.permissions, attributes.owner, \n attributes.uid, attributes.group,\n attributes.gid))\n",
" def __sftp_cb(self, ssh, sftp):\n# print(\"Opening file.\")\n\n with SftpFile(sftp, 'test_libgksu2.so.0', 'r') as sf:\n buffer_ = sf.read()\n\n with open('/tmp/sftp_dump', 'wb') as f:\n f.write(buffer_)\n",
" def __sftp_cb(self, ssh, sftp):\n# print(\"Creating directory.\")\n sftp.mkdir(\"xyz\")\n\n# print(\"Removing directory.\")\n sftp.rmdir(\"xyz\")\n",
"def __sftp_cb(self, ssh, sftp):\n print(\"SFTP\")\n\n mirror = SftpMirror(sftp)\n\n# mirror.mirror(mirror.mirror_to_local_no_recursion, \n# \"Pictures\", \n# \"/tmp/Pictures\", \n# log_files=True)\n\n mirror.mirror(mirror.mirror_to_remote_no_recursion, \n \"/home/dustin/Pictures\", \n \"/tmp/RemotePictures\", \n log_files=True)\n",
" def __sftp_cb(self, ssh, sftp):\n test_data = b'1234'\n\n with SftpFile(sftp, 'test_sftp_file', 'r+') as sf:\n# print(\"Position at top of file: %d\" % (sf.tell()))\n\n sf.write(test_data)\n# print(\"Position at bottom of file: %d\" % (sf.tell()))\n\n sf.seek(0)\n# print(\"Position at position (0): %d\" % (sf.tell()))\n\n buffer_ = sf.read(100)\n# print(\"Read: [%s]\" % (buffer_))\n\n# print(\"Position after read: %d\" % (sf.tell()))\n sf.seek(0)\n\n# print(\"Position after rewind: %d\" % (sf.tell()))\n\n buffer_ = sf.read(100)\n# print(\"Read 1: (%d) bytes\" % (len(buffer_)))\n# print(\"Position after read 1: %d\" % (sf.tell()))\n\n buffer_ = sf.read(100)\n# print(\"Read 2: (%d) bytes\" % (len(buffer_)))\n# print(\"Position after read 2: %d\" % (sf.tell()))\n\n attr = sf.raw.fstat()\n",
" def __sftp_cb(self, ssh, sftp):\n# print(\"Name Size Perms Owner\\tGroup\\n\")\n for attributes in sftp.listdir('.'):\n print(\"%-40s %10d %.8o %s(%d)\\t%s(%d)\" % \n (attributes.name[0:40], attributes.size, \n attributes.permissions, attributes.owner, \n attributes.uid, attributes.group, attributes.gid))\n",
"def __sftp_cb(self, ssh, sftp):\n mirror = SftpMirror(sftp)\n\n mirror.mirror(mirror.mirror_to_local_no_recursion, \n \"Pictures\", \n \"/tmp/Pictures\", \n log_files=True)\n",
" def __sftp_cb(self, ssh, sftp):\n def dir_cb(path, full_path, entry):\n# print(\"DIR: %s\" % (full_path))\n pass\n\n def listing_cb(path, list_):\n# print(\"[%s]: (%d) files\" % (path, len(list_)))\n pass\n\n sftp.recurse('Pictures', dir_cb, listing_cb)\n",
" def __sftp_cb(self, ssh, sftp):\n with SftpFile(sftp, 'test_doc_rfc1958.txt') as sf:\n i = 0\n for data in sf:\n# stdout.write(\"> \" + data)\n\n if i >= 30:\n break\n\n i += 1\n",
"def __sftp_cb(self, ssh, sftp):\n test_data = b'1234'\n with SftpFile(sftp, 'sftp_write.txt', 'w') as sf:\n sf.write(test_data)\n"
] | import logging
import contextlib
from pysecure.adapters.ssha import SshSession, SshConnect, SshSystem, \
PublicKeyHash, ssh_pki_import_privkey_file
from pysecure.adapters.sftpa import SftpSession, SftpFile
@contextlib.contextmanager
def connect_ssh(user, host, auth_cb, allow_new=True, *args, **kwargs):
with SshSystem():
with SshSession(user=user, host=host, *args, **kwargs) as ssh:
with SshConnect(ssh):
logging.debug("Ready to authenticate.")
ssh.is_server_known(allow_new=allow_new)
auth_cb(ssh)
yield ssh
def connect_ssh_with_cb(ssh_cb, user, host, auth_cb, allow_new=True,
verbosity=0):
"""A "managed" SSH session. When the session is ready, we'll invoke the
"ssh_cb" callback.
"""
with connect_ssh(user, host, auth_cb, allow_new=True, verbosity=0) as ssh:
ssh_cb(ssh)
@contextlib.contextmanager
def _connect_sftp(ssh, *args, **kwargs):
"""A "managed" SFTP session. When the SSH session and an additional SFTP
session are ready, invoke the sftp_cb callback.
"""
with SftpSession(ssh) as sftp:
yield (ssh, sftp)
# TODO(dustin): Deprecate this call.
def get_key_auth_cb(key_filepath):
"""This is just a convenience function for key-based login."""
def auth_cb(ssh):
key = ssh_pki_import_privkey_file(key_filepath)
ssh.userauth_publickey(key)
return auth_cb
def get_password_auth_cb(password):
"""This is just a convenience function for password-based login."""
def auth_cb(ssh):
ssh.userauth_password(password)
return auth_cb
class EasySsh(object):
"""This class allows a connection to be opened and closed at two separate
points (as opposed to the callback methods, above).
"""
def __init__(self, user, host, auth_cb, allow_new=True, **session_args):
self.__user = user
self.__host = host
self.__auth_cb = auth_cb
self.__allow_new = allow_new
self.__session_args = session_args
self.__log = logging.getLogger('EasySsh')
self.__ssh_session = None
self.__ssh_opened = False
self.__sftp_session = None
self.__sftp_opened = False
def __del__(self):
if self.__ssh_opened is True:
self.close_ssh()
def open_ssh(self):
self.__log.debug("Opening SSH.")
if self.__ssh_opened is True:
raise Exception("Can not open SFTP session that is already open.")
# TODO: This might be required to only be run once, globally.
self.__system = SshSystem()
self.__system.open()
self.__ssh_session = SshSession(user=self.__user, host=self.__host,
**self.__session_args)
self.__ssh_session.open()
self.__connect = SshConnect(self.__ssh_session)
self.__connect.open()
self.__ssh_session.is_server_known(allow_new=self.__allow_new)
self.__auth_cb(self.__ssh_session)
self.__ssh_opened = True
def close_ssh(self):
self.__log.debug("Closing SSH.")
if self.__ssh_opened is False:
raise Exception("Can not close SSH session that is not currently "
"opened.")
if self.__sftp_opened is True:
self.close_sftp()
self.__connect.close()
self.__ssh_session.close()
self.__system.close()
self.__ssh_session = None
self.__ssh_opened = False
def open_sftp(self):
self.__log.debug("Opening SFTP.")
if self.__sftp_opened is True:
raise Exception("Can not open SFTP session that is already open.")
self.__sftp_session = SftpSession(self.__ssh_session)
self.__sftp_session.open()
self.__sftp_opened = True
def close_sftp(self):
self.__log.debug("Closing SFTP.")
if self.__sftp_opened is False:
raise Exception("Can not close SFTP session that is not currently "
"opened.")
self.__sftp_session.close()
self.__sftp_session = None
self.__sftp_opened = False
@property
def ssh(self):
if self.__ssh_opened is False:
raise Exception("Can not return an SSH session. A session is not "
"open.")
return self.__ssh_session
@property
def sftp(self):
if self.__sftp_opened is False:
raise Exception("Can not return an SFTP session. A session is not "
"open.")
return self.__sftp_session
|
dsoprea/PySecure | pysecure/easy.py | get_key_auth_cb | python | def get_key_auth_cb(key_filepath):
def auth_cb(ssh):
key = ssh_pki_import_privkey_file(key_filepath)
ssh.userauth_publickey(key)
return auth_cb | This is just a convenience function for key-based login. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/easy.py#L47-L54 | null | import logging
import contextlib
from pysecure.adapters.ssha import SshSession, SshConnect, SshSystem, \
PublicKeyHash, ssh_pki_import_privkey_file
from pysecure.adapters.sftpa import SftpSession, SftpFile
@contextlib.contextmanager
def connect_ssh(user, host, auth_cb, allow_new=True, *args, **kwargs):
with SshSystem():
with SshSession(user=user, host=host, *args, **kwargs) as ssh:
with SshConnect(ssh):
logging.debug("Ready to authenticate.")
ssh.is_server_known(allow_new=allow_new)
auth_cb(ssh)
yield ssh
def connect_ssh_with_cb(ssh_cb, user, host, auth_cb, allow_new=True,
verbosity=0):
"""A "managed" SSH session. When the session is ready, we'll invoke the
"ssh_cb" callback.
"""
with connect_ssh(user, host, auth_cb, allow_new=True, verbosity=0) as ssh:
ssh_cb(ssh)
@contextlib.contextmanager
def _connect_sftp(ssh, *args, **kwargs):
"""A "managed" SFTP session. When the SSH session and an additional SFTP
session are ready, invoke the sftp_cb callback.
"""
with SftpSession(ssh) as sftp:
yield (ssh, sftp)
# TODO(dustin): Deprecate this call.
def connect_sftp_with_cb(sftp_cb, *args, **kwargs):
"""A "managed" SFTP session. When the SSH session and an additional SFTP
session are ready, invoke the sftp_cb callback.
"""
with _connect_sftp(*args, **kwargs) as (ssh, sftp):
sftp_cb(ssh, sftp)
def get_password_auth_cb(password):
"""This is just a convenience function for password-based login."""
def auth_cb(ssh):
ssh.userauth_password(password)
return auth_cb
class EasySsh(object):
    """This class allows a connection to be opened and closed at two separate
    points (as opposed to the callback methods, above).

    The SSH transport (open_ssh/close_ssh) and the optional SFTP subsystem
    layered on top of it (open_sftp/close_sftp) are managed independently.
    """

    def __init__(self, user, host, auth_cb, allow_new=True, **session_args):
        # auth_cb is invoked with the open SshSession to authenticate (see
        # get_password_auth_cb). allow_new controls whether a previously
        # unknown host-key may be accepted. session_args are forwarded to
        # SshSession.
        self.__user = user
        self.__host = host
        self.__auth_cb = auth_cb
        self.__allow_new = allow_new
        self.__session_args = session_args
        self.__log = logging.getLogger('EasySsh')
        self.__ssh_session = None
        self.__ssh_opened = False
        self.__sftp_session = None
        self.__sftp_opened = False

    def __del__(self):
        # Best-effort cleanup if the caller forgot to close the session.
        if self.__ssh_opened is True:
            self.close_ssh()

    def open_ssh(self):
        """Open the SSH transport, verify the host-key, and authenticate."""
        self.__log.debug("Opening SSH.")
        if self.__ssh_opened is True:
            # NOTE(review): message says "SFTP" but this guards the SSH
            # session -- confirm and correct the wording.
            raise Exception("Can not open SFTP session that is already open.")
        # TODO: This might be required to only be run once, globally.
        self.__system = SshSystem()
        self.__system.open()
        self.__ssh_session = SshSession(user=self.__user, host=self.__host,
                                        **self.__session_args)
        self.__ssh_session.open()
        self.__connect = SshConnect(self.__ssh_session)
        self.__connect.open()
        self.__ssh_session.is_server_known(allow_new=self.__allow_new)
        self.__auth_cb(self.__ssh_session)
        self.__ssh_opened = True

    def close_ssh(self):
        """Tear down the SSH transport (closing any open SFTP layer first)."""
        self.__log.debug("Closing SSH.")
        if self.__ssh_opened is False:
            raise Exception("Can not close SSH session that is not currently "
                            "opened.")
        if self.__sftp_opened is True:
            self.close_sftp()
        self.__connect.close()
        self.__ssh_session.close()
        self.__system.close()
        self.__ssh_session = None
        self.__ssh_opened = False

    def open_sftp(self):
        """Start an SFTP session over the already-open SSH transport."""
        self.__log.debug("Opening SFTP.")
        if self.__sftp_opened is True:
            raise Exception("Can not open SFTP session that is already open.")
        self.__sftp_session = SftpSession(self.__ssh_session)
        self.__sftp_session.open()
        self.__sftp_opened = True

    def close_sftp(self):
        """Stop the SFTP session (the SSH transport stays open)."""
        self.__log.debug("Closing SFTP.")
        if self.__sftp_opened is False:
            raise Exception("Can not close SFTP session that is not currently "
                            "opened.")
        self.__sftp_session.close()
        self.__sftp_session = None
        self.__sftp_opened = False

    @property
    def ssh(self):
        """The open SshSession; raises if open_ssh() has not been called."""
        if self.__ssh_opened is False:
            raise Exception("Can not return an SSH session. A session is not "
                            "open.")
        return self.__ssh_session

    @property
    def sftp(self):
        """The open SftpSession; raises if open_sftp() has not been called."""
        if self.__sftp_opened is False:
            raise Exception("Can not return an SFTP session. A session is not "
                            "open.")
        return self.__sftp_session
|
dsoprea/PySecure | pysecure/_version.py | git_versions_from_vcs | python | def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
versions = {}
full_revisionid = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_revisionid is None:
return {}
versions["full_revisionid"] = full_revisionid.strip()
d = run_command(GITS,
["describe", "--tags", "--dirty", "--always", "--long"],
cwd=root)
if d is None:
return {}
d = d.strip()
# "TAG-DIST-gHASH[-dirty]" , where DIST might be "0"
# or just "HASH[-dirty]" if there are no ancestor tags
versions["long"] = d
mo1 = re.search(r"^(.*)-(\d+)-g([0-9a-f]+)(-dirty)?$", d)
mo2 = re.search(r"^([0-9a-f]+)(-dirty)?$", d)
if mo1:
rawtag = mo1.group(1)
if not rawtag.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (rawtag, tag_prefix))
return {}
tag = rawtag[len(tag_prefix):]
versions["closest_tag"] = tag
versions["distance"] = int(mo1.group(2))
versions["short_revisionid"] = mo1.group(3)
versions["dirty"] = bool(mo1.group(4))
versions["pep440"] = tag
if versions["distance"]:
versions["describe"] = d
versions["pep440"] += ".post%d" % versions["distance"]
else:
versions["describe"] = tag
if versions["dirty"]:
versions["describe"] += "-dirty"
if versions["dirty"]:
# not strictly correct, as X.dev0 sorts "earlier" than X, but we
# need some way to distinguish the two. You shouldn't be shipping
# -dirty code anyways.
versions["pep440"] += ".dev0"
versions["default"] = versions["describe"]
elif mo2: # no ancestor tags
versions["closest_tag"] = None
versions["short_revisionid"] = mo2.group(1)
versions["dirty"] = bool(mo2.group(2))
# count revisions to compute ["distance"]
commits = run_command(GITS, ["rev-list", "--count", "HEAD"], cwd=root)
if commits is None:
return {}
versions["distance"] = int(commits.strip())
versions["pep440"] = "0"
if versions["distance"]:
versions["pep440"] += ".post%d" % versions["distance"]
if versions["dirty"]:
versions["pep440"] += ".dev0" # same concern as above
versions["describe"] = d
versions["default"] = "0-%d-g%s" % (versions["distance"], d)
else:
return {}
versions["dash_dirty"] = "-dirty" if versions["dirty"] else ""
versions["closest_tag_or_zero"] = versions["closest_tag"] or "0"
if versions["distance"] == 0:
versions["dash_distance"] = ""
else:
versions["dash_distance"] = "-%d" % versions["distance"]
return versions | Return a dictionary of values derived directly from the VCS. This is the
third attempt to find information by get_versions(). | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/_version.py#L173-L263 | [
"def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):\n assert isinstance(commands, list)\n p = None\n for c in commands:\n try:\n # remember shell=False, so use git.cmd on windows, not just git\n p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr\n else None))\n break\n except EnvironmentError:\n e = sys.exc_info()[1]\n if e.errno == errno.ENOENT:\n continue\n if verbose:\n print(\"unable to run %s\" % args[0])\n print(e)\n return None\n else:\n if verbose:\n print(\"unable to find command, tried %s\" % (commands,))\n return None\n stdout = p.communicate()[0].strip()\n if sys.version >= '3':\n stdout = stdout.decode()\n if p.returncode != 0:\n# TODO(dustin): Maybe we should contemplate raising a SystemError here, rather \n# then returning a None. It's almost always preferable that it would default to \n# being a terminal error unles specifically caught (rather than vice versa).\n if verbose:\n print(\"unable to run %s (error)\" % args[0])\n return None\n return stdout\n"
] |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.10+ (https://github.com/warner/python-versioneer)
"""This text is put at the top of _version.py, and can be keyword-replaced with
version information by the VCS.
"""
# these strings will be replaced by git during git-archive (export-subst)
git_refnames = "$Format:%d$"
git_full_revisionid = "$Format:%H$"
git_short_revisionid = "$Format:%h$"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = ""                              # prefix stripped from version tags
parentdir_prefix = "pysecure-"               # expected unpack-directory prefix
versionfile_source = "pysecure/_version.py"  # path of this file from repo root
version_string_template = "%(default)s"      # format applied to the final dict
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first launchable candidate from *commands* with *args*.

    Each name in *commands* is tried in order (e.g. ["git.cmd", "git.exe"]
    on Windows) until one can actually be spawned.

    :param commands: List of candidate executable names.
    :param args: Argument list appended to the chosen executable.
    :param cwd: Working directory for the child process, or None.
    :param verbose: If True, print diagnostics on failure.
    :param hide_stderr: If True, suppress the child's stderr output.
    :returns: The child's stdout, stripped of surrounding whitespace, or
        None if no command could be launched or the child exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # This candidate doesn't exist; try the next one.
                continue
            if verbose:
                print("unable to run %s" % args[0])
                print(e)
            return None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = p.communicate()[0].strip()
    # Compare the major version numerically: the previous string comparison
    # (sys.version >= '3') is lexicographic and breaks for Python >= 10.
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        # TODO(dustin): Maybe we should contemplate raising a SystemError
        # here, rather than returning a None. It's almost always preferable
        # that it would default to being a terminal error unless specifically
        # caught (rather than vice versa).
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
    """Derive version info from the name of our parent directory.

    Useful when a thoughtfully-named directory is created from an archive.
    This is the fourth attempt made by get_versions().

    :param parentdir_prefix: Expected prefix of the directory name.
    :param root: Path whose basename encodes the version.
    :param verbose: If True, print a diagnostic when the prefix is absent.
    :returns: A dict with "describe"/"long"/"pep440" keys, or None.
    """
    basename = os.path.basename(root)
    if basename.startswith(parentdir_prefix):
        version = basename[len(parentdir_prefix):]
        return {"describe": version, "long": version, "pep440": version}

    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
              (root, basename, parentdir_prefix))
    return None
import re
def git_get_keywords(versionfile_abs):
    """Extract the VCS-substituted keyword values from _version.py.

    This is the first attempt made by get_versions(). The code embedded in
    _version.py can just read these variables directly; setup.py, however,
    must not import _version.py, so the values are scraped with a regexp.

    :param versionfile_abs: Absolute path to the _version.py file.
    :returns: Dict possibly containing "refnames", "full_revisionid" and
        "short_revisionid"; keys that were not found are simply absent.
    """
    markers = (("git_refnames =", "refnames"),
               ("git_full_revisionid =", "full_revisionid"),
               ("git_short_revisionid =", "short_revisionid"))
    keywords = {}
    try:
        with open(versionfile_abs) as f:
            for line in f.readlines():
                stripped = line.strip()
                for prefix, key in markers:
                    if stripped.startswith(prefix):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file: fall through to the later strategies.
        pass
    return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
    """Build version info from expanded git-archive keywords.

    This is the second attempt made by get_versions(). "distance" cannot be
    deduced from keyword expansion, so it is never present in the result.

    :param keywords: Dict with "refnames"/"full_revisionid"/"short_revisionid".
    :param tag_prefix: Prefix that version tags are expected to carry.
    :param verbose: If True, print progress diagnostics.
    :returns: Version dict, or {} when the keywords are unusable.
    """
    if not keywords:
        # keyword-finding function failed to find keywords
        return {}
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # Unexpanded substitution: not an unpacked git-archive tarball.
        if verbose:
            print("keywords are unexpanded, not using")
        return {}
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # Since git-1.8.3, tags appear as "tag: foo-1.0"; prefer those.
    TAG = "tag: "
    tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
    if not tags:
        # Older git (or truly no tags): heuristically keep only refs that
        # contain a digit, which filters out common branch names like
        # "release", "stabilization", "HEAD" and "master".
        tags = {r for r in refs if re.search(r'\d', r)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs-tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    picked = None
    for candidate in sorted(tags):
        # Sorting prefers e.g. "2.0" over "2.0rc1".
        if candidate.startswith(tag_prefix):
            picked = candidate[len(tag_prefix):]
            if verbose:
                print("picking %s" % picked)
            break
    versions = {
        "full_revisionid": keywords["full_revisionid"].strip(),
        "short_revisionid": keywords["short_revisionid"].strip(),
        "dirty": False, "dash_dirty": "",
        "closest_tag": picked,
        "closest_tag_or_zero": picked or "0",
        # "distance" is not provided: cannot deduce from keyword expansion
    }
    if not picked and verbose:
        print("no suitable tags, using full revision id")
    composite = picked or versions["full_revisionid"]
    for key in ("describe", "long", "default", "pep440"):
        versions[key] = composite
    return versions
import re
import sys
import os.path
import os
def get_versions(default=None, verbose=False):
    """This variation of get_versions() will be used in _version.py .

    Tries, in order: expanded keywords, the VCS itself, and the parent
    directory name, finally falling back to *default*.

    :param default: Fallback version dict. Defaults to
        {"version": "unknown", "full": ""}; a None sentinel is used here to
        avoid the mutable-default-argument pitfall.
    :param verbose: If True, print progress diagnostics.
    :returns: A version-information dict.
    """
    if default is None:
        default = {"version": "unknown", "full": ""}
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    keywords = { "refnames": git_refnames,
                 "full_revisionid": git_full_revisionid,
                 "short_revisionid": git_short_revisionid }
    ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
    if ver:
        return ver
    try:
        root = os.path.abspath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        # TODO(dustin): Shouldn't this always loop until it fails?
        for i in range(len(versionfile_source.split(os.sep))):
            root = os.path.dirname(root)
    except NameError:
        # No __file__: only the keyword strategy was available.
        return default
    return (git_versions_from_vcs(tag_prefix, root, verbose)
            or versions_from_parentdir(parentdir_prefix, root, verbose)
            or default)
|
dsoprea/PySecure | pysecure/_version.py | get_versions | python | def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = { "refnames": git_refnames,
"full_revisionid": git_full_revisionid,
"short_revisionid": git_short_revisionid }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
# TODO(dustin): Shouldn't this always loop until it fails?
for i in range(len(versionfile_source.split(os.sep))):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default) | This variation of get_versions() will be used in _version.py . | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/_version.py#L267-L295 | [
"def versions_from_parentdir(parentdir_prefix, root, verbose=False):\n \"\"\"Return a dictionary of values derived from the name of our parent \n directory (useful when a thoughtfully-named directory is created from an \n archive). This is the fourth attempt to find information by get_versions().\n \"\"\"\n\n # Source tarballs conventionally unpack into a directory that includes\n # both the project name and a version string.\n dirname = os.path.basename(root)\n if not dirname.startswith(parentdir_prefix):\n if verbose:\n print(\"guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'\" %\n (root, dirname, parentdir_prefix))\n return None\n version = dirname[len(parentdir_prefix):]\n return { \"describe\": version,\n \"long\": version,\n \"pep440\": version,\n }\n",
"def git_versions_from_keywords(keywords, tag_prefix, verbose=False):\n if not keywords:\n return {} # keyword-finding function failed to find keywords\n refnames = keywords[\"refnames\"].strip()\n if refnames.startswith(\"$Format\"):\n if verbose:\n print(\"keywords are unexpanded, not using\")\n return {} # unexpanded, so not in an unpacked git-archive tarball\n refs = set([r.strip() for r in refnames.strip(\"()\").split(\",\")])\n # starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of\n # just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.\n TAG = \"tag: \"\n tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])\n if not tags:\n # Either we're using git < 1.8.3, or there really are no tags. We use\n # a heuristic: assume all version tags have a digit. The old git %d\n # expansion behaves like git log --decorate=short and strips out the\n # refs/heads/ and refs/tags/ prefixes that would let us distinguish\n # between branches and tags. By ignoring refnames without digits, we\n # filter out many common branch names like \"release\" and\n # \"stabilization\", as well as \"HEAD\" and \"master\".\n tags = set([r for r in refs if re.search(r'\\d', r)])\n if verbose:\n print(\"discarding '%s', no digits\" % \",\".join(refs-tags))\n if verbose:\n print(\"likely tags: %s\" % \",\".join(sorted(tags)))\n shortest_tag = None\n for ref in sorted(tags):\n # sorting will prefer e.g. 
\"2.0\" over \"2.0rc1\"\n if ref.startswith(tag_prefix):\n shortest_tag = ref[len(tag_prefix):]\n if verbose:\n print(\"picking %s\" % shortest_tag)\n break\n versions = {\n \"full_revisionid\": keywords[\"full_revisionid\"].strip(),\n \"short_revisionid\": keywords[\"short_revisionid\"].strip(),\n \"dirty\": False, \"dash_dirty\": \"\",\n \"closest_tag\": shortest_tag,\n \"closest_tag_or_zero\": shortest_tag or \"0\",\n # \"distance\" is not provided: cannot deduce from keyword expansion\n }\n if not shortest_tag and verbose:\n print(\"no suitable tags, using full revision id\")\n composite = shortest_tag or versions[\"full_revisionid\"]\n versions[\"describe\"] = composite\n versions[\"long\"] = composite\n versions[\"default\"] = composite\n versions[\"pep440\"] = composite\n return versions\n",
"def git_versions_from_vcs(tag_prefix, root, verbose=False):\n \"\"\"Return a dictionary of values derived directly from the VCS. This is the\n third attempt to find information by get_versions().\n \"\"\"\n\n # this runs 'git' from the root of the source tree. This only gets called\n # if the git-archive 'subst' keywords were *not* expanded, and\n # _version.py hasn't already been rewritten with a short version string,\n # meaning we're inside a checked out source tree.\n\n if not os.path.exists(os.path.join(root, \".git\")):\n if verbose:\n print(\"no .git in %s\" % root)\n return {}\n\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n\n versions = {}\n\n full_revisionid = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\n if full_revisionid is None:\n return {}\n versions[\"full_revisionid\"] = full_revisionid.strip()\n\n d = run_command(GITS,\n [\"describe\", \"--tags\", \"--dirty\", \"--always\", \"--long\"],\n cwd=root)\n if d is None:\n return {}\n d = d.strip()\n # \"TAG-DIST-gHASH[-dirty]\" , where DIST might be \"0\"\n # or just \"HASH[-dirty]\" if there are no ancestor tags\n\n versions[\"long\"] = d\n\n mo1 = re.search(r\"^(.*)-(\\d+)-g([0-9a-f]+)(-dirty)?$\", d)\n mo2 = re.search(r\"^([0-9a-f]+)(-dirty)?$\", d)\n if mo1:\n rawtag = mo1.group(1)\n if not rawtag.startswith(tag_prefix):\n if verbose:\n print(\"tag '%s' doesn't start with prefix '%s'\" % (rawtag, tag_prefix))\n return {}\n tag = rawtag[len(tag_prefix):]\n versions[\"closest_tag\"] = tag\n versions[\"distance\"] = int(mo1.group(2))\n versions[\"short_revisionid\"] = mo1.group(3)\n versions[\"dirty\"] = bool(mo1.group(4))\n versions[\"pep440\"] = tag\n if versions[\"distance\"]:\n versions[\"describe\"] = d\n versions[\"pep440\"] += \".post%d\" % versions[\"distance\"]\n else:\n versions[\"describe\"] = tag\n if versions[\"dirty\"]:\n versions[\"describe\"] += \"-dirty\"\n if versions[\"dirty\"]:\n # not strictly correct, as X.dev0 sorts 
\"earlier\" than X, but we\n # need some way to distinguish the two. You shouldn't be shipping\n # -dirty code anyways.\n versions[\"pep440\"] += \".dev0\"\n versions[\"default\"] = versions[\"describe\"]\n\n elif mo2: # no ancestor tags\n versions[\"closest_tag\"] = None\n versions[\"short_revisionid\"] = mo2.group(1)\n versions[\"dirty\"] = bool(mo2.group(2))\n # count revisions to compute [\"distance\"]\n commits = run_command(GITS, [\"rev-list\", \"--count\", \"HEAD\"], cwd=root)\n if commits is None:\n return {}\n versions[\"distance\"] = int(commits.strip())\n versions[\"pep440\"] = \"0\"\n if versions[\"distance\"]:\n versions[\"pep440\"] += \".post%d\" % versions[\"distance\"]\n if versions[\"dirty\"]:\n versions[\"pep440\"] += \".dev0\" # same concern as above\n versions[\"describe\"] = d\n versions[\"default\"] = \"0-%d-g%s\" % (versions[\"distance\"], d)\n else:\n return {}\n versions[\"dash_dirty\"] = \"-dirty\" if versions[\"dirty\"] else \"\"\n versions[\"closest_tag_or_zero\"] = versions[\"closest_tag\"] or \"0\"\n if versions[\"distance\"] == 0:\n versions[\"dash_distance\"] = \"\"\n else:\n versions[\"dash_distance\"] = \"-%d\" % versions[\"distance\"]\n\n return versions\n"
] |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.10+ (https://github.com/warner/python-versioneer)
"""This text is put at the top of _version.py, and can be keyword-replaced with
version information by the VCS.
"""
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full_revisionid = "$Format:%H$"
git_short_revisionid = "$Format:%h$"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = ""
parentdir_prefix = "pysecure-"
versionfile_source = "pysecure/_version.py"
version_string_template = "%(default)s"
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first launchable candidate from *commands* with *args*.

    Each name in *commands* is tried in order (e.g. ["git.cmd", "git.exe"]
    on Windows) until one can actually be spawned.

    :param commands: List of candidate executable names.
    :param args: Argument list appended to the chosen executable.
    :param cwd: Working directory for the child process, or None.
    :param verbose: If True, print diagnostics on failure.
    :param hide_stderr: If True, suppress the child's stderr output.
    :returns: The child's stdout, stripped of surrounding whitespace, or
        None if no command could be launched or the child exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # This candidate doesn't exist; try the next one.
                continue
            if verbose:
                print("unable to run %s" % args[0])
                print(e)
            return None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = p.communicate()[0].strip()
    # Compare the major version numerically: the previous string comparison
    # (sys.version >= '3') is lexicographic and breaks for Python >= 10.
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        # TODO(dustin): Maybe we should contemplate raising a SystemError
        # here, rather than returning a None. It's almost always preferable
        # that it would default to being a terminal error unless specifically
        # caught (rather than vice versa).
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
    """Derive version info from the name of our parent directory.

    Useful when a thoughtfully-named directory is created from an archive.
    This is the fourth attempt made by get_versions().

    :param parentdir_prefix: Expected prefix of the directory name.
    :param root: Path whose basename encodes the version.
    :param verbose: If True, print a diagnostic when the prefix is absent.
    :returns: A dict with "describe"/"long"/"pep440" keys, or None.
    """
    basename = os.path.basename(root)
    if basename.startswith(parentdir_prefix):
        version = basename[len(parentdir_prefix):]
        return {"describe": version, "long": version, "pep440": version}

    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
              (root, basename, parentdir_prefix))
    return None
import re
def git_get_keywords(versionfile_abs):
    """Extract the VCS-substituted keyword values from _version.py.

    This is the first attempt made by get_versions(). The code embedded in
    _version.py can just read these variables directly; setup.py, however,
    must not import _version.py, so the values are scraped with a regexp.

    :param versionfile_abs: Absolute path to the _version.py file.
    :returns: Dict possibly containing "refnames", "full_revisionid" and
        "short_revisionid"; keys that were not found are simply absent.
    """
    markers = (("git_refnames =", "refnames"),
               ("git_full_revisionid =", "full_revisionid"),
               ("git_short_revisionid =", "short_revisionid"))
    keywords = {}
    try:
        with open(versionfile_abs) as f:
            for line in f.readlines():
                stripped = line.strip()
                for prefix, key in markers:
                    if stripped.startswith(prefix):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file: fall through to the later strategies.
        pass
    return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
    """Build version info from expanded git-archive keywords.

    This is the second attempt made by get_versions(). "distance" cannot be
    deduced from keyword expansion, so it is never present in the result.

    :param keywords: Dict with "refnames"/"full_revisionid"/"short_revisionid".
    :param tag_prefix: Prefix that version tags are expected to carry.
    :param verbose: If True, print progress diagnostics.
    :returns: Version dict, or {} when the keywords are unusable.
    """
    if not keywords:
        # keyword-finding function failed to find keywords
        return {}
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # Unexpanded substitution: not an unpacked git-archive tarball.
        if verbose:
            print("keywords are unexpanded, not using")
        return {}
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # Since git-1.8.3, tags appear as "tag: foo-1.0"; prefer those.
    TAG = "tag: "
    tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
    if not tags:
        # Older git (or truly no tags): heuristically keep only refs that
        # contain a digit, which filters out common branch names like
        # "release", "stabilization", "HEAD" and "master".
        tags = {r for r in refs if re.search(r'\d', r)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs-tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    picked = None
    for candidate in sorted(tags):
        # Sorting prefers e.g. "2.0" over "2.0rc1".
        if candidate.startswith(tag_prefix):
            picked = candidate[len(tag_prefix):]
            if verbose:
                print("picking %s" % picked)
            break
    versions = {
        "full_revisionid": keywords["full_revisionid"].strip(),
        "short_revisionid": keywords["short_revisionid"].strip(),
        "dirty": False, "dash_dirty": "",
        "closest_tag": picked,
        "closest_tag_or_zero": picked or "0",
        # "distance" is not provided: cannot deduce from keyword expansion
    }
    if not picked and verbose:
        print("no suitable tags, using full revision id")
    composite = picked or versions["full_revisionid"]
    for key in ("describe", "long", "default", "pep440"):
        versions[key] = composite
    return versions
import re
import sys
import os.path
def git_versions_from_vcs(tag_prefix, root, verbose=False):
    """Return a dictionary of values derived directly from the VCS. This is the
    third attempt to find information by get_versions().

    :param tag_prefix: Prefix that version tags are expected to carry.
    :param root: Path to the top of the checked-out source tree.
    :param verbose: If True, print progress diagnostics.
    :returns: Version dict, or {} when git is unavailable or its output
        cannot be parsed.
    """
    # this runs 'git' from the root of the source tree. This only gets called
    # if the git-archive 'subst' keywords were *not* expanded, and
    # _version.py hasn't already been rewritten with a short version string,
    # meaning we're inside a checked out source tree.
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False is used downstream, so the wrappers must be named fully.
        GITS = ["git.cmd", "git.exe"]
    versions = {}
    full_revisionid = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_revisionid is None:
        return {}
    versions["full_revisionid"] = full_revisionid.strip()
    d = run_command(GITS,
                    ["describe", "--tags", "--dirty", "--always", "--long"],
                    cwd=root)
    if d is None:
        return {}
    d = d.strip()
    # "TAG-DIST-gHASH[-dirty]" , where DIST might be "0"
    # or just "HASH[-dirty]" if there are no ancestor tags
    versions["long"] = d
    # mo1 matches the tagged form, mo2 the tagless (bare-hash) form.
    mo1 = re.search(r"^(.*)-(\d+)-g([0-9a-f]+)(-dirty)?$", d)
    mo2 = re.search(r"^([0-9a-f]+)(-dirty)?$", d)
    if mo1:
        rawtag = mo1.group(1)
        if not rawtag.startswith(tag_prefix):
            if verbose:
                print("tag '%s' doesn't start with prefix '%s'" % (rawtag, tag_prefix))
            return {}
        tag = rawtag[len(tag_prefix):]
        versions["closest_tag"] = tag
        versions["distance"] = int(mo1.group(2))
        versions["short_revisionid"] = mo1.group(3)
        versions["dirty"] = bool(mo1.group(4))
        versions["pep440"] = tag
        if versions["distance"]:
            versions["describe"] = d
            versions["pep440"] += ".post%d" % versions["distance"]
        else:
            versions["describe"] = tag
            if versions["dirty"]:
                versions["describe"] += "-dirty"
        if versions["dirty"]:
            # not strictly correct, as X.dev0 sorts "earlier" than X, but we
            # need some way to distinguish the two. You shouldn't be shipping
            # -dirty code anyways.
            versions["pep440"] += ".dev0"
        versions["default"] = versions["describe"]
    elif mo2: # no ancestor tags
        versions["closest_tag"] = None
        versions["short_revisionid"] = mo2.group(1)
        versions["dirty"] = bool(mo2.group(2))
        # count revisions to compute ["distance"]
        commits = run_command(GITS, ["rev-list", "--count", "HEAD"], cwd=root)
        if commits is None:
            return {}
        versions["distance"] = int(commits.strip())
        versions["pep440"] = "0"
        if versions["distance"]:
            versions["pep440"] += ".post%d" % versions["distance"]
        if versions["dirty"]:
            versions["pep440"] += ".dev0" # same concern as above
        versions["describe"] = d
        versions["default"] = "0-%d-g%s" % (versions["distance"], d)
    else:
        # Output matched neither form; treat as a failure.
        return {}
    versions["dash_dirty"] = "-dirty" if versions["dirty"] else ""
    versions["closest_tag_or_zero"] = versions["closest_tag"] or "0"
    if versions["distance"] == 0:
        versions["dash_distance"] = ""
    else:
        versions["dash_distance"] = "-%d" % versions["distance"]
    return versions
import os
|
dsoprea/PySecure | pysecure/adapters/ssha.py | _ssh_forward_accept | python | def _ssh_forward_accept(ssh_session, timeout_ms):
ssh_channel = c_ssh_forward_accept(c_void_p(ssh_session),
c_int(timeout_ms))
if ssh_channel is None:
raise SshTimeoutException()
return ssh_channel | Waiting for an incoming connection from a reverse forwarded port. Note
that this results in a kernel block until a connection is received. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/ssha.py#L249-L260 | null | import logging
from ctypes import c_char_p, c_void_p, c_ubyte, byref, cast, c_uint, \
c_int, c_long
from pysecure.exceptions import SshError, SshLoginError, SshHostKeyException, \
SshNonblockingTryAgainException, \
SshTimeoutException
from pysecure.config import DEFAULT_EXECUTE_READ_BLOCK_SIZE
from pysecure.types import c_ssh_key
from pysecure.constants.ssh import SSH_OK, SSH_ERROR, SSH_AGAIN, SSH_EOF, \
\
SSH_AUTH_ERROR, SSH_AUTH_DENIED, \
SSH_AUTH_PARTIAL, SSH_AUTH_AGAIN, \
SSH_AUTH_SUCCESS, \
\
SSH_SERVER_ERROR, SSH_SERVER_NOT_KNOWN, \
SSH_SERVER_KNOWN_OK, \
SSH_SERVER_KNOWN_CHANGED, \
SSH_SERVER_FOUND_OTHER, SSH_OPTIONS, \
SSH_SERVER_FILE_NOT_FOUND, \
\
SSH_CLOSED, \
SSH_READ_PENDING, \
SSH_WRITE_PENDING, \
SSH_CLOSED_ERROR
from pysecure.calls.sshi import c_free, c_ssh_pki_import_privkey_file, \
c_ssh_write_knownhost, c_ssh_get_pubkey_hash, \
c_ssh_is_server_known, c_ssh_connect, \
c_ssh_disconnect, c_ssh_print_hexa, \
c_ssh_get_hexa, c_ssh_free, c_ssh_new, \
c_ssh_options_set, c_ssh_init, \
c_ssh_finalize, c_ssh_userauth_password, \
c_ssh_forward_listen, c_ssh_forward_accept, \
c_ssh_key_new, c_ssh_userauth_publickey, \
c_ssh_key_free, c_ssh_get_disconnect_message, \
c_ssh_get_issue_banner, \
c_ssh_get_openssh_version, c_ssh_get_status, \
c_ssh_get_version, c_ssh_get_serverbanner, \
c_ssh_disconnect, c_ssh_is_blocking, \
c_ssh_threads_get_noop, \
c_ssh_threads_set_callbacks, \
c_ssh_set_blocking
# c_ssh_threads_init, c_ssh_threads_finalize, \
# c_ssh_threads_get_type
from pysecure.adapters.channela import SshChannel
from pysecure.error import ssh_get_error, ssh_get_error_code
from pysecure.utility import bytify, stringify
def _ssh_options_set_string(ssh_session, type_, value):
    """Set a string-valued option on the given SSH session.

    :param ssh_session: Raw session handle.
    :param type_: SSH_OPTIONS_* constant identifying the option.
    :param value: String value to assign.
    :raises SshError: If libssh rejects the option.
    """
    assert issubclass(value.__class__, str)

    encoded = c_char_p(bytify(value))
    rc = c_ssh_options_set(c_void_p(ssh_session), c_int(type_),
                           cast(encoded, c_void_p))
    if rc < 0:
        raise SshError("Could not set STRING option (%d) to [%s]: %s" %
                       (type_, value, ssh_get_error(ssh_session)))
def _ssh_options_set_uint(ssh_session, type_, value):
    """Set an unsigned-integer option on the given SSH session.

    :param ssh_session: Raw session handle.
    :param type_: SSH_OPTIONS_* constant identifying the option.
    :param value: Non-negative integer value to assign.
    :raises SshError: If libssh rejects the option.
    """
    boxed = c_uint(value)
    rc = c_ssh_options_set(c_void_p(ssh_session), c_int(type_),
                           cast(byref(boxed), c_void_p))
    if rc < 0:
        raise SshError("Could not set UINT option (%d) to (%d): %s" %
                       (type_, value, ssh_get_error(ssh_session)))
def _ssh_options_set_int(ssh_session, type_, value):
    """Set a signed-integer option on the given SSH session.

    :param ssh_session: Raw session handle.
    :param type_: SSH_OPTIONS_* constant identifying the option.
    :param value: Integer value to assign.
    :raises SshError: If libssh rejects the option.
    """
    boxed = c_int(value)
    rc = c_ssh_options_set(c_void_p(ssh_session), c_int(type_),
                           cast(byref(boxed), c_void_p))
    if rc < 0:
        raise SshError("Could not set INT option (%d) to (%d): %s" %
                       (type_, value, ssh_get_error(ssh_session)))
def _ssh_options_set_long(ssh_session, type_, value):
    """Set a long-integer option on the given SSH session.

    :param ssh_session: Raw session handle.
    :param type_: SSH_OPTIONS_* constant identifying the option.
    :param value: Integer value to assign.
    :raises SshError: If libssh rejects the option.
    """
    boxed = c_long(value)
    rc = c_ssh_options_set(c_void_p(ssh_session), c_int(type_),
                           cast(byref(boxed), c_void_p))
    if rc < 0:
        raise SshError("Could not set LONG option (%d) to (%d): %s" %
                       (type_, value, ssh_get_error(ssh_session)))
def _ssh_new():
    """Allocate a new libssh session handle.

    :returns: Raw session pointer.
    :raises SshError: If allocation fails.
    """
    handle = c_ssh_new()
    if handle is None:
        raise SshError("Could not create session.")

    return handle
def _ssh_free(ssh_session):
    """Release a session handle previously allocated with _ssh_new()."""
    c_ssh_free(c_void_p(ssh_session))
def _ssh_connect(ssh_session):
    """Establish the transport connection for the session.

    :param ssh_session: Raw session handle (options already set).
    :raises SshNonblockingTryAgainException: Non-blocking session; retry.
    :raises SshError: Any other connection failure.
    """
    rc = c_ssh_connect(c_void_p(ssh_session))
    if rc == SSH_AGAIN:
        raise SshNonblockingTryAgainException()

    if rc != SSH_OK:
        raise SshError("Connect failed: %s" % (ssh_get_error(ssh_session),))
def _ssh_disconnect(ssh_session):
    """Disconnect the given session."""
    c_ssh_disconnect(c_void_p(ssh_session))
def _ssh_is_server_known(ssh_session, allow_new=False, cb=None):
    """Validate the server's host-key against the known-hosts file.

    cb, when given, receives (hex-rendered key, flag) and may veto
    authentication by returning False. A previously-unseen key is recorded
    only when allow_new is True.
    """
    state = c_ssh_is_server_known(c_void_p(ssh_session))

    if state == SSH_SERVER_KNOWN_OK:
        # Key already on record; still give the callback a chance to veto.
        if cb is not None:
            hk = repr(PublicKeyHash(ssh_session))
            allow_auth = cb(hk, True)

            logging.debug("Host-key callback returned [%s] when a host-key has "
                          "already been accepted." % (allow_auth))

            if allow_auth is False:
                raise SshHostKeyException("Existing host-key was failed by "
                                          "callback.")

        logging.debug("Server host-key authenticated.")
        return

    if state == SSH_SERVER_KNOWN_CHANGED:
        raise SshHostKeyException("Host key: Server has changed.")
    elif state == SSH_SERVER_FOUND_OTHER:
        raise SshHostKeyException("Host key: Server -type- has changed.")
    elif state in (SSH_SERVER_FILE_NOT_FOUND, SSH_SERVER_NOT_KNOWN):
        logging.warn("Server is not already known.")

        if allow_new is False:
            if state == SSH_SERVER_FILE_NOT_FOUND:
                raise SshHostKeyException("Host key: The known-hosts file was "
                                          "not found, and we are not "
                                          "accepting new hosts.")

            raise SshHostKeyException("An existing host-key was not found. "
                                      "Our policy is to deny new hosts.")

        if cb is not None:
            hk = repr(PublicKeyHash(ssh_session))
            allow_auth = cb(hk, allow_new)

            logging.debug("Host-key callback returned [%s] when no host-key "
                          "yet available." % (allow_auth))

            if allow_auth is False:
                raise SshHostKeyException("New host-key was failed by "
                                          "callback.")

        logging.warn("Recording host-key for server.")
        c_ssh_write_knownhost(ssh_session)
    elif state == SSH_SERVER_ERROR:
        raise SshHostKeyException("Host key: Server error.")
    else:
        raise SshHostKeyException("Host key: Failed (unexpected error).")
def _ssh_print_hexa(title, hash_, hlen):
    """Render a hash buffer via libssh's hex printer, under the given title."""
    assert issubclass(title.__class__, str)

    c_ssh_print_hexa(c_char_p(bytify(title)), hash_, c_int(hlen))
def _ssh_get_hexa(hash_, hlen):
    """Return a libssh-allocated hex string describing the hash buffer."""
    hex_buffer = c_ssh_get_hexa(hash_, c_int(hlen))
    if hex_buffer is None:
        raise SshError("Could not build hex-string.")

    return hex_buffer
def _ssh_write_knownhost(ssh_session):
    """Record the current server's host-key into the known-hosts file."""
    logging.debug("Updating known-hosts file.")

    rc = c_ssh_write_knownhost(c_void_p(ssh_session))
    if rc != SSH_OK:
        raise SshError("Could not update known-hosts file: %s" %
                       (ssh_get_error(ssh_session)))
def _check_auth_response(result):
    """Translate a libssh auth return-code into an exception; success is a
    no-op.
    """
    failures = {
        SSH_AUTH_ERROR: "Login failed: Auth error.",
        SSH_AUTH_DENIED: "Login failed: Auth denied.",
        SSH_AUTH_PARTIAL: "Login failed: Auth partial.",
        SSH_AUTH_AGAIN: "Login failed: Auth again.",
    }

    if result in failures:
        raise SshLoginError(failures[result])
    elif result != SSH_AUTH_SUCCESS:
        raise SshLoginError("Login failed (unexpected error).")
def _ssh_userauth_password(ssh_session, username, password):
    """Authenticate with a plain password (username may be None)."""
    if username is not None:
        assert issubclass(username.__class__, str)

    assert issubclass(password.__class__, str)

    logging.debug("Authenticating with a password for user [%s]." % (username))

    rc = c_ssh_userauth_password(c_void_p(ssh_session),
                                 c_char_p(bytify(username)),
                                 c_char_p(bytify(password)))

    _check_auth_response(rc)
def _ssh_init():
    """Initialize libssh's global state."""
    if c_ssh_init() < 0:
        raise SshError("Could not initialize SSH.")
def _ssh_finalize():
    """Tear down libssh's global state."""
    if c_ssh_finalize() < 0:
        raise SshError("Could not finalize SSH.")
def _ssh_forward_listen(ssh_session, address, port):
    """Ask the server to listen on address:port and forward connections back.

    Returns the port that was actually bound on the remote side.
    """
    if address is not None:
        assert issubclass(address.__class__, str)
        address = bytify(address)

    bound_port = c_int()

    # BUG: Currently always returns SSH_AGAIN in 0.6.0 . Registered as bug #126.
    rc = c_ssh_forward_listen(ssh_session, address, port, byref(bound_port))

    if rc == SSH_AGAIN:
        raise SshNonblockingTryAgainException()
    elif rc != SSH_OK:
        raise SshError("Forward-listen failed: %s" %
                       (ssh_get_error(ssh_session)))

    return bound_port.value
def _ssh_key_new():
    """Allocate an empty libssh key object."""
    new_key = c_ssh_key_new()
    if new_key is None:
        raise SshError("Could not create empty key.")

    return new_key
def _ssh_userauth_publickey(ssh_session, priv_key):
    """Authenticate with an in-memory private key."""
    rc = c_ssh_userauth_publickey(c_void_p(ssh_session), None, priv_key)
    _check_auth_response(rc)
def ssh_pki_import_privkey_file(file_path, pass_phrase=None):
    """Load a private key from disk, optionally decrypting it.

    Returns the raw libssh key handle.
    """
    assert issubclass(file_path.__class__, str)

    logging.debug("Importing private-key from [%s]." % (file_path))

    key = c_ssh_key()

    # TODO: This needs to be freed. Use our key class.

    encoded_path = bytify(file_path)

    if pass_phrase is not None:
        assert issubclass(pass_phrase.__class__, str)
        pass_phrase = bytify(pass_phrase)

    rc = c_ssh_pki_import_privkey_file(c_char_p(encoded_path),
                                       c_char_p(pass_phrase),
                                       None,
                                       None,
                                       byref(key))

    if rc == SSH_EOF:
        raise SshError("Key file [%s] does not exist or could not be read." %
                       (encoded_path))
    elif rc != SSH_OK:
        raise SshError("Could not import key.")

    return key
def _ssh_is_blocking(ssh_session):
    """Report whether the session is in blocking mode."""
    return bool(c_ssh_is_blocking(c_void_p(ssh_session)))
def _ssh_get_disconnect_message(ssh_session):
    """Return (message, is_error_code): the server's disconnect message, or
    the session's error code (with True) when no message was received.
    """
    message = c_ssh_get_disconnect_message(c_void_p(ssh_session))
    if message is None:
        return (ssh_get_error_code(ssh_session), True)

    return (message, False)
def _ssh_get_issue_banner(ssh_session):
    """Get the "issue banner" for the server. Note that this function may/will
    fail if the server isn't configured for such a message (like some/all
    Ubuntu installs). In the event of failure, we'll just return an empty
    string.
    """
    banner = c_ssh_get_issue_banner(c_void_p(ssh_session))

    # TODO: Does "newly allocated" string have to be freed? We might have to
    # reallocate it as a Python string.
    if banner is None:
        return ''

    return stringify(banner)
def _ssh_get_openssh_version(ssh_session):
    """Returns an encoded version. Comparisons can be done with the
    SSH_INT_VERSION macro.
    """
    version = c_ssh_get_openssh_version(c_void_p(ssh_session))
    if version == 0:
        raise SshError("Could not get OpenSSH version. Server may not be "
                       "OpenSSH.")

    return version
def _ssh_get_status(ssh_session):
    """Return a dict mapping each session-status flag name to a boolean."""
    flags = c_ssh_get_status(c_void_p(ssh_session))

    # TODO: This is returning bad flags (SSH_CLOSED_ERROR is True). Reported
    # as bug #119.
    masks = (('SSH_CLOSED', SSH_CLOSED),
             ('SSH_READ_PENDING', SSH_READ_PENDING),
             ('SSH_WRITE_PENDING', SSH_WRITE_PENDING),
             ('SSH_CLOSED_ERROR', SSH_CLOSED_ERROR))

    return dict((name, (flags & mask) > 0) for (name, mask) in masks)
def _ssh_get_version(ssh_session):
    """Return the SSH protocol version in use for the session."""
    protocol_version = c_ssh_get_version(ssh_session)
    if protocol_version < 0:
        raise SshError("Could not determine protocol version.")

    return protocol_version
def _ssh_get_serverbanner(ssh_session):
    """Return the server's banner string."""
    banner = c_ssh_get_serverbanner(c_void_p(ssh_session))
    if banner is None:
        raise SshError("Could not get server-banner.")

    return banner
def _ssh_disconnect(ssh_session):
    # NOTE(review): this is a verbatim duplicate of the _ssh_disconnect
    # defined earlier in this module; this later definition wins at import
    # time. Consider removing one of them.
    c_ssh_disconnect(c_void_p(ssh_session))
def ssh_threads_get_noop():
    """Return libssh's no-op threading callbacks."""
    return c_ssh_threads_get_noop()
def ssh_threads_set_callbacks(cb):
    """Install threading callbacks into libssh."""
    if c_ssh_threads_set_callbacks(c_void_p(cb)) != SSH_OK:
        raise SshError("Could not set callbacks.")
def _ssh_set_blocking(ssh_session, blocking):
    """Switch the session between blocking and non-blocking mode."""
    c_ssh_set_blocking(c_void_p(ssh_session), c_int(blocking))
class SshSystem(object):
    """Context-manager that initializes and finalizes the global SSH
    subsystem.
    """

    def __enter__(self):
        return self.open()

    def open(self):
        logging.debug("Initializing SSH system.")
        _ssh_init()

    def __exit__(self, e_type, e_value, e_tb):
        self.close()

    def close(self):
        logging.debug("Cleaning-up SSH system.")

        # BUGFIX: the original referenced _ssh_finalize without calling it
        # (missing parentheses), so the SSH subsystem was never finalized.
        _ssh_finalize()
class SshSession(object):
    """Wraps a libssh session handle and exposes session-level operations.

    Keyword options are looked-up in SSH_OPTIONS and applied when the session
    is opened. The special 'blocking' option is applied immediately and is
    not passed through to libssh's option table.
    """

    def __init__(self, **options):
        self.__options = options
        self.__ssh_session_ptr = _ssh_new()
        self.__log = logging.getLogger('SSH_SESSION(%d)' %
                                       (self.__ssh_session_ptr))

        self.__log.debug("Created session.")

        if 'blocking' in options:
            self.set_blocking(options['blocking'])

            # SSH_OPTIONS doesn't contain blocking and will crash if it finds it
            del self.__options['blocking']

    def __enter__(self):
        return self.open()

    def open(self):
        """Push the configured options into libssh and return self."""
        for k, v in self.__options.items():
            (option_id, type_) = SSH_OPTIONS[k]

            # Choose the setter that marshals the value to the right C-type.
            if type_ == 'string':
                option_setter = _ssh_options_set_string
            elif type_ == 'uint':
                option_setter = _ssh_options_set_uint
            elif type_ == 'int':
                option_setter = _ssh_options_set_int
            elif type_ == 'long':
                option_setter = _ssh_options_set_long
            elif type_ == 'bool':
                v = 0 if v is False else 1
                option_setter = _ssh_options_set_int
            else:
                raise SshError("Option type [%s] is invalid." % (type_))

            self.__log.debug("Setting option [%s] (%d) to [%s]." %
                             (k, option_id, v))

            option_setter(self.__ssh_session_ptr, option_id, v)

        return self

    def __exit__(self, e_type, e_value, e_tb):
        self.close()

    def close(self):
        # _ssh_free doesn't seem to imply a formal disconnect.
        self.disconnect()

        (message, is_error) = self.get_disconnect_message()
        self.__log.debug("Disconnect message: %s (error= %s)" %
                         (message, is_error))

        self.__log.debug("Freeing SSH session: %d" % (self.__ssh_session_ptr))
        _ssh_free(self.__ssh_session_ptr)

    def forward_listen(self, address, port):
        return _ssh_forward_listen(self.__ssh_session_ptr, address, port)

    def forward_accept(self, timeout_ms):
        ssh_channel_int = _ssh_forward_accept(self.__ssh_session_ptr,
                                              timeout_ms)

        return SshChannel(self, ssh_channel_int)

    def is_server_known(self, allow_new=False, cb=None):
        return _ssh_is_server_known(self.__ssh_session_ptr, allow_new, cb)

    def write_knownhost(self):
        return _ssh_write_knownhost(self.__ssh_session_ptr)

    def userauth_password(self, password):
        return _ssh_userauth_password(self.__ssh_session_ptr, None, password)

    def userauth_publickey(self, privkey):
        """This is the recommended function. Supports EC keys."""
        return _ssh_userauth_publickey(self.__ssh_session_ptr, privkey)

    def execute(self, cmd, block_size=DEFAULT_EXECUTE_READ_BLOCK_SIZE):
        """Execute a remote command and yield its output in blocks.

        This functionality does not support more than one command to be
        executed on the same channel, so we create a dedicated channel at the
        session level than allowing direct access at the channel level.
        """
        with SshChannel(self) as sc:
            self.__log.debug("Executing command: %s" % (cmd))

            sc.open_session()
            sc.request_exec(cmd)

            # CLEANUP: removed the unused `buffer_` accumulator, and renamed
            # the loop variable so it no longer shadows the builtin `bytes`.
            while 1:
                block = sc.read(block_size)
                yield block

                # A short read means the stream has been exhausted.
                if len(block) < block_size:
                    break

    def is_blocking(self):
        return _ssh_is_blocking(self.__ssh_session_ptr)

    def set_blocking(self, blocking=True):
        _ssh_set_blocking(self.__ssh_session_ptr, blocking)

    def get_error_code(self):
        return ssh_get_error_code(self.__ssh_session_ptr)

    def get_error(self):
        return ssh_get_error(self.__ssh_session_ptr)

    def get_disconnect_message(self):
        return _ssh_get_disconnect_message(self.__ssh_session_ptr)

    def get_issue_banner(self):
        return _ssh_get_issue_banner(self.__ssh_session_ptr)

    def get_openssh_version(self):
        return _ssh_get_openssh_version(self.__ssh_session_ptr)

    def get_status(self):
        return _ssh_get_status(self.__ssh_session_ptr)

    def get_version(self):
        return _ssh_get_version(self.__ssh_session_ptr)

    def get_serverbanner(self):
        return _ssh_get_serverbanner(self.__ssh_session_ptr)

    def disconnect(self):
        return _ssh_disconnect(self.__ssh_session_ptr)

    @property
    def session_id(self):
        # Raw libssh session handle, for interop with the wrapper functions.
        return self.__ssh_session_ptr
class SshConnect(object):
    """Context-manager that connects/disconnects an existing session."""

    def __init__(self, ssh_session):
        # Accept either an SshSession wrapper or a raw session handle.
        self.__ssh_session_ptr = getattr(ssh_session,
                                         'session_id',
                                         ssh_session)

    def __enter__(self):
        return self.open()

    def open(self):
        logging.debug("Connecting SSH.")
        _ssh_connect(self.__ssh_session_ptr)

    def __exit__(self, e_type, e_value, e_tb):
        self.close()

    def close(self):
        logging.debug("Disconnecting SSH.")
        _ssh_disconnect(self.__ssh_session_ptr)
class _PublicKeyHashString(object):
    """Owns a libssh-allocated hex rendering of a key-hash, freeing it on GC."""

    def __init__(self, hash_, hlen):
        self.__hexa = _ssh_get_hexa(hash_, hlen)

    def __repr__(self):
        hexa_string = cast(self.__hexa, c_char_p)

        # BUGFIX: under Python 3, .value is bytes and the original
        # (value + "") copy raised TypeError. stringify() both copies and
        # decodes, detaching the result from the C allocation before it is
        # freed.
        return stringify(hexa_string.value)

    def __del__(self):
        c_free(self.__hexa)
class PublicKeyHash(object):
    """Wraps the host-key hash for a session, freeing the buffer on GC."""

    def __init__(self, ssh_session):
        session_int = getattr(ssh_session, 'session_id', ssh_session)

        # Tuple of (hash-buffer, hash-length); the helper is defined
        # elsewhere in the package (TODO confirm).
        self.__hasht = _ssh_get_pubkey_hash(session_int)

    def __del__(self):
        c_free(self.__hasht[0])

    def print_string(self, title="Public key"):
        _ssh_print_hexa(title, *self.__hasht)

    def __repr__(self):
        return repr(_PublicKeyHashString(*self.__hasht))
|
dsoprea/PySecure | pysecure/adapters/ssha.py | SshSession.execute | python | def execute(self, cmd, block_size=DEFAULT_EXECUTE_READ_BLOCK_SIZE):
with SshChannel(self) as sc:
self.__log.debug("Executing command: %s" % (cmd))
sc.open_session()
sc.request_exec(cmd)
buffer_ = bytearray()
while 1:
bytes = sc.read(block_size)
yield bytes
if len(bytes) < block_size:
break | Execute a remote command. This functionality does not support more
than one command to be executed on the same channel, so we create a
dedicated channel at the session level than allowing direct access at
the channel level. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/ssha.py#L476-L495 | null | class SshSession(object):
def __init__(self, **options):
self.__options = options
self.__ssh_session_ptr = _ssh_new()
self.__log = logging.getLogger('SSH_SESSION(%d)' %
(self.__ssh_session_ptr))
self.__log.debug("Created session.")
if 'blocking' in options:
self.set_blocking(options['blocking'])
# SSH_OPTIONS doesn't contain blocking and will crash if it finds it
del self.__options['blocking']
def __enter__(self):
return self.open()
def open(self):
for k, v in self.__options.items():
(option_id, type_) = SSH_OPTIONS[k]
if type_ == 'string':
option_setter = _ssh_options_set_string
elif type_ == 'uint':
option_setter = _ssh_options_set_uint
elif type_ == 'int':
option_setter = _ssh_options_set_int
elif type_ == 'long':
option_setter = _ssh_options_set_long
elif type_ == 'bool':
v = 0 if v is False else 1
option_setter = _ssh_options_set_int
else:
raise SshError("Option type [%s] is invalid." % (type_))
self.__log.debug("Setting option [%s] (%d) to [%s]." %
(k, option_id, v))
option_setter(self.__ssh_session_ptr, option_id, v)
return self
def __exit__(self, e_type, e_value, e_tb):
self.close()
def close(self):
# _ssh_free doesn't seem to imply a formal disconnect.
self.disconnect()
(message, is_error) = self.get_disconnect_message()
self.__log.debug("Disconnect message: %s (error= %s)" %
(message, is_error))
self.__log.debug("Freeing SSH session: %d" % (self.__ssh_session_ptr))
_ssh_free(self.__ssh_session_ptr)
def forward_listen(self, address, port):
return _ssh_forward_listen(self.__ssh_session_ptr, address, port)
def forward_accept(self, timeout_ms):
ssh_channel_int = _ssh_forward_accept(self.__ssh_session_ptr, \
timeout_ms)
return SshChannel(self, ssh_channel_int)
def is_server_known(self, allow_new=False, cb=None):
return _ssh_is_server_known(self.__ssh_session_ptr, allow_new, cb)
def write_knownhost(self):
return _ssh_write_knownhost(self.__ssh_session_ptr)
def userauth_password(self, password):
return _ssh_userauth_password(self.__ssh_session_ptr, None, password)
def userauth_publickey(self, privkey):
"""This is the recommended function. Supports EC keys."""
return _ssh_userauth_publickey(self.__ssh_session_ptr, privkey)
def execute(self, cmd, block_size=DEFAULT_EXECUTE_READ_BLOCK_SIZE):
"""Execute a remote command. This functionality does not support more
than one command to be executed on the same channel, so we create a
dedicated channel at the session level than allowing direct access at
the channel level.
"""
with SshChannel(self) as sc:
self.__log.debug("Executing command: %s" % (cmd))
sc.open_session()
sc.request_exec(cmd)
buffer_ = bytearray()
while 1:
bytes = sc.read(block_size)
yield bytes
if len(bytes) < block_size:
break
def is_blocking(self):
return _ssh_is_blocking(self.__ssh_session_ptr)
def set_blocking(self, blocking=True):
_ssh_set_blocking(self.__ssh_session_ptr, blocking)
def get_error_code(self):
return ssh_get_error_code(self.__ssh_session_ptr)
def get_error(self):
return ssh_get_error(self.__ssh_session_ptr)
def get_disconnect_message(self):
return _ssh_get_disconnect_message(self.__ssh_session_ptr)
def get_issue_banner(self):
return _ssh_get_issue_banner(self.__ssh_session_ptr)
def get_openssh_version(self):
return _ssh_get_openssh_version(self.__ssh_session_ptr)
def get_status(self):
return _ssh_get_status(self.__ssh_session_ptr)
def get_version(self):
return _ssh_get_version(self.__ssh_session_ptr)
def get_serverbanner(self):
return _ssh_get_serverbanner(self.__ssh_session_ptr)
def disconnect(self):
return _ssh_disconnect(self.__ssh_session_ptr)
@property
def session_id(self):
return self.__ssh_session_ptr
|
def _ssh_channel_read(ssh_channel_int, count, is_stderr):
    """Do a read on a channel.

    Blocks until data arrives, retrying on SSH_AGAIN; returns at most `count`
    bytes (a short read is possible).
    """
    buffer_ = create_string_buffer(count)
    while 1:
        received_bytes = c_ssh_channel_read(ssh_channel_int,
                                            cast(buffer_, c_void_p),
                                            c_uint32(count),
                                            c_int(int(is_stderr)))

        if received_bytes == SSH_ERROR:
            ssh_session_int = _ssh_channel_get_session(ssh_channel_int)
            error = ssh_get_error(ssh_session_int)
            raise SshError("Channel read failed: %s" % (error))

        # BUG: We're not using the nonblocking variant, but this can still
        # return SSH_AGAIN due to that call's broken dependencies.
        # TODO: This call might return SSH_AGAIN, even though we should always
        # be blocking. Reported as bug #115.
        elif received_bytes == SSH_AGAIN:
            continue
        else:
            break

    # TODO: Where is the timeout configured for the read?
    return buffer_.raw[0:received_bytes]
"def ssh_get_error(ssh_session_int):\n return c_ssh_get_error(ssh_session_int)\n",
"def _ssh_channel_get_session(ssh_channel_int):\n return c_ssh_channel_get_session(ssh_channel_int)\n"
] | import logging
from ctypes import c_char_p, c_void_p, cast, c_uint32, c_int, \
create_string_buffer
from time import time
from pysecure.config import NONBLOCK_READ_TIMEOUT_MS, \
DEFAULT_SHELL_READ_BLOCK_SIZE
from pysecure.constants.ssh import SSH_OK, SSH_ERROR, SSH_AGAIN
from pysecure.exceptions import SshError, SshNonblockingTryAgainException, \
SshNoDataReceivedException, SshTimeoutException
from pysecure.utility import sync, bytify, stringify
from pysecure.calls.channeli import c_ssh_channel_new, \
c_ssh_channel_open_forward, \
c_ssh_channel_write, c_ssh_channel_free, \
c_ssh_channel_read, \
c_ssh_channel_send_eof, \
c_ssh_channel_is_open, \
c_ssh_channel_open_session, \
c_ssh_channel_request_exec, \
c_ssh_channel_request_shell, \
c_ssh_channel_request_pty, \
c_ssh_channel_change_pty_size, \
c_ssh_channel_is_eof, \
c_ssh_channel_read_nonblocking, \
c_ssh_channel_request_env, \
c_ssh_channel_get_session, \
c_ssh_channel_accept_x11, \
c_ssh_channel_request_x11
from pysecure.error import ssh_get_error, ssh_get_error_code
def _ssh_channel_new(ssh_session_int):
    """Allocate a new channel on the given session."""
    logging.debug("Opening channel on session.")

    result = c_ssh_channel_new(ssh_session_int)
    if result is None:
        # BUGFIX: the failure path referenced an undefined `ssh_channel_int`
        # (no channel exists yet here), which raised NameError instead of the
        # intended SshError; read the error off the session directly.
        error = ssh_get_error(ssh_session_int)
        raise SshError("Could not open channel: %s" % (error))

    return result
def _ssh_channel_open_forward(ssh_channel_int, host_remote, port_remote,
                              host_source, port_local):
    """Open a forwarding channel to host_remote:port_remote."""
    logging.debug("Requesting forward on channel.")

    rc = c_ssh_channel_open_forward(ssh_channel_int,
                                    c_char_p(bytify(host_remote)),
                                    c_int(port_remote),
                                    c_char_p(bytify(host_source)),
                                    c_int(port_local))

    if rc == SSH_AGAIN:
        raise SshNonblockingTryAgainException()
    elif rc != SSH_OK:
        session_int = _ssh_channel_get_session(ssh_channel_int)
        raise SshError("Forward failed: %s" % (ssh_get_error(session_int)))
def _ssh_channel_write(ssh_channel_int, data):
    """Write all of `data` to the channel, verifying the reported count."""
    data_len = len(data)
    sent_bytes = c_ssh_channel_write(ssh_channel_int,
                                     cast(c_char_p(data), c_void_p),
                                     c_uint32(data_len))

    if sent_bytes == SSH_ERROR:
        session_int = _ssh_channel_get_session(ssh_channel_int)
        raise SshError("Channel write failed: %s" %
                       (ssh_get_error(session_int)))
    elif sent_bytes != data_len:
        raise SshError("Channel write of (%d) bytes failed for length (%d) of "
                       "written data." % (data_len, sent_bytes))
def _ssh_channel_read_nonblocking(ssh_channel_int, count, is_stderr):
    """Read up to `count` bytes without blocking; may return b''."""
    buffer_ = create_string_buffer(count)
    received = c_ssh_channel_read_nonblocking(ssh_channel_int,
                                              cast(buffer_, c_void_p),
                                              c_uint32(count),
                                              c_int(int(is_stderr)))

    if received == SSH_ERROR:
        session_int = _ssh_channel_get_session(ssh_channel_int)
        raise SshError("Channel read (non-blocking) failed: %s" %
                       (ssh_get_error(session_int)))

    return buffer_.raw[0:received]
def _ssh_channel_free(ssh_channel_int):
    """Free the given channel handle."""
    logging.debug("Freeing channel (%d)." % (ssh_channel_int))
    c_ssh_channel_free(ssh_channel_int)
def _ssh_channel_send_eof(ssh_channel_int):
    """Send EOF on the channel."""
    if c_ssh_channel_send_eof(ssh_channel_int) != SSH_OK:
        session_int = _ssh_channel_get_session(ssh_channel_int)
        raise SshError("Could not send EOF: %s" % (ssh_get_error(session_int)))
def _ssh_channel_is_open(ssh_channel_int):
    """Report whether the channel is still open."""
    return c_ssh_channel_is_open(ssh_channel_int) != 0
def _ssh_channel_open_session(ssh_channel_int):
    """Open a session-type channel (prerequisite for exec/shell requests)."""
    logging.debug("Request channel open-session.")

    if c_ssh_channel_open_session(ssh_channel_int) != SSH_OK:
        session_int = _ssh_channel_get_session(ssh_channel_int)
        raise SshError("Could not open session on channel: %s" %
                       (ssh_get_error(session_int)))

    logging.debug("Channel open-session successful.")
def _ssh_channel_request_exec(ssh_channel_int, cmd):
    """Ask the server to execute `cmd` on this channel."""
    logging.debug("Requesting channel exec.")

    rc = c_ssh_channel_request_exec(ssh_channel_int, c_char_p(bytify(cmd)))

    if rc == SSH_AGAIN:
        raise SshNonblockingTryAgainException()
    elif rc != SSH_OK:
        session_int = _ssh_channel_get_session(ssh_channel_int)
        raise SshError("Could not execute shell request on channel: %s" %
                       (ssh_get_error(session_int)))

    logging.debug("Channel-exec successful.")
def _ssh_channel_request_shell(ssh_channel_int):
    """Request an interactive shell on the channel."""
    logging.debug("Requesting channel shell.")

    rc = c_ssh_channel_request_shell(ssh_channel_int)

    if rc == SSH_AGAIN:
        raise SshNonblockingTryAgainException()
    elif rc != SSH_OK:
        session_int = _ssh_channel_get_session(ssh_channel_int)
        raise SshError("Shell request failed: %s" %
                       (ssh_get_error(session_int)))

    logging.debug("Channel-shell request successful.")
def _ssh_channel_request_pty(ssh_channel_int):
    """Request a PTY on the channel."""
    logging.debug("Requesting channel PTY.")

    rc = c_ssh_channel_request_pty(ssh_channel_int)

    if rc == SSH_AGAIN:
        raise SshNonblockingTryAgainException()
    elif rc != SSH_OK:
        session_int = _ssh_channel_get_session(ssh_channel_int)
        raise SshError("PTY request failed: %s" % (ssh_get_error(session_int)))

    logging.debug("Channel PTY request successful.")
def _ssh_channel_change_pty_size(ssh_channel_int, col, row):
    """Resize the channel's PTY to (col, row)."""
    rc = c_ssh_channel_change_pty_size(ssh_channel_int, c_int(col), c_int(row))
    if rc != SSH_OK:
        session_int = _ssh_channel_get_session(ssh_channel_int)
        raise SshError("PTY size change failed: %s" %
                       (ssh_get_error(session_int)))
def _ssh_channel_is_eof(ssh_channel_int):
    """Report whether the remote side has sent EOF."""
    return bool(c_ssh_channel_is_eof(ssh_channel_int))
def _ssh_channel_request_env(ssh_channel_int, name, value):
    """Ask the server to set a remote environment variable."""
    logging.debug("Setting remote environment variable [%s] to [%s]." %
                  (name, value))

    # TODO: We haven't been able to get this to work. Reported bug #125.
    rc = c_ssh_channel_request_env(ssh_channel_int,
                                   c_char_p(bytify(name)),
                                   c_char_p(bytify(value)))

    if rc == SSH_AGAIN:
        raise SshNonblockingTryAgainException()
    elif rc != SSH_OK:
        session_int = _ssh_channel_get_session(ssh_channel_int)
        raise SshError("Request-env failed: %s" % (ssh_get_error(session_int)))
def _ssh_channel_get_session(ssh_channel_int):
    """Return the session handle that owns this channel."""
    return c_ssh_channel_get_session(ssh_channel_int)
def _ssh_channel_accept_x11(ssh_channel_int, timeout_ms):
    """Accept an incoming X11 channel; raises SshTimeoutException on
    timeout.
    """
    accepted = c_ssh_channel_accept_x11(ssh_channel_int, timeout_ms)
    if accepted is None:
        raise SshTimeoutException()

    # BUGFIX: the original returned the misspelled name `ssh_channel_accept`,
    # which raised NameError instead of returning the accepted channel.
    return accepted
def _ssh_channel_request_x11(ssh_channel_int, screen_number=0,
                             single_connection=False, protocol=None,
                             cookie=None):
    """Request X11 forwarding on the channel."""
    rc = c_ssh_channel_request_x11(ssh_channel_int,
                                   int(single_connection),
                                   c_char_p(bytify(protocol)),
                                   c_char_p(bytify(cookie)),
                                   screen_number)

    if rc == SSH_AGAIN:
        raise SshNonblockingTryAgainException()
    elif rc != SSH_OK:
        session_int = _ssh_channel_get_session(ssh_channel_int)
        raise SshError("Channel request-X11 failed: %s" %
                       (ssh_get_error(session_int)))
class SshChannel(object):
    """Context-manager wrapper around a libssh channel.

    Accepts either wrapper objects or raw handles for both the session and an
    (optional) existing channel; a new channel is allocated on __enter__ when
    none was given.
    """

    def __init__(self, ssh_session, ssh_channel=None):
        self.__ssh_session_int = getattr(ssh_session,
                                         'session_id',
                                         ssh_session)

        self.__ssh_channel_int = getattr(ssh_channel,
                                         'session_id',
                                         ssh_channel)

    def __enter__(self):
        if self.__ssh_channel_int is None:
            self.__ssh_channel_int = _ssh_channel_new(self.__ssh_session_int)

        return self

    def __exit__(self, e_type, e_value, e_tb):
        # The documentation says that a "free" implies a "close", and that a
        # "close" implies a "send eof". From a cursory glance, this seems
        # accurate.
        _ssh_channel_free(self.__ssh_channel_int)
        self.__ssh_channel_int = None

    def __del__(self):
        # The documentation says that a "free" implies a "close", and that a
        # "close" implies a "send eof". From a cursory glance, this seems
        # accurate.
        if self.__ssh_channel_int is not None:
            _ssh_channel_free(self.__ssh_channel_int)

    def open_forward(self, host_remote, port_remote, host_source, port_local):
        _ssh_channel_open_forward(self.__ssh_channel_int,
                                  host_remote,
                                  port_remote,
                                  host_source,
                                  port_local)

    def write(self, data):
        _ssh_channel_write(self.__ssh_channel_int, data)

    def read(self, count, is_stderr=False):
        return _ssh_channel_read(self.__ssh_channel_int, count, is_stderr)

    def read_nonblocking(self, count, is_stderr=False):
        return _ssh_channel_read_nonblocking(self.__ssh_channel_int,
                                             count,
                                             is_stderr)

    def send_eof(self):
        _ssh_channel_send_eof(self.__ssh_channel_int)

    def is_open(self):
        return _ssh_channel_is_open(self.__ssh_channel_int)

    def open_session(self):
        _ssh_channel_open_session(self.__ssh_channel_int)

    def request_exec(self, cmd):
        """Execute a command. Note that this can only be done once, and may be
        the only operation performed with the current channel.
        """
        return _ssh_channel_request_exec(self.__ssh_channel_int, cmd)

    def request_shell(self):
        """Activate shell services on the channel (for PTY emulation)."""
        _ssh_channel_request_shell(self.__ssh_channel_int)

    def request_pty(self):
        _ssh_channel_request_pty(self.__ssh_channel_int)

    def change_pty_size(self, col, row):
        _ssh_channel_change_pty_size(self.__ssh_channel_int, col, row)

    def is_eof(self):
        return _ssh_channel_is_eof(self.__ssh_channel_int)

    def request_env(self, name, value):
        return _ssh_channel_request_env(self.__ssh_channel_int, name, value)

    def accept_x11(self, timeout_ms):
        ssh_x11_channel_int = _ssh_channel_accept_x11(self.__ssh_channel_int,
                                                      timeout_ms)

        return SshChannel(self.__ssh_session_int, ssh_x11_channel_int)

    def request_x11(self, screen_number=0, single_connection=False,
                    protocol=None, cookie=None):
        # BUGFIX: `self` was missing from the original signature, so calling
        # this on an instance bound the instance to `screen_number` and
        # shifted every other argument.
        return _ssh_channel_request_x11(self.__ssh_channel_int, screen_number,
                                        single_connection, protocol, cookie)
class RemoteShellProcessor(object):
    """Drives an interactive remote shell over a dedicated channel."""

    def __init__(self, ssh_session, block_size=DEFAULT_SHELL_READ_BLOCK_SIZE):
        self.__log = logging.getLogger('RSP')
        self.__log.debug("Initializing RSP.")

        self.__ssh_session = ssh_session
        self.__block_size = block_size

    def __wait_on_output(self, data_cb):
        """Feed non-blocking reads to data_cb until the channel closes or
        stays quiet for NONBLOCK_READ_TIMEOUT_MS.
        """
        self.__log.debug("Reading chunked output.")

        start_at = time()
        while self.__sc.is_open() and self.__sc.is_eof() is False:
            chunk = self.__sc.read_nonblocking(self.__block_size)

            if chunk == b'':
                if (time() - start_at) * 1000 > NONBLOCK_READ_TIMEOUT_MS:
                    break

                continue

            data_cb(chunk)
            start_at = time()

    def __wait_on_output_all(self, whole_data_cb):
        """Collect all pending output and pass it to whole_data_cb as one
        bytes value.
        """
        self.__log.debug("Reading complete output.")

        collected = bytearray()

        def on_chunk(chunk):
            collected.extend(chunk)

        self.__wait_on_output(on_chunk)
        whole_data_cb(bytes(collected))

    def do_command(self, command, block_cb=None, add_nl=True,
                   drop_last_line=True, drop_first_line=True):
        """Send one command line to the shell and return (or stream) the
        output. The first line (echoed command) and last line (presumably the
        prompt) are dropped by default.
        """
        self.__log.debug("Sending shell command: %s" % (command.rstrip()))

        if add_nl is True:
            command += '\n'

        self.__sc.write(bytify(command))

        if block_cb is not None:
            self.__wait_on_output(block_cb)
            return

        received = bytearray()

        def on_data(data):
            received.extend(bytify(data))

        self.__wait_on_output_all(on_data)

        if drop_first_line is True:
            received = received[received.index(b'\n') + 1:]

        # In all likelihood, the last line is probably the prompt.
        if drop_last_line is True:
            received = received[:received.rindex(b'\n')]

        return bytes(received)

    def shell(self, ready_cb, cols=80, rows=24):
        """Open a PTY-backed shell, consume the welcome banner, and invoke
        ready_cb(channel, welcome_text).
        """
        self.__log.debug("Starting RSP shell.")

        with SshChannel(self.__ssh_session) as sc:
            sc.open_session()
            sc.request_env('aa', 'bb')
#            sc.request_env('LANG', 'en_US.UTF-8')
            sc.request_pty()
            sc.change_pty_size(cols, rows)
            sc.request_shell()

            self.__log.debug("Waiting for shell welcome message.")

            welcome = bytearray()

            def welcome_received_cb(data):
                welcome.extend(bytify(data))

            self.__sc = sc
            self.__wait_on_output_all(welcome_received_cb)

            self.__log.debug("RSP shell is ready.")

            ready_cb(sc, stringify(welcome))
            self.__sc = None
|
def svn_versions_from_vcs(tag_prefix, root, verbose=False):
    """Return a dictionary of values derived directly from the VCS. This is
    the third attempt to find information by get_versions().
    """
    if not os.path.exists(os.path.join(root, '.svn')):
        if verbose:
            print("no .svn in %s." % root)
        return {}

    module = sys.modules[__name__]

    # When running from _version.py the tag URL is a module-level constant;
    # when running from versioneer.py it lives in the 'vcs_settings' map.
    tag_url = getattr(module, 'svn_tag_url', None)
    if tag_url is None:
        settings = getattr(module, 'vcs_settings', None)
        if settings is not None:
            tag_url = settings.get('svn', {}).get('tag_url')

    if tag_url is None:
        raise ValueError("Please define VCS-specific 'tag_url' setting for "
                         "'svn' within 'versioneer'.")

    # Ask Subversion for an XML listing of the tags directory.
    listing_xml = run_command(['svn'], ['ls', '--xml', tag_url], cwd=root)

# TODO(dustin): This should raise an EnvironmentError upon failure.
    if listing_xml is None:
        print("Error accessing Subversion for latest version.")
        return {}

    (releases, latest_revision) = svn_parse_tag_xml(listing_xml)
    release_name = releases[latest_revision]['name']

    # SVN tags carry no describe/distance data, so every flavor is just the
    # latest tag name.  (Git, by contrast, also produces keys like
    # "closest_tag", "distance", "short_revisionid", "dirty", "pep440", ...)
    return {'default': release_name,
            'version': release_name,
            'full': release_name}
"def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):\n assert isinstance(commands, list)\n p = None\n for c in commands:\n try:\n # remember shell=False, so use git.cmd on windows, not just git\n p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr\n else None))\n break\n except EnvironmentError:\n e = sys.exc_info()[1]\n if e.errno == errno.ENOENT:\n continue\n if verbose:\n print(\"unable to run %s\" % args[0])\n print(e)\n return None\n else:\n if verbose:\n print(\"unable to find command, tried %s\" % (commands,))\n return None\n stdout = p.communicate()[0].strip()\n if sys.version >= '3':\n stdout = stdout.decode()\n if p.returncode != 0:\n# TODO(dustin): Maybe we should contemplate raising a SystemError here, rather \n# then returning a None. It's almost always preferable that it would default to \n# being a terminal error unles specifically caught (rather than vice versa).\n if verbose:\n print(\"unable to run %s (error)\" % args[0])\n return None\n return stdout\n",
"def svn_parse_tag_xml(info_xml):\n root = ElementTree.fromstring(info_xml)\n\n release_list = root.find('list')\n releases = {}\n latest_revision = 0\n for release in release_list:\n release = dict([(e.tag, e) for e in release])\n\n revision = int(release['commit'].attrib['revision'])\n distilled = { 'name': release['name'].text }\n for e in release['commit']:\n distilled[e.tag] = e.text\n\n releases[revision] = distilled\n latest_revision = max(latest_revision, revision)\n\n return (releases, latest_revision)\n"
] |
# Version: 0.10+
"""
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, and 3.2, 3.3
[](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* run `versioneer-installer` in your source tree: this installs `versioneer.py`
* follow the instructions below (also in the `versioneer.py` docstring)
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example 'git describe --tags --dirty --always' reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes.)
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time. However,
when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
copy is replaced by a small static file that contains just the generated
version data.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the "git archive" command. As a result, generated tarballs will
contain enough information to get the proper version.
## Installation
First, decide on values for the following configuration variables:
* `VCS`:
The version control system you use.
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file. If your project uses `src/myproject/__init__.py`, this
should be `src/myproject/_version.py`. This file should be checked in to
your VCS as usual: the copy created below by `setup.py versioneer` will
include code that parses expanded VCS keywords in generated tarballs. The
'build' and 'sdist' commands will replace it with a copy that has just the
calculated version string.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
a string, frequently the same as tag_prefix, which appears at the start of
all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'.
This tool provides one script, named `versioneer-installer`. That script does
one thing: write a copy of `versioneer.py` into the current directory.
To versioneer-enable your project:
* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
source tree.
* 2: add the following lines to the top of your `setup.py`, with the
configuration values you decided earlier:
import versioneer
versioneer.VCS = 'git'
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
If you're using Subversion, then you'll need to declare the URL for your
repository's tag path:
versioneer.vcs_settings['svn'] = { 'tag_url': 'https://svn_host/svn/repo/tags' }
* 3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: now run `setup.py versioneer`, which will create `_version.py`, and
will modify your `__init__.py` to define `__version__` (by calling a
function from `_version.py`). It will also modify your `MANIFEST.in` to
include both `versioneer.py` and the generated `_version.py` in sdist
tarballs.
* 5: commit these changes to your VCS. To make sure you won't forget,
`setup.py versioneer` will mark everything it touched for addition.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Currently, all version strings must be based upon a tag. Versioneer will
report "unknown" until your tree has at least one tag in its history. This
restriction will be fixed eventually (see issue #12).
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different keys for different flavors
of the version string:
* `['version']`: condensed tag+distance+shortid+dirty identifier. For git,
this uses the output of `git describe --tags --dirty --always` but strips
the tag_prefix. For example "0.11-2-g1076c97-dirty" indicates that the tree
is like the "1076c97" commit but has uncommitted changes ("-dirty"), and
that this commit is two revisions ("-2-") beyond the "0.11" tag. For
released software (exactly equal to a known tag), the identifier will only
contain the stripped tag, e.g. "0.11".
* `['full']`: detailed revision identifier. For Git, this is the full SHA1
commit id, followed by "-dirty" if the tree contains uncommitted changes,
e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty".
Some variants are more useful than others. Including `full` in a bug report
should allow developers to reconstruct the exact code being tested (or
indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
In the future, this will also include a
[PEP-0440](http://legacy.python.org/dev/peps/pep-0440/) -compatible flavor
(e.g. `1.2.post0.dev123`). This loses a lot of information (and has no room
for a hash-based revision id), but is safe to use in a `setup.py`
"`version=`" argument. It also enables tools like *pip* to compare version
strings and evaluate compatibility constraint declarations.
The `setup.py versioneer` command adds the following text to your
`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
from ._version import get_versions
    __version__ = get_versions()['version']
del get_versions
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* re-run `versioneer-installer` in your source tree to replace `versioneer.py`
* edit `setup.py`, if necessary, to include any new configuration settings indicated by the release notes
* re-run `setup.py versioneer` to replace `SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is hereby released into the
public domain. The `_version.py` that it creates is also in the public
domain.
"""
import os, sys, re
from os import path
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
# these configuration settings will be overridden by setup.py after it
# imports us
VCS = None                      # name of the VCS in use (e.g. 'git', 'svn')
versionfile_source = None       # project-relative path of generated _version.py
versionfile_build = None        # same, but relative to the build directory
tag_prefix = None               # prefix stripped from VCS tags (e.g. 'myproject-')
parentdir_prefix = None         # prefix of unpacked-tarball directory names
version_string_template = "%(default)s"  # format applied to the computed version
vcs_settings = {}               # per-VCS extras (e.g. {'svn': {'tag_url': ...}})

# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}            # maps VCS name -> _version.py template text
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first launchable command from *commands* with *args*.

    :param commands: candidate executable names, tried in order (lets callers
        cope with platform differences, e.g. ['git.cmd', 'git.exe']).
    :param args: argument list appended to the chosen executable.
    :param cwd: working directory for the child process.
    :param verbose: print diagnostics on failure.
    :param hide_stderr: capture (and discard) the child's stderr instead of
        letting it inherit ours.
    :returns: the command's stripped stdout as text, or ``None`` if no
        candidate could be launched or the command exited nonzero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # This candidate isn't installed; try the next one.
                continue
            if verbose:
                print("unable to run %s" % args[0])
                print(e)
            return None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = p.communicate()[0].strip()
    # Decide bytes-vs-text with version_info rather than the fragile
    # lexicographic string test `sys.version >= '3'`.
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
# TODO(dustin): Maybe we should contemplate raising a SystemError here, rather
# than returning a None. It's almost always preferable that it would default to
# being a terminal error unless specifically caught (rather than vice versa).
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return stdout
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.10+ (https://github.com/warner/python-versioneer)
"""This text is put at the top of _version.py, and can be keyword-replaced with
version information by the VCS.
"""
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full_revisionid = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_short_revisionid = "%(DOLLAR)sFormat:%%h%(DOLLAR)s"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
version_string_template = "%(VERSION_STRING_TEMPLATE)s"
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
# TODO(dustin): Maybe we should contemplate raising a SystemError here, rather
# then returning a None. It's almost always preferable that it would default to
# being a terminal error unles specifically caught (rather than vice versa).
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
"""Return a dictionary of values derived from the name of our parent
directory (useful when a thoughtfully-named directory is created from an
archive). This is the fourth attempt to find information by get_versions().
"""
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
version = dirname[len(parentdir_prefix):]
return { "describe": version,
"long": version,
"pep440": version,
}
import re
def git_get_keywords(versionfile_abs):
"""Return a dictionary of values replaced by the VCS, automatically. This
is the first attempt to find information by get_versions().
"""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs) as f:
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full_revisionid ="):
mo = re.search(r'=\\s*"(.*)"', line)
if mo:
keywords["full_revisionid"] = mo.group(1)
if line.strip().startswith("git_short_revisionid ="):
mo = re.search(r'=\\s*"(.*)"', line)
if mo:
keywords["short_revisionid"] = mo.group(1)
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
shortest_tag = None
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
shortest_tag = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% shortest_tag)
break
versions = {
"full_revisionid": keywords["full_revisionid"].strip(),
"short_revisionid": keywords["short_revisionid"].strip(),
"dirty": False, "dash_dirty": "",
"closest_tag": shortest_tag,
"closest_tag_or_zero": shortest_tag or "0",
# "distance" is not provided: cannot deduce from keyword expansion
}
if not shortest_tag and verbose:
print("no suitable tags, using full revision id")
composite = shortest_tag or versions["full_revisionid"]
versions["describe"] = composite
versions["long"] = composite
versions["default"] = composite
versions["pep440"] = composite
return versions
import re
import sys
import os.path
def git_versions_from_vcs(tag_prefix, root, verbose=False):
"""Return a dictionary of values derived directly from the VCS. This is the
third attempt to find information by get_versions().
"""
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
versions = {}
full_revisionid = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_revisionid is None:
return {}
versions["full_revisionid"] = full_revisionid.strip()
d = run_command(GITS,
["describe", "--tags", "--dirty", "--always", "--long"],
cwd=root)
if d is None:
return {}
d = d.strip()
# "TAG-DIST-gHASH[-dirty]" , where DIST might be "0"
# or just "HASH[-dirty]" if there are no ancestor tags
versions["long"] = d
mo1 = re.search(r"^(.*)-(\\d+)-g([0-9a-f]+)(-dirty)?$", d)
mo2 = re.search(r"^([0-9a-f]+)(-dirty)?$", d)
if mo1:
rawtag = mo1.group(1)
if not rawtag.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (rawtag, tag_prefix))
return {}
tag = rawtag[len(tag_prefix):]
versions["closest_tag"] = tag
versions["distance"] = int(mo1.group(2))
versions["short_revisionid"] = mo1.group(3)
versions["dirty"] = bool(mo1.group(4))
versions["pep440"] = tag
if versions["distance"]:
versions["describe"] = d
versions["pep440"] += ".post%%d" %% versions["distance"]
else:
versions["describe"] = tag
if versions["dirty"]:
versions["describe"] += "-dirty"
if versions["dirty"]:
# not strictly correct, as X.dev0 sorts "earlier" than X, but we
# need some way to distinguish the two. You shouldn't be shipping
# -dirty code anyways.
versions["pep440"] += ".dev0"
versions["default"] = versions["describe"]
elif mo2: # no ancestor tags
versions["closest_tag"] = None
versions["short_revisionid"] = mo2.group(1)
versions["dirty"] = bool(mo2.group(2))
# count revisions to compute ["distance"]
commits = run_command(GITS, ["rev-list", "--count", "HEAD"], cwd=root)
if commits is None:
return {}
versions["distance"] = int(commits.strip())
versions["pep440"] = "0"
if versions["distance"]:
versions["pep440"] += ".post%%d" %% versions["distance"]
if versions["dirty"]:
versions["pep440"] += ".dev0" # same concern as above
versions["describe"] = d
versions["default"] = "0-%%d-g%%s" %% (versions["distance"], d)
else:
return {}
versions["dash_dirty"] = "-dirty" if versions["dirty"] else ""
versions["closest_tag_or_zero"] = versions["closest_tag"] or "0"
if versions["distance"] == 0:
versions["dash_distance"] = ""
else:
versions["dash_distance"] = "-%%d" %% versions["distance"]
return versions
import os
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
"""This variation of get_versions() will be used in _version.py ."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = { "refnames": git_refnames,
"full_revisionid": git_full_revisionid,
"short_revisionid": git_short_revisionid }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
# TODO(dustin): Shouldn't this always loop until it fails?
for i in range(len(versionfile_source.split(os.sep))):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
'''
import re
def git_get_keywords(versionfile_abs):
    """Return a dictionary of values replaced by the VCS, automatically. This
    is the first attempt to find information by get_versions().
    """
    # The code embedded in _version.py can simply read these keyword
    # variables.  From setup.py we must not import _version.py, so the values
    # are scraped out of the file with a regexp instead.  (This function is
    # not used from _version.py itself.)
    prefix_to_key = {
        "git_refnames =": "refnames",
        "git_full_revisionid =": "full_revisionid",
        "git_short_revisionid =": "short_revisionid",
    }
    keywords = {}
    try:
        with open(versionfile_abs) as f:
            for line in f.readlines():
                stripped = line.strip()
                for prefix, key in prefix_to_key.items():
                    if stripped.startswith(prefix):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file: return whatever (possibly nothing) we got.
        pass
    return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
    """Second attempt by get_versions(): derive version values from the
    git-archive keyword expansions scraped out of _version.py.
    """
    if not keywords:
        return {}  # keyword-finding function failed to find keywords
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        return {}  # unexpanded, so not in an unpacked git-archive tarball
    refs = set(r.strip() for r in refnames.strip("()").split(","))
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set(r[len(TAG):] for r in refs if r.startswith(TAG))
    if not tags:
        # Either git < 1.8.3, or genuinely no tags.  Heuristic: assume all
        # version tags contain a digit, which filters out branch names like
        # "release"/"stabilization" as well as "HEAD" and "master" (the old
        # %d expansion strips refs/heads/ and refs/tags/, so we cannot tell
        # branches and tags apart any other way).
        tags = set(r for r in refs if re.search(r'\d', r))
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs-tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    picked_tag = None
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            picked_tag = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % picked_tag)
            break
    versions = {
        "full_revisionid": keywords["full_revisionid"].strip(),
        "short_revisionid": keywords["short_revisionid"].strip(),
        "dirty": False,
        "dash_dirty": "",
        "closest_tag": picked_tag,
        "closest_tag_or_zero": picked_tag or "0",
        # "distance" is not provided: cannot deduce from keyword expansion
    }
    if not picked_tag and verbose:
        print("no suitable tags, using full revision id")
    composite = picked_tag or versions["full_revisionid"]
    for flavor in ("describe", "long", "default", "pep440"):
        versions[flavor] = composite
    return versions
import re
import sys
import os.path
def git_versions_from_vcs(tag_prefix, root, verbose=False):
    """Return a dictionary of values derived directly from the VCS. This is the
    third attempt to find information by get_versions().
    """
    # this runs 'git' from the root of the source tree. This only gets called
    # if the git-archive 'subst' keywords were *not* expanded, and
    # _version.py hasn't already been rewritten with a short version string,
    # meaning we're inside a checked out source tree.
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    versions = {}
    full_revisionid = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_revisionid is None:
        # run_command returns None when git fails or is missing.
        return {}
    versions["full_revisionid"] = full_revisionid.strip()
    d = run_command(GITS,
                    ["describe", "--tags", "--dirty", "--always", "--long"],
                    cwd=root)
    if d is None:
        return {}
    d = d.strip()
    # "TAG-DIST-gHASH[-dirty]" , where DIST might be "0"
    # or just "HASH[-dirty]" if there are no ancestor tags
    versions["long"] = d
    # mo1 matches the tagged form; mo2 matches the bare-hash form.
    mo1 = re.search(r"^(.*)-(\d+)-g([0-9a-f]+)(-dirty)?$", d)
    mo2 = re.search(r"^([0-9a-f]+)(-dirty)?$", d)
    if mo1:
        rawtag = mo1.group(1)
        if not rawtag.startswith(tag_prefix):
            if verbose:
                print("tag '%s' doesn't start with prefix '%s'" % (rawtag, tag_prefix))
            return {}
        tag = rawtag[len(tag_prefix):]
        versions["closest_tag"] = tag
        versions["distance"] = int(mo1.group(2))
        versions["short_revisionid"] = mo1.group(3)
        versions["dirty"] = bool(mo1.group(4))
        versions["pep440"] = tag
        if versions["distance"]:
            # Commits past the tag: keep the full describe string and add a
            # PEP 440 .postN suffix.
            versions["describe"] = d
            versions["pep440"] += ".post%d" % versions["distance"]
        else:
            # Exactly on the tag: describe is just the tag (+-dirty).
            versions["describe"] = tag
            if versions["dirty"]:
                versions["describe"] += "-dirty"
        if versions["dirty"]:
            # not strictly correct, as X.dev0 sorts "earlier" than X, but we
            # need some way to distinguish the two. You shouldn't be shipping
            # -dirty code anyways.
            versions["pep440"] += ".dev0"
        versions["default"] = versions["describe"]
    elif mo2:  # no ancestor tags
        versions["closest_tag"] = None
        versions["short_revisionid"] = mo2.group(1)
        versions["dirty"] = bool(mo2.group(2))
        # count revisions to compute ["distance"]
        commits = run_command(GITS, ["rev-list", "--count", "HEAD"], cwd=root)
        if commits is None:
            return {}
        versions["distance"] = int(commits.strip())
        versions["pep440"] = "0"
        if versions["distance"]:
            versions["pep440"] += ".post%d" % versions["distance"]
        if versions["dirty"]:
            versions["pep440"] += ".dev0"  # same concern as above
        versions["describe"] = d
        versions["default"] = "0-%d-g%s" % (versions["distance"], d)
    else:
        # Output matched neither form; give up.
        return {}
    # Convenience flavors used by version_string_template.
    versions["dash_dirty"] = "-dirty" if versions["dirty"] else ""
    versions["closest_tag_or_zero"] = versions["closest_tag"] or "0"
    if versions["distance"] == 0:
        versions["dash_distance"] = ""
    else:
        versions["dash_distance"] = "-%d" % versions["distance"]
    return versions
import os.path
import sys
def git_do_vcs_install(manifest_in, versionfile_source, ipy):
    """The versioneer.py file was just written. Do any VCS-specific logic,
    here.

    For git this means: ensure .gitattributes marks versionfile_source with
    "export-subst" (so `git archive` tarballs get keyword-expanded version
    info), then `git add` every file this installer touched.

    :param manifest_in: path to the project's MANIFEST.in
    :param versionfile_source: project-relative path of the generated _version.py
    :param ipy: path to the package __init__.py that was modified
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # run_command() uses shell=False, so the Windows wrappers must be
        # named explicitly instead of relying on PATHEXT resolution.
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source, ipy]
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            # Stage the .py source, not a compiled artifact.
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        # Frozen/embedded interpreters may not define __file__.
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # Only append the export-subst line if an equivalent one isn't
        # already present for versionfile_source.
        with open(".gitattributes") as f:
            for line in f.readlines():
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except EnvironmentError:
        # No .gitattributes yet; we'll create it below.
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
LONG_VERSION_PY['svn'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.10+ (https://github.com/warner/python-versioneer)
"""This text is put at the top of _version.py, and can be keyword-replaced with
version information by the VCS.
"""
svn_revision = "%(DOLLAR)sRevision%(DOLLAR)s"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
version_string_template = "%(VERSION_STRING_TEMPLATE)s"
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
# TODO(dustin): Maybe we should contemplate raising a SystemError here, rather
# then returning a None. It's almost always preferable that it would default to
# being a terminal error unles specifically caught (rather than vice versa).
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
"""Return a dictionary of values derived from the name of our parent
directory (useful when a thoughtfully-named directory is created from an
archive). This is the fourth attempt to find information by get_versions().
"""
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
version = dirname[len(parentdir_prefix):]
return { "describe": version,
"long": version,
"pep440": version,
}
def svn_get_keywords(versionfile_abs):
"""Return a dictionary of values replaced by the VCS, automatically. This
is the first attempt to find information by get_versions().
"""
return {} #{ 'revision': svn_revision }
# TODO(dustin): Needs to be tested.
def svn_versions_from_keywords(keywords, tag_prefix, verbose=False):
return {} # { 'default': keywords['revision'] }
import re
import sys
import os.path
from xml.etree import ElementTree
def svn_parse_tag_xml(info_xml):
root = ElementTree.fromstring(info_xml)
release_list = root.find('list')
releases = {}
latest_revision = 0
for release in release_list:
release = dict([(e.tag, e) for e in release])
revision = int(release['commit'].attrib['revision'])
distilled = { 'name': release['name'].text }
for e in release['commit']:
distilled[e.tag] = e.text
releases[revision] = distilled
latest_revision = max(latest_revision, revision)
return (releases, latest_revision)
def svn_versions_from_vcs(tag_prefix, root, verbose=False):
"""Return a dictionary of values derived directly from the VCS. This is the
third attempt to find information by get_versions().
"""
if not os.path.exists(os.path.join(root, '.svn')):
if verbose:
print("no .svn in %%s." %% root)
return {}
current_module = sys.modules[__name__]
# If we're running from _version.py .
tag_url = getattr(current_module, 'svn_tag_url', None)
# If we're running from versioneer.py .
if tag_url is None:
vcs_settings = getattr(current_module, 'vcs_settings', None)
if vcs_settings is not None and \\
'svn' in vcs_settings and \\
'tag_url' in vcs_settings['svn']:
tag_url = vcs_settings['svn']['tag_url']
if tag_url is None:
raise ValueError("Please define VCS-specific 'tag_url' setting for "
"'svn' within 'versioneer'.")
svn_commands = ['svn']
info_xml = run_command(svn_commands, ['ls', '--xml', tag_url], cwd=root)
# TODO(dustin): This should raise an EnvironmentError upon failure.
if info_xml is None:
print("Error accessing Subversion for latest version.")
return {}
(releases, latest_revision) = svn_parse_tag_xml(info_xml)
release_info = releases[latest_revision]
release_name = release_info['name']
versions = { 'default': release_name,
'version': release_name,
'full': release_name }
# Examples of strings returned by Git.
#
# versions["closest_tag"]
# versions["distance"]
# versions["short_revisionid"]
# versions["dirty"]
# versions["pep440"]
# versions["describe"]
# versions["default"]
# versions["dash_dirty"]
# versions["closest_tag_or_zero"]
# versions["dash_distance"]
return versions
import os
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
"""This variation of get_versions() will be used in _version.py ."""
root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# We isolated the following operations so that debugging would be more
# straightforward.
ver = svn_versions_from_vcs(tag_prefix, root, verbose)
if ver:
return ver
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
return ver
return default
'''
def svn_get_keywords(versionfile_abs):
    """Collect VCS-expanded keyword values from *versionfile_abs*.

    Keyword expansion is not implemented for Subversion, so this always
    yields an empty mapping; get_versions() then falls through to its
    next discovery strategy.
    """
    # Unimplemented for svn; would otherwise be { 'revision': svn_revision }.
    return {}
# TODO(dustin): Needs to be tested.
def svn_versions_from_keywords(keywords, tag_prefix, verbose=False):
    """Derive version data from expanded keywords; unimplemented for svn."""
    # Would otherwise be { 'default': keywords['revision'] }.
    return {}
import re
import sys
import os.path
from xml.etree import ElementTree
def svn_parse_tag_xml(info_xml):
    """Parse the XML produced by ``svn ls --xml <tag_url>``.

    Returns ``(releases, latest_revision)`` where *releases* maps each
    tag's commit revision (int) to a dict holding the tag ``name`` plus
    the commit's child fields (e.g. ``author``, ``date``), and
    *latest_revision* is the highest revision seen (0 when no tags exist).
    """
    document = ElementTree.fromstring(info_xml)
    entries = document.find('list')
    releases = {}
    newest = 0
    for entry in entries:
        children = {child.tag: child for child in entry}
        commit = children['commit']
        revision = int(commit.attrib['revision'])
        info = {'name': children['name'].text}
        info.update((field.tag, field.text) for field in commit)
        releases[revision] = info
        if revision > newest:
            newest = revision
    return (releases, newest)
def svn_versions_from_vcs(tag_prefix, root, verbose=False):
    """Return a dictionary of values derived directly from the VCS. This is the
    third attempt to find information by get_versions().

    Asks `svn ls --xml` for the tag directory listing, takes the tag with the
    highest commit revision, and uses its name for every returned flavor.
    Returns {} when *root* is not an svn checkout or svn cannot be queried;
    raises ValueError when no 'tag_url' has been configured.
    """
    if not os.path.exists(os.path.join(root, '.svn')):
        if verbose:
            print("no .svn in %s." % root)
        return {}
    current_module = sys.modules[__name__]
    # If we're running from _version.py .
    tag_url = getattr(current_module, 'svn_tag_url', None)
    # If we're running from versioneer.py .
    if tag_url is None:
        vcs_settings = getattr(current_module, 'vcs_settings', None)
        if vcs_settings is not None and \
           'svn' in vcs_settings and \
           'tag_url' in vcs_settings['svn']:
            tag_url = vcs_settings['svn']['tag_url']
    if tag_url is None:
        raise ValueError("Please define VCS-specific 'tag_url' setting for "
                         "'svn' within 'versioneer'.")
    svn_commands = ['svn']
    info_xml = run_command(svn_commands, ['ls', '--xml', tag_url], cwd=root)
    # TODO(dustin): This should raise an EnvironmentError upon failure.
    if info_xml is None:
        print("Error accessing Subversion for latest version.")
        return {}
    (releases, latest_revision) = svn_parse_tag_xml(info_xml)
    # NOTE(review): this raises KeyError if the tag directory is empty
    # (latest_revision == 0 with no releases) — presumably at least one tag
    # is expected to exist; confirm before relying on it.
    release_info = releases[latest_revision]
    release_name = release_info['name']
    versions = { 'default': release_name,
                 'version': release_name,
                 'full': release_name }
    # Examples of strings returned by Git.
    #
    # versions["closest_tag"]
    # versions["distance"]
    # versions["short_revisionid"]
    # versions["dirty"]
    # versions["pep440"]
    # versions["describe"]
    # versions["default"]
    # versions["dash_dirty"]
    # versions["closest_tag_or_zero"]
    # versions["dash_distance"]
    return versions
import os.path
import sys
def svn_do_vcs_install(manifest_in, versionfile_source, ipy):
    """Post-install hook invoked right after versioneer.py is written.

    Subversion needs no keyword-substitution setup (unlike git's
    .gitattributes tweak), so this is intentionally a no-op.
    """
    return None
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
    """Derive version data from the basename of the directory *root*.

    Source tarballs conventionally unpack into ``<project>-<version>/``,
    so stripping *parentdir_prefix* from the basename leaves the version
    string.  Returns a dict with ``describe``/``long``/``pep440`` keys,
    or None when the directory name does not carry the prefix.  This is
    the last-resort strategy used by get_versions().
    """
    basename = os.path.basename(root)
    if not basename.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
                  (root, basename, parentdir_prefix))
        return None
    version = basename[len(parentdir_prefix):]
    return dict.fromkeys(("describe", "long", "pep440"), version)
SHORT_VERSION_PY_COMMENT = """
# This file was generated by 'versioneer.py' (0.10+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
"""
SHORT_VERSION_PY_TRAILER = """
def get_versions(default={}, verbose=False):
return versions
def get_version():
return version_string_template %% versions
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
    """Read the version strings recorded in a generated _version.py.

    Scans *filename* line by line for ``version_version = '...'`` and
    ``version_full = '...'`` assignments and returns whatever was found
    (possibly a partial dict).  An unreadable or missing file yields {}.
    This is the second strategy tried by get_versions().
    """
    found = {}
    patterns = {"version": re.compile(r"version_version = '([^']+)'"),
                "full": re.compile(r"version_full = '([^']+)'")}
    try:
        with open(filename) as fp:
            for line in fp:
                for key, pattern in patterns.items():
                    match = pattern.match(line)
                    if match:
                        found[key] = match.group(1)
    except EnvironmentError:
        return {}
    return found
def build_short_version_py(versions):
    """Render the static replacement _version.py for the given *versions*.

    Concatenates the standard comment header, a literal ``versions = ...``
    assignment (via repr), and the accessor-function trailer.
    """
    return "".join([SHORT_VERSION_PY_COMMENT,
                    "versions = ",
                    repr(versions),
                    "\n\n",
                    SHORT_VERSION_PY_TRAILER])
import sys
def get_root():
    """Return the absolute directory containing this script (the project root).

    Prefers __file__; frozen environments (py2exe/bbfreeze/non-CPython) may
    not define it, in which case the setup.py path from sys.argv[0] is used.
    """
    try:
        anchor = __file__
    except NameError:
        # No __file__ available; fall back to the invoked script's path.
        anchor = sys.argv[0]
    return os.path.dirname(os.path.abspath(anchor))
def vcs_function(vcs, suffix):
    """Look up the VCS-specific helper named ``<vcs>_<suffix>``, or None."""
    name = "%s_%s" % (vcs, suffix)
    return getattr(sys.modules[__name__], name, None)
def get_versions(default=DEFAULT, verbose=False):
    """This variation of get_versions() will be used in versioneer.py .

    Tries four discovery strategies in order and returns the first
    non-empty result: VCS-expanded keywords, a previously generated
    _version.py, asking the VCS tool directly, and finally the
    parent-directory name.  Falls back to *default* when all fail.

    :param default: dict returned when every strategy fails
    :param verbose: print which strategy supplied the version
    """
    # returns dict with two keys: 'version' and 'full'
    assert versionfile_source is not None, "please set versioneer.versionfile_source"
    assert tag_prefix is not None, "please set versioneer.tag_prefix"
    assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
    assert VCS is not None, "please set versioneer.VCS"
    # I am in versioneer.py, which must live at the top of the source tree,
    # which we use to compute the root directory. py2exe/bbfreeze/non-CPython
    # don't have __file__, in which case we fall back to sys.argv[0] (which
    # ought to be the setup.py script). We prefer __file__ since that's more
    # robust in cases where setup.py was invoked in some weird way (e.g. pip)
    root = get_root()
    versionfile_abs = os.path.join(root, versionfile_source)
    # extract version from first of _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    # Try to get the version info from the VCS-specific replacement keywords.
    get_keywords_f = vcs_function(VCS, "get_keywords")
    versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
    if get_keywords_f and versions_from_keywords_f:
        vcs_keywords = get_keywords_f(versionfile_abs)
        ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
        if ver:
            if verbose: print("got version from expanded keyword %s" % ver)
            return ver
    # Try to get the version info from _version.py .
    ver = versions_from_file(versionfile_abs)
    if ver:
        if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
        return ver
    # Try to get the version info from the VCS, directly.
    versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
    if versions_from_vcs_f:
        ver = versions_from_vcs_f(tag_prefix, root, verbose)
        if ver:
            if verbose: print("got version from VCS %s" % ver)
            return ver
    # Try to get the version info from the directory's naming.
    ver = versions_from_parentdir(parentdir_prefix, root, verbose)
    if ver:
        if verbose: print("got version from parentdir %s" % ver)
        return ver
    if verbose: print("got version from default %s" % default)
    return default
def get_version(verbose=False):
    """Return the single version string, formatted per version_string_template."""
    versions = get_versions(verbose=verbose)
    return version_string_template % versions
def get_vcs_code():
    """Render the VCS-aware _version.py source for the configured VCS.

    Substitutes the project's configuration values into the long template
    from LONG_VERSION_PY, un-escapes backslashes, and prepends any
    VCS-specific settings (from vcs_settings) as module-level assignments.
    """
    assert VCS is not None, "please set versioneer.VCS"
    long_ = LONG_VERSION_PY[VCS]
    complete = long_ % { "DOLLAR": "$",
                         "TAG_PREFIX": tag_prefix,
                         "PARENTDIR_PREFIX": parentdir_prefix,
                         "VERSIONFILE_SOURCE": versionfile_source,
                         "VERSION_STRING_TEMPLATE": version_string_template }
    # The templates double their backslashes so they survive the %-format
    # pass; collapse them back to single backslashes for the output file.
    complete = complete.replace('\\\\', '\\')
    try:
        vcs_settings[VCS]
    except KeyError:
        # No extra settings for this VCS; emit the template as-is.
        pass
    else:
        new_lines = []
        for k, v in vcs_settings[VCS].items():
            # Escape backslashes and double-quotes so the value survives as
            # a Python string literal in the generated file.
            value_escaped = v.replace("\\", "\\\\").replace("\"", "\\\"")
            new_lines.append("%s_%s = \"%s\"" % (VCS, k, value_escaped))
        # Add VCS-specific assignments to top of _version.py .
        # TODO(dustin): We might want to put these below the file-version and comments.
        complete = "\n".join(new_lines) + "\n" + complete
    return complete
class cmd_version(Command):
    """distutils command: report the version string versioneer computes.

    ``--verbose`` prints which discovery strategy supplied the version;
    ``--everything`` additionally dumps every flavor in the versions dict.
    """
    description = "report generated version string"
    user_options = []
    boolean_options = ["everything", "verbose"]
    def initialize_options(self):
        self.everything = False
        self.verbose = False
    def finalize_options(self):
        pass
    def run(self):
        ver = get_version(verbose=self.verbose)
        print("Version is currently: %s" % ver)
        if self.everything:
            # BUGFIX: this module *is* versioneer — the previous
            # "versioneer.get_versions(...)" raised NameError because no
            # "versioneer" name is defined or imported here.
            versions = get_versions(verbose=self.verbose)
            for k in sorted(versions):
                print("%s:%s" % (k, versions[k]))
class cmd_build(_build):
    """distutils 'build' wrapper: after the normal build, overwrite the
    copied _version.py inside build/ with a short static version file so
    installed code doesn't need the VCS."""
    def run(self):
        # Compute the version *before* building, while the VCS is reachable.
        versions = get_versions(verbose=True)
        _build.run(self)
        # now locate _version.py in the new build/ directory and replace it
        # with an updated value
        target_versionfile = os.path.join(self.build_lib, versionfile_build)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        with open(target_versionfile, "w") as f:
            f.write(build_short_version_py(versions) % versions)
# cx_Freeze needs the static _version.py substituted into the *source* tree
# before freezing, then the VCS-aware copy restored afterwards.
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
    from cx_Freeze.dist import build_exe as _build_exe
    class cmd_build_exe(_build_exe):
        """cx_Freeze 'build_exe' wrapper that pins the computed version
        into the frozen executable."""
        def run(self):
            versions = get_versions(verbose=True)
            target_versionfile = versionfile_source
            print("UPDATING %s" % target_versionfile)
            os.unlink(target_versionfile)
            with open(target_versionfile, "w") as f:
                f.write(build_short_version_py(versions) % versions)
            _build_exe.run(self)
            os.unlink(target_versionfile)
            # Restore the VCS-aware _version.py once freezing is done.
            with open(versionfile_source, "w") as f:
                f.write(get_vcs_code())
class cmd_sdist(_sdist):
    """distutils 'sdist' wrapper: pin the computed version into the
    distribution metadata and into the _version.py shipped in the tarball."""
    def run(self):
        versions = get_versions(verbose=True)
        self._versioneer_generated_versions = versions
        # unless we update this, the command will keep using the old version
        self.distribution.metadata.version = versions["describe"] # XXX
        return _sdist.run(self)
    def make_release_tree(self, base_dir, files):
        # TODO(dustin): Fixed bug by defining this. Can we reduce the two separate
        # calls in this class to a single one?
        versions = get_versions(verbose=True)
        _sdist.make_release_tree(self, base_dir, files)
        # now locate _version.py in the new base_dir directory (remembering
        # that it may be a hardlink) and replace it with an updated value
        target_versionfile = os.path.join(base_dir, versionfile_source)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        with open(target_versionfile, "w") as f:
            f.write(build_short_version_py(versions) % self._versioneer_generated_versions)
# Appended to the package __init__.py so that PACKAGE.__version__ reflects
# the versioneer-computed version at import time.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['default']
del get_versions
"""
class cmd_versioneer(Command):
    """distutils command: install/upgrade the Versioneer support files.

    Writes the VCS-aware _version.py, hooks __version__ into the package
    __init__.py, ensures MANIFEST.in ships both versioneer.py and
    _version.py, and runs the VCS-specific post-install step.
    """
    description = "install/upgrade Versioneer files: __init__.py SRC/_version.py"
    user_options = []
    boolean_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        """Create/refresh the generated _version.py and wire up packaging files."""
        print(" creating %s" % versionfile_source)
        with open(versionfile_source, "w") as f:
            f.write(get_vcs_code())
        ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            # No __init__.py yet; the snippet below will create it.
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
        # Make sure both the top-level "versioneer.py" and versionfile_source
        # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
        # they'll be copied into source distributions. Pip won't be able to
        # install the package without this.
        manifest_in = os.path.join(get_root(), "MANIFEST.in")
        simple_includes = set()
        try:
            with open(manifest_in, "r") as f:
                for line in f:
                    if line.startswith("include "):
                        for include in line.split()[1:]:
                            simple_includes.add(include)
        except EnvironmentError:
            pass
        # That doesn't cover everything MANIFEST.in can do
        # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
        # it might give some false negatives. Appending redundant 'include'
        # lines is safe, though.
        if "versioneer.py" not in simple_includes:
            print(" appending 'versioneer.py' to MANIFEST.in")
            with open(manifest_in, "a") as f:
                f.write("include versioneer.py\n")
        else:
            print(" 'versioneer.py' already in MANIFEST.in")
        if versionfile_source not in simple_includes:
            print(" appending versionfile_source ('%s') to MANIFEST.in" %
                  versionfile_source)
            with open(manifest_in, "a") as f:
                f.write("include %s\n" % versionfile_source)
        else:
            print(" versionfile_source already in MANIFEST.in")
        # Make VCS-specific changes. For git, this means creating/changing
        # .gitattributes to mark _version.py for export-time keyword
        # substitution.
        do_vcs_install_f = getattr(sys.modules[__name__], VCS + '_do_vcs_install')
        do_vcs_install_f(manifest_in, versionfile_source, ipy)
def get_cmdclass():
    """Returns a mapping of subcommand handlers for setup.py .

    Under cx_Freeze, 'build' is replaced by 'build_exe' since cx_Freeze's
    build step supersedes the normal distutils build.
    """
    cmds = {'version': cmd_version,
            'versioneer': cmd_versioneer,
            'build': cmd_build,
            'sdist': cmd_sdist,
            }
    if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
        cmds['build_exe'] = cmd_build_exe
        del cmds['build']
    return cmds
|
dsoprea/PySecure | versioneer.py | get_versions | python | def get_versions(default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
assert VCS is not None, "please set versioneer.VCS"
# I am in versioneer.py, which must live at the top of the source tree,
# which we use to compute the root directory. py2exe/bbfreeze/non-CPython
# don't have __file__, in which case we fall back to sys.argv[0] (which
# ought to be the setup.py script). We prefer __file__ since that's more
# robust in cases where setup.py was invoked in some weird way (e.g. pip)
root = get_root()
versionfile_abs = os.path.join(root, versionfile_source)
# extract version from first of _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
# Try to get the version info from the VCS-specific replacement keywords.
get_keywords_f = vcs_function(VCS, "get_keywords")
versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
if get_keywords_f and versions_from_keywords_f:
vcs_keywords = get_keywords_f(versionfile_abs)
ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
if ver:
if verbose: print("got version from expanded keyword %s" % ver)
return ver
# Try to get the version info from _version.py .
ver = versions_from_file(versionfile_abs)
if ver:
if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
return ver
# Try to get the version info from the VCS, directly.
versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
if versions_from_vcs_f:
ver = versions_from_vcs_f(tag_prefix, root, verbose)
if ver:
if verbose: print("got version from VCS %s" % ver)
return ver
# Try to get the version info from the directory's naming.
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return ver
if verbose: print("got version from default %s" % default)
return default | This variation of get_versions() will be used in versioneer.py . | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/versioneer.py#L1204-L1263 | null |
# Version: 0.10+
"""
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, and 3.2, 3.3
[](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* run `versioneer-installer` in your source tree: this installs `versioneer.py`
* follow the instructions below (also in the `versioneer.py` docstring)
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example 'git describe --tags --dirty --always' reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time. However,
when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
copy is replaced by a small static file that contains just the generated
version data.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the "git archive" command. As a result, generated tarballs will
contain enough information to get the proper version.
## Installation
First, decide on values for the following configuration variables:
* `VCS`:
The version control system you use.
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file. If your project uses `src/myproject/__init__.py`, this
should be `src/myproject/_version.py`. This file should be checked in to
your VCS as usual: the copy created below by `setup.py versioneer` will
include code that parses expanded VCS keywords in generated tarballs. The
'build' and 'sdist' commands will replace it with a copy that has just the
calculated version string.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
a string, frequently the same as tag_prefix, which appears at the start of
all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'.
This tool provides one script, named `versioneer-installer`. That script does
one thing: write a copy of `versioneer.py` into the current directory.
To versioneer-enable your project:
* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
source tree.
* 2: add the following lines to the top of your `setup.py`, with the
configuration values you decided earlier:
import versioneer
versioneer.VCS = 'git'
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
If you're using Subversion, than you'll need to declare the URL for your
repository's tag path:
versioneer.vcs_settings['svn'] = { 'tag_url': 'https://svn_host/svn/repo/tags' }
* 3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: now run `setup.py versioneer`, which will create `_version.py`, and
will modify your `__init__.py` to define `__version__` (by calling a
function from `_version.py`). It will also modify your `MANIFEST.in` to
include both `versioneer.py` and the generated `_version.py` in sdist
tarballs.
* 5: commit these changes to your VCS. To make sure you won't forget,
`setup.py versioneer` will mark everything it touched for addition.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Currently, all version strings must be based upon a tag. Versioneer will
report "unknown" until your tree has at least one tag in its history. This
restriction will be fixed eventually (see issue #12).
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different keys for different flavors
of the version string:
* `['version']`: condensed tag+distance+shortid+dirty identifier. For git,
this uses the output of `git describe --tags --dirty --always` but strips
the tag_prefix. For example "0.11-2-g1076c97-dirty" indicates that the tree
is like the "1076c97" commit but has uncommitted changes ("-dirty"), and
that this commit is two revisions ("-2-") beyond the "0.11" tag. For
released software (exactly equal to a known tag), the identifier will only
contain the stripped tag, e.g. "0.11".
* `['full']`: detailed revision identifier. For Git, this is the full SHA1
commit id, followed by "-dirty" if the tree contains uncommitted changes,
e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty".
Some variants are more useful than others. Including `full` in a bug report
should allow developers to reconstruct the exact code being tested (or
indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
In the future, this will also include a
[PEP-0440](http://legacy.python.org/dev/peps/pep-0440/) -compatible flavor
(e.g. `1.2.post0.dev123`). This loses a lot of information (and has no room
for a hash-based revision id), but is safe to use in a `setup.py`
"`version=`" argument. It also enables tools like *pip* to compare version
strings and evaluate compatibility constraint declarations.
The `setup.py versioneer` command adds the following text to your
`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
from ._version import get_versions
    __version__ = get_versions()['version']
del get_versions
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* re-run `versioneer-installer` in your source tree to replace `versioneer.py`
* edit `setup.py`, if necessary, to include any new configuration settings indicated by the release notes
* re-run `setup.py versioneer` to replace `SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is hereby released into the
public domain. The `_version.py` that it creates is also in the public
domain.
"""
import os, sys, re
from os import path
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
# these configuration settings will be overridden by setup.py after it
# imports us
VCS = None  # name of the VCS in use, e.g. "git" or "svn"
versionfile_source = None  # relative path of _version.py inside the source tree
versionfile_build = None  # relative path of _version.py inside the build tree
tag_prefix = None  # prefix stripped from VCS tags to obtain the version, e.g. "v"
parentdir_prefix = None  # directory-name prefix consumed by versions_from_parentdir()
version_string_template = "%(default)s"  # %-template applied to the versions dict
vcs_settings = {}  # optional per-VCS settings, e.g. {"svn": {"tag_url": ...}}
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}  # maps VCS name -> full text template for generated _version.py
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first of *commands* that can be started, with *args* appended.

    :param commands: candidate executable names, tried in order (a list,
        e.g. ["git.cmd", "git.exe"] on Windows)
    :param args: argument list appended to the chosen command
    :param cwd: working directory for the child process
    :param verbose: print diagnostics on failure
    :param hide_stderr: capture-and-discard the child's stderr instead of
        letting it inherit ours
    :return: the command's stripped stdout as text, or None if no command
        could be started or the command exited non-zero
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # executable not found: try the next candidate
                continue
            if verbose:
                print("unable to run %s" % args[0])
                print(e)
            return None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = p.communicate()[0].strip()
    # BUG FIX: the old test `sys.version >= '3'` compared version *strings*,
    # which misorders once major versions reach two digits (e.g. "10" < "3").
    # Compare the numeric version_info instead.
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        # TODO(dustin): Maybe we should contemplate raising a SystemError here, rather
        # then returning a None. It's almost always preferable that it would default to
        # being a terminal error unless specifically caught (rather than vice versa).
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return stdout
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.10+ (https://github.com/warner/python-versioneer)
"""This text is put at the top of _version.py, and can be keyword-replaced with
version information by the VCS.
"""
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full_revisionid = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_short_revisionid = "%(DOLLAR)sFormat:%%h%(DOLLAR)s"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
version_string_template = "%(VERSION_STRING_TEMPLATE)s"
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
# TODO(dustin): Maybe we should contemplate raising a SystemError here, rather
# then returning a None. It's almost always preferable that it would default to
# being a terminal error unles specifically caught (rather than vice versa).
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
"""Return a dictionary of values derived from the name of our parent
directory (useful when a thoughtfully-named directory is created from an
archive). This is the fourth attempt to find information by get_versions().
"""
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
version = dirname[len(parentdir_prefix):]
return { "describe": version,
"long": version,
"pep440": version,
}
import re
def git_get_keywords(versionfile_abs):
"""Return a dictionary of values replaced by the VCS, automatically. This
is the first attempt to find information by get_versions().
"""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs) as f:
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full_revisionid ="):
mo = re.search(r'=\\s*"(.*)"', line)
if mo:
keywords["full_revisionid"] = mo.group(1)
if line.strip().startswith("git_short_revisionid ="):
mo = re.search(r'=\\s*"(.*)"', line)
if mo:
keywords["short_revisionid"] = mo.group(1)
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
shortest_tag = None
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
shortest_tag = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% shortest_tag)
break
versions = {
"full_revisionid": keywords["full_revisionid"].strip(),
"short_revisionid": keywords["short_revisionid"].strip(),
"dirty": False, "dash_dirty": "",
"closest_tag": shortest_tag,
"closest_tag_or_zero": shortest_tag or "0",
# "distance" is not provided: cannot deduce from keyword expansion
}
if not shortest_tag and verbose:
print("no suitable tags, using full revision id")
composite = shortest_tag or versions["full_revisionid"]
versions["describe"] = composite
versions["long"] = composite
versions["default"] = composite
versions["pep440"] = composite
return versions
import re
import sys
import os.path
def git_versions_from_vcs(tag_prefix, root, verbose=False):
"""Return a dictionary of values derived directly from the VCS. This is the
third attempt to find information by get_versions().
"""
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
versions = {}
full_revisionid = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_revisionid is None:
return {}
versions["full_revisionid"] = full_revisionid.strip()
d = run_command(GITS,
["describe", "--tags", "--dirty", "--always", "--long"],
cwd=root)
if d is None:
return {}
d = d.strip()
# "TAG-DIST-gHASH[-dirty]" , where DIST might be "0"
# or just "HASH[-dirty]" if there are no ancestor tags
versions["long"] = d
mo1 = re.search(r"^(.*)-(\\d+)-g([0-9a-f]+)(-dirty)?$", d)
mo2 = re.search(r"^([0-9a-f]+)(-dirty)?$", d)
if mo1:
rawtag = mo1.group(1)
if not rawtag.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (rawtag, tag_prefix))
return {}
tag = rawtag[len(tag_prefix):]
versions["closest_tag"] = tag
versions["distance"] = int(mo1.group(2))
versions["short_revisionid"] = mo1.group(3)
versions["dirty"] = bool(mo1.group(4))
versions["pep440"] = tag
if versions["distance"]:
versions["describe"] = d
versions["pep440"] += ".post%%d" %% versions["distance"]
else:
versions["describe"] = tag
if versions["dirty"]:
versions["describe"] += "-dirty"
if versions["dirty"]:
# not strictly correct, as X.dev0 sorts "earlier" than X, but we
# need some way to distinguish the two. You shouldn't be shipping
# -dirty code anyways.
versions["pep440"] += ".dev0"
versions["default"] = versions["describe"]
elif mo2: # no ancestor tags
versions["closest_tag"] = None
versions["short_revisionid"] = mo2.group(1)
versions["dirty"] = bool(mo2.group(2))
# count revisions to compute ["distance"]
commits = run_command(GITS, ["rev-list", "--count", "HEAD"], cwd=root)
if commits is None:
return {}
versions["distance"] = int(commits.strip())
versions["pep440"] = "0"
if versions["distance"]:
versions["pep440"] += ".post%%d" %% versions["distance"]
if versions["dirty"]:
versions["pep440"] += ".dev0" # same concern as above
versions["describe"] = d
versions["default"] = "0-%%d-g%%s" %% (versions["distance"], d)
else:
return {}
versions["dash_dirty"] = "-dirty" if versions["dirty"] else ""
versions["closest_tag_or_zero"] = versions["closest_tag"] or "0"
if versions["distance"] == 0:
versions["dash_distance"] = ""
else:
versions["dash_distance"] = "-%%d" %% versions["distance"]
return versions
import os
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
"""This variation of get_versions() will be used in _version.py ."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = { "refnames": git_refnames,
"full_revisionid": git_full_revisionid,
"short_revisionid": git_short_revisionid }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
# TODO(dustin): Shouldn't this always loop until it fails?
for i in range(len(versionfile_source.split(os.sep))):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
'''
import re
def git_get_keywords(versionfile_abs):
    """Scan *versionfile_abs* for git-expanded keyword assignments.

    This is the first attempt used by get_versions(). The file is read
    textually (never imported) because from setup.py we must not import
    _version.py. Returns a dict with any of "refnames",
    "full_revisionid", "short_revisionid" that were found; an unreadable
    file yields an empty dict.
    """
    assignment_keys = (
        ("git_refnames =", "refnames"),
        ("git_full_revisionid =", "full_revisionid"),
        ("git_short_revisionid =", "short_revisionid"),
    )
    found = {}
    try:
        with open(versionfile_abs) as handle:
            for raw_line in handle.readlines():
                stripped = raw_line.strip()
                for prefix, key in assignment_keys:
                    if not stripped.startswith(prefix):
                        continue
                    match = re.search(r'=\s*"(.*)"', raw_line)
                    if match:
                        found[key] = match.group(1)
    except EnvironmentError:
        pass
    return found
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
    """Build a versions dict from git-archive keyword expansion.

    *keywords* is the dict produced by git_get_keywords(). Returns an
    empty dict when the keywords are missing or still unexpanded
    ("$Format..."), otherwise a dict of version strings. "distance"
    cannot be deduced from keyword expansion and is never provided.
    """
    if not keywords:
        return {}  # keyword-finding function failed to find keywords
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        return {}  # unexpanded, so not in an unpacked git-archive tarball
    refs = {name.strip() for name in refnames.strip("()").split(",")}
    # git >= 1.8.3 lists tags as "tag: foo-1.0"; prefer those when present.
    tag_marker = "tag: "
    tags = {name[len(tag_marker):] for name in refs if name.startswith(tag_marker)}
    if not tags:
        # Either an older git or genuinely no tags. Heuristic: version tags
        # contain a digit; this filters out branch names like "release",
        # "stabilization", plus "HEAD" and "master".
        tags = {name for name in refs if re.search(r'\d', name)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    picked = None
    for candidate in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if candidate.startswith(tag_prefix):
            picked = candidate[len(tag_prefix):]
            if verbose:
                print("picking %s" % picked)
            break
    info = {
        "full_revisionid": keywords["full_revisionid"].strip(),
        "short_revisionid": keywords["short_revisionid"].strip(),
        "dirty": False, "dash_dirty": "",
        "closest_tag": picked,
        "closest_tag_or_zero": picked or "0",
    }
    if not picked and verbose:
        print("no suitable tags, using full revision id")
    base = picked or info["full_revisionid"]
    for key in ("describe", "long", "default", "pep440"):
        info[key] = base
    return info
import re
import sys
import os.path
def git_versions_from_vcs(tag_prefix, root, verbose=False):
    """Return a dictionary of values derived directly from the VCS. This is the
    third attempt to find information by get_versions().

    :param tag_prefix: prefix stripped from tag names to get the version
    :param root: directory expected to contain the ``.git`` directory
    :param verbose: print diagnostics
    :return: dict of version strings, or {} on any failure
    """
    # this runs 'git' from the root of the source tree. This only gets called
    # if the git-archive 'subst' keywords were *not* expanded, and
    # _version.py hasn't already been rewritten with a short version string,
    # meaning we're inside a checked out source tree.
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False means plain "git" won't resolve on Windows
        GITS = ["git.cmd", "git.exe"]
    versions = {}
    full_revisionid = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_revisionid is None:
        return {}
    versions["full_revisionid"] = full_revisionid.strip()
    d = run_command(GITS,
                    ["describe", "--tags", "--dirty", "--always", "--long"],
                    cwd=root)
    if d is None:
        return {}
    d = d.strip()
    # "TAG-DIST-gHASH[-dirty]" , where DIST might be "0"
    # or just "HASH[-dirty]" if there are no ancestor tags
    versions["long"] = d
    # mo1: describe output with an ancestor tag; mo2: bare hash (no tags)
    mo1 = re.search(r"^(.*)-(\d+)-g([0-9a-f]+)(-dirty)?$", d)
    mo2 = re.search(r"^([0-9a-f]+)(-dirty)?$", d)
    if mo1:
        rawtag = mo1.group(1)
        if not rawtag.startswith(tag_prefix):
            if verbose:
                print("tag '%s' doesn't start with prefix '%s'" % (rawtag, tag_prefix))
            return {}
        tag = rawtag[len(tag_prefix):]
        versions["closest_tag"] = tag
        versions["distance"] = int(mo1.group(2))
        versions["short_revisionid"] = mo1.group(3)
        versions["dirty"] = bool(mo1.group(4))
        versions["pep440"] = tag
        if versions["distance"]:
            # commits beyond the tag: keep the full describe string
            versions["describe"] = d
            versions["pep440"] += ".post%d" % versions["distance"]
        else:
            # exactly on the tag
            versions["describe"] = tag
            if versions["dirty"]:
                versions["describe"] += "-dirty"
        if versions["dirty"]:
            # not strictly correct, as X.dev0 sorts "earlier" than X, but we
            # need some way to distinguish the two. You shouldn't be shipping
            # -dirty code anyways.
            versions["pep440"] += ".dev0"
        versions["default"] = versions["describe"]
    elif mo2: # no ancestor tags
        versions["closest_tag"] = None
        versions["short_revisionid"] = mo2.group(1)
        versions["dirty"] = bool(mo2.group(2))
        # count revisions to compute ["distance"]
        commits = run_command(GITS, ["rev-list", "--count", "HEAD"], cwd=root)
        if commits is None:
            return {}
        versions["distance"] = int(commits.strip())
        versions["pep440"] = "0"
        if versions["distance"]:
            versions["pep440"] += ".post%d" % versions["distance"]
        if versions["dirty"]:
            versions["pep440"] += ".dev0" # same concern as above
        versions["describe"] = d
        versions["default"] = "0-%d-g%s" % (versions["distance"], d)
    else:
        # describe output in an unrecognized shape
        return {}
    # derived convenience fields used by version_string_template
    versions["dash_dirty"] = "-dirty" if versions["dirty"] else ""
    versions["closest_tag_or_zero"] = versions["closest_tag"] or "0"
    if versions["distance"] == 0:
        versions["dash_distance"] = ""
    else:
        versions["dash_distance"] = "-%d" % versions["distance"]
    return versions
import os.path
import sys
def git_do_vcs_install(manifest_in, versionfile_source, ipy):
    """The versioneer.py file was just written. Do any VCS-specific logic,
    here.

    For git this means making sure ``versionfile_source`` is marked
    ``export-subst`` in .gitattributes (so git-archive expands the
    keywords) and ``git add``-ing the touched files.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False means plain "git" won't resolve on Windows
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source, ipy]
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            # stage the .py source, not the compiled cache file
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        # no __file__ (frozen interpreter): fall back to the conventional name
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # check whether the export-subst attribute is already declared
        with open(".gitattributes") as f:
            for line in f.readlines():
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except EnvironmentError:
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
LONG_VERSION_PY['svn'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.10+ (https://github.com/warner/python-versioneer)
"""This text is put at the top of _version.py, and can be keyword-replaced with
version information by the VCS.
"""
svn_revision = "%(DOLLAR)sRevision%(DOLLAR)s"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
version_string_template = "%(VERSION_STRING_TEMPLATE)s"
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
# TODO(dustin): Maybe we should contemplate raising a SystemError here, rather
# then returning a None. It's almost always preferable that it would default to
# being a terminal error unles specifically caught (rather than vice versa).
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
"""Return a dictionary of values derived from the name of our parent
directory (useful when a thoughtfully-named directory is created from an
archive). This is the fourth attempt to find information by get_versions().
"""
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
version = dirname[len(parentdir_prefix):]
return { "describe": version,
"long": version,
"pep440": version,
}
def svn_get_keywords(versionfile_abs):
"""Return a dictionary of values replaced by the VCS, automatically. This
is the first attempt to find information by get_versions().
"""
return {} #{ 'revision': svn_revision }
# TODO(dustin): Needs to be tested.
def svn_versions_from_keywords(keywords, tag_prefix, verbose=False):
return {} # { 'default': keywords['revision'] }
import re
import sys
import os.path
from xml.etree import ElementTree
def svn_parse_tag_xml(info_xml):
root = ElementTree.fromstring(info_xml)
release_list = root.find('list')
releases = {}
latest_revision = 0
for release in release_list:
release = dict([(e.tag, e) for e in release])
revision = int(release['commit'].attrib['revision'])
distilled = { 'name': release['name'].text }
for e in release['commit']:
distilled[e.tag] = e.text
releases[revision] = distilled
latest_revision = max(latest_revision, revision)
return (releases, latest_revision)
def svn_versions_from_vcs(tag_prefix, root, verbose=False):
"""Return a dictionary of values derived directly from the VCS. This is the
third attempt to find information by get_versions().
"""
if not os.path.exists(os.path.join(root, '.svn')):
if verbose:
print("no .svn in %%s." %% root)
return {}
current_module = sys.modules[__name__]
# If we're running from _version.py .
tag_url = getattr(current_module, 'svn_tag_url', None)
# If we're running from versioneer.py .
if tag_url is None:
vcs_settings = getattr(current_module, 'vcs_settings', None)
if vcs_settings is not None and \\
'svn' in vcs_settings and \\
'tag_url' in vcs_settings['svn']:
tag_url = vcs_settings['svn']['tag_url']
if tag_url is None:
raise ValueError("Please define VCS-specific 'tag_url' setting for "
"'svn' within 'versioneer'.")
svn_commands = ['svn']
info_xml = run_command(svn_commands, ['ls', '--xml', tag_url], cwd=root)
# TODO(dustin): This should raise an EnvironmentError upon failure.
if info_xml is None:
print("Error accessing Subversion for latest version.")
return {}
(releases, latest_revision) = svn_parse_tag_xml(info_xml)
release_info = releases[latest_revision]
release_name = release_info['name']
versions = { 'default': release_name,
'version': release_name,
'full': release_name }
# Examples of strings returned by Git.
#
# versions["closest_tag"]
# versions["distance"]
# versions["short_revisionid"]
# versions["dirty"]
# versions["pep440"]
# versions["describe"]
# versions["default"]
# versions["dash_dirty"]
# versions["closest_tag_or_zero"]
# versions["dash_distance"]
return versions
import os
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
"""This variation of get_versions() will be used in _version.py ."""
root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# We isolated the following operations so that debugging would be more
# straightforward.
ver = svn_versions_from_vcs(tag_prefix, root, verbose)
if ver:
return ver
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
return ver
return default
'''
def svn_get_keywords(versionfile_abs):
    """First lookup attempt: VCS keyword expansion.

    Subversion keyword support is not wired up yet, so this always
    returns an empty dict and get_versions() falls through to the next
    strategy.
    """
    return {}
# TODO(dustin): Needs to be tested.
def svn_versions_from_keywords(keywords, tag_prefix, verbose=False):
    """Keyword-based version lookup for SVN; unimplemented, always empty."""
    return {}
import re
import sys
import os.path
from xml.etree import ElementTree
def svn_parse_tag_xml(info_xml):
    """Parse the XML emitted by ``svn ls --xml <tag-url>``.

    Returns ``(releases, latest_revision)`` where *releases* maps each
    commit revision number to a dict with the tag 'name' plus the commit
    child fields (e.g. author, date), and *latest_revision* is the
    highest revision seen (0 when the list is empty).
    """
    releases = {}
    newest = 0
    document = ElementTree.fromstring(info_xml)
    for entry in document.find('list'):
        children = dict((child.tag, child) for child in entry)
        revision = int(children['commit'].attrib['revision'])
        record = {'name': children['name'].text}
        for field in children['commit']:
            record[field.tag] = field.text
        releases[revision] = record
        newest = max(newest, revision)
    return (releases, newest)
def svn_versions_from_vcs(tag_prefix, root, verbose=False):
    """Return a dictionary of values derived directly from the VCS. This is the
    third attempt to find information by get_versions().

    Requires a 'tag_url' setting: either module global ``svn_tag_url``
    (when running from a generated _version.py) or
    ``vcs_settings['svn']['tag_url']`` (when running from versioneer.py).

    :raises ValueError: when no tag_url setting can be found
    """
    if not os.path.exists(os.path.join(root, '.svn')):
        if verbose:
            print("no .svn in %s." % root)
        return {}
    current_module = sys.modules[__name__]
    # If we're running from _version.py .
    tag_url = getattr(current_module, 'svn_tag_url', None)
    # If we're running from versioneer.py .
    if tag_url is None:
        vcs_settings = getattr(current_module, 'vcs_settings', None)
        if vcs_settings is not None and \
           'svn' in vcs_settings and \
           'tag_url' in vcs_settings['svn']:
            tag_url = vcs_settings['svn']['tag_url']
    if tag_url is None:
        raise ValueError("Please define VCS-specific 'tag_url' setting for "
                         "'svn' within 'versioneer'.")
    svn_commands = ['svn']
    info_xml = run_command(svn_commands, ['ls', '--xml', tag_url], cwd=root)
    # TODO(dustin): This should raise an EnvironmentError upon failure.
    if info_xml is None:
        print("Error accessing Subversion for latest version.")
        return {}
    # newest tag (by commit revision) wins
    (releases, latest_revision) = svn_parse_tag_xml(info_xml)
    release_info = releases[latest_revision]
    release_name = release_info['name']
    # NOTE(review): unlike the git path, tag_prefix is never stripped from
    # release_name here — confirm whether that is intentional.
    versions = { 'default': release_name,
                 'version': release_name,
                 'full': release_name }
    # Examples of strings returned by Git.
    #
    # versions["closest_tag"]
    # versions["distance"]
    # versions["short_revisionid"]
    # versions["dirty"]
    # versions["pep440"]
    # versions["describe"]
    # versions["default"]
    # versions["dash_dirty"]
    # versions["closest_tag_or_zero"]
    # versions["dash_distance"]
    return versions
import os.path
import sys
def svn_do_vcs_install(manifest_in, versionfile_source, ipy):
    """Post-write VCS hook for Subversion.

    Runs after versioneer.py is written, as a post-install step (useless
    for any validation). Nothing is required for SVN, so this is a
    no-op.
    """
    return None
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
    """Derive version info from the name of the directory *root*.

    Useful when a thoughtfully-named directory is created from an
    archive: source tarballs conventionally unpack into
    "<project>-<version>/". This is the fourth attempt used by
    get_versions(). Returns None when the directory name does not start
    with *parentdir_prefix*.
    """
    leaf = os.path.basename(root)
    if not leaf.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
                  (root, leaf, parentdir_prefix))
        return None
    version = leaf[len(parentdir_prefix):]
    return {"describe": version, "long": version, "pep440": version}
SHORT_VERSION_PY_COMMENT = """
# This file was generated by 'versioneer.py' (0.10+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
"""
SHORT_VERSION_PY_TRAILER = """
def get_versions(default={}, verbose=False):
return versions
def get_version():
return version_string_template %% versions
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
    """Read version values back out of an existing _version.py.

    This is the second attempt used by get_versions(). The file is
    scanned textually for "version_version = '...'" and
    "version_full = '...'" assignments; an unreadable file yields an
    empty dict.
    """
    patterns = (("version", "version_version = '([^']+)'"),
                ("full", "version_full = '([^']+)'"))
    found = {}
    try:
        with open(filename) as handle:
            for line in handle.readlines():
                for key, pattern in patterns:
                    match = re.match(pattern, line)
                    if match:
                        found[key] = match.group(1)
    except EnvironmentError:
        return {}
    return found
def build_short_version_py(versions):
    """Render the text of a generated short _version.py.

    The result embeds *versions* via repr() between the standard header
    comment and the get_versions()/get_version() trailer.
    """
    pieces = (SHORT_VERSION_PY_COMMENT,
              "versions = ",
              repr(versions),
              "\n\n",
              SHORT_VERSION_PY_TRAILER)
    return "".join(pieces)
import sys
def get_root():
    """Absolute path of the directory holding this file (or, when
    __file__ is unavailable, the script being run)."""
    try:
        here = os.path.abspath(__file__)
    except NameError:
        here = os.path.abspath(sys.argv[0])
    return os.path.dirname(here)
def vcs_function(vcs, suffix):
    """Look up the VCS-specific helper named '<vcs>_<suffix>' in this
    module; returns None when no such helper is defined."""
    helper_name = "%s_%s" % (vcs, suffix)
    return getattr(sys.modules[__name__], helper_name, None)
def get_version(verbose=False):
    """Render the module-level version template against the computed
    versions dict and return the resulting string."""
    versions = get_versions(verbose=verbose)
    return version_string_template % versions
def get_vcs_code():
    """Build the full text of the generated long _version.py for the
    configured VCS, filling in the module-level settings.

    :return: source text ready to be written to versionfile_source
    :raises AssertionError: when versioneer.VCS has not been set
    :raises KeyError: when no template exists for the configured VCS
    """
    assert VCS is not None, "please set versioneer.VCS"
    long_ = LONG_VERSION_PY[VCS]
    # %-substitute the configuration into the template; DOLLAR keeps the
    # literal "$Format...$" markers intact for git-archive expansion.
    complete = long_ % { "DOLLAR": "$",
                         "TAG_PREFIX": tag_prefix,
                         "PARENTDIR_PREFIX": parentdir_prefix,
                         "VERSIONFILE_SOURCE": versionfile_source,
                         "VERSION_STRING_TEMPLATE": version_string_template }
    # collapse the template's doubled backslashes back to single ones
    complete = complete.replace('\\\\', '\\')
    try:
        vcs_settings[VCS]
    except KeyError:
        pass
    else:
        # emit each extra per-VCS setting as a module-level string
        # assignment, e.g. svn_tag_url = "..."
        new_lines = []
        for k, v in vcs_settings[VCS].items():
            value_escaped = v.replace("\\", "\\\\").replace("\"", "\\\"")
            new_lines.append("%s_%s = \"%s\"" % (VCS, k, value_escaped))
        # Add VCS-specific assignments to top of _version.py .
        # TODO(dustin): We might want to put these below the file-version and comments.
        complete = "\n".join(new_lines) + "\n" + complete
    return complete
class cmd_version(Command):
    """setup.py command that reports the computed version string(s)."""
    description = "report generated version string"
    user_options = []
    boolean_options = ["everything", "verbose"]

    def initialize_options(self):
        self.everything = False
        self.verbose = False

    def finalize_options(self):
        pass

    def run(self):
        ver = get_version(verbose=self.verbose)
        print("Version is currently: %s" % ver)
        if self.everything:
            # BUG FIX: this module *is* versioneer, so the name
            # 'versioneer' is undefined here; the old call
            # 'versioneer.get_versions(...)' raised NameError whenever
            # the everything flag was set. Call the module-level
            # function directly.
            versions = get_versions(verbose=self.verbose)
            for k in sorted(versions):
                print("%s:%s" % (k, versions[k]))
class cmd_build(_build):
    """build command that rewrites the built _version.py with a short,
    pre-computed versions dict (so installed code needs no VCS)."""
    def run(self):
        # compute versions from the source tree *before* building
        versions = get_versions(verbose=True)
        _build.run(self)
        # now locate _version.py in the new build/ directory and replace it
        # with an updated value
        target_versionfile = os.path.join(self.build_lib, versionfile_build)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        with open(target_versionfile, "w") as f:
            # the generated text itself is a %-template; fill it with the
            # same versions dict
            f.write(build_short_version_py(versions) % versions)
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
    from cx_Freeze.dist import build_exe as _build_exe
    class cmd_build_exe(_build_exe):
        """cx_Freeze build_exe variant: temporarily swaps the in-tree
        _version.py for a short pre-computed one while freezing, then
        restores the long VCS-aware version afterwards."""
        def run(self):
            versions = get_versions(verbose=True)
            target_versionfile = versionfile_source
            print("UPDATING %s" % target_versionfile)
            # replace the source-tree _version.py with the short form so
            # the frozen app carries a static version
            os.unlink(target_versionfile)
            with open(target_versionfile, "w") as f:
                f.write(build_short_version_py(versions) % versions)
            _build_exe.run(self)
            # restore the long, VCS-aware _version.py in the source tree
            os.unlink(target_versionfile)
            with open(versionfile_source, "w") as f:
                f.write(get_vcs_code())
class cmd_sdist(_sdist):
    """sdist command that stamps the release tree's _version.py with a
    short, pre-computed versions dict."""
    def run(self):
        versions = get_versions(verbose=True)
        # stash for make_release_tree(); see note there
        self._versioneer_generated_versions = versions
        # unless we update this, the command will keep using the old version
        self.distribution.metadata.version = versions["describe"] # XXX
        return _sdist.run(self)
    def make_release_tree(self, base_dir, files):
        # TODO(dustin): Fixed bug by defining this. Can we reduce the two separate
        # calls in this class to a single one?
        # NOTE(review): get_versions() is called again here and used for the
        # template text, while the %-substitution uses the dict stashed in
        # run() — presumably these are identical; confirm the duplication
        # is intentional.
        versions = get_versions(verbose=True)
        _sdist.make_release_tree(self, base_dir, files)
        # now locate _version.py in the new base_dir directory (remembering
        # that it may be a hardlink) and replace it with an updated value
        target_versionfile = os.path.join(base_dir, versionfile_source)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        with open(target_versionfile, "w") as f:
            f.write(build_short_version_py(versions) % self._versioneer_generated_versions)
# Snippet appended to the package's __init__.py by 'setup.py versioneer'
# so that YOURPROJECT.__version__ is populated at import time.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['default']
del get_versions
"""
class cmd_versioneer(Command):
description = "install/upgrade Versioneer files: __init__.py SRC/_version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
"""Create the versioneer.py file."""
print(" creating %s" % versionfile_source)
with open(versionfile_source, "w") as f:
f.write(get_vcs_code())
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(get_root(), "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install_f = getattr(sys.modules[__name__], VCS + '_do_vcs_install')
do_vcs_install_f(manifest_in, versionfile_source, ipy)
def get_cmdclass():
"""Returns a mapping of subcommand handlers for setup.py ."""
cmds = {'version': cmd_version,
'versioneer': cmd_versioneer,
'build': cmd_build,
'sdist': cmd_sdist,
}
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
cmds['build_exe'] = cmd_build_exe
del cmds['build']
return cmds
|
dsoprea/PySecure | versioneer.py | cmd_versioneer.run | python | def run(self):
print(" creating %s" % versionfile_source)
with open(versionfile_source, "w") as f:
f.write(get_vcs_code())
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(get_root(), "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install_f = getattr(sys.modules[__name__], VCS + '_do_vcs_install')
do_vcs_install_f(manifest_in, versionfile_source, ipy) | Create the versioneer.py file. | train | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/versioneer.py#L1378-L1435 | [
"def get_root():\n try:\n return os.path.dirname(os.path.abspath(__file__))\n except NameError:\n return os.path.dirname(os.path.abspath(sys.argv[0]))\n",
"def get_vcs_code():\n assert VCS is not None, \"please set versioneer.VCS\"\n long_ = LONG_VERSION_PY[VCS]\n complete = long_ % { \"DOLLAR\": \"$\",\n \"TAG_PREFIX\": tag_prefix,\n \"PARENTDIR_PREFIX\": parentdir_prefix,\n \"VERSIONFILE_SOURCE\": versionfile_source,\n \"VERSION_STRING_TEMPLATE\": version_string_template }\n\n complete = complete.replace('\\\\\\\\', '\\\\')\n\n try:\n vcs_settings[VCS]\n except KeyError:\n pass\n else:\n new_lines = []\n for k, v in vcs_settings[VCS].items():\n value_escaped = v.replace(\"\\\\\", \"\\\\\\\\\").replace(\"\\\"\", \"\\\\\\\"\")\n new_lines.append(\"%s_%s = \\\"%s\\\"\" % (VCS, k, value_escaped))\n\n # Add VCS-specific assignments to top of _version.py .\n# TODO(dustin): We might want to put these below the file-version and comments.\n complete = \"\\n\".join(new_lines) + \"\\n\" + complete\n\n return complete\n"
] | class cmd_versioneer(Command):
description = "install/upgrade Versioneer files: __init__.py SRC/_version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
"""Create the versioneer.py file."""
print(" creating %s" % versionfile_source)
with open(versionfile_source, "w") as f:
f.write(get_vcs_code())
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(get_root(), "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install_f = getattr(sys.modules[__name__], VCS + '_do_vcs_install')
do_vcs_install_f(manifest_in, versionfile_source, ipy)
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.from_uri | python | def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode) | Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732. | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L96-L117 | null | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker)
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker)
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more information.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient._re_establish_use_watch | python | def _re_establish_use_watch(self):
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist | Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup. | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L140-L148 | null | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker)
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker)
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
    def peek_ready(self):
        """Peek at the top job on the ready queue without reserving it.

        :rtype: :class:`Job`
        """
        return self._peek_common('ready')

    def peek_delayed(self):
        """Peek at the top job on the delayed queue without reserving it.

        :rtype: :class:`Job`
        """
        return self._peek_common('delayed')

    def peek_buried(self):
        """Peek at the top job on the buried queue without reserving it.

        :rtype: :class:`Job`
        """
        return self._peek_common('buried')
    def _common_iter(self, kallable, error):
        """Yield results from *kallable* until it raises a BeanstalkError whose
        message equals *error* (e.g. 'TIMED_OUT' or 'NOT_FOUND'); any other
        BeanstalkError propagates.

        :param kallable: Zero-argument callable invoked once per iteration
        :param error: Error message that cleanly terminates iteration
        """
        while True:
            try:
                job = kallable()
            except BeanstalkError as e:
                # NOTE(review): this compares e.message against a str; assumes
                # BeanstalkError decodes the raw bytes reply — TODO confirm.
                if e.message != error:
                    raise
                break
            yield job
    def reserve_iter(self):
        """Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available

        :rtype: generator of :class:`Job`
        """
        # timeout 0 => beanstalkd answers TIMED_OUT as soon as the queue is empty
        return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')

    def peek_ready_iter(self):
        """Peek at ready jobs in sequence

        :rtype: generator of :class:`Job`
        """
        return self._common_iter(self.peek_ready, 'NOT_FOUND')

    def peek_delayed_iter(self):
        """Peek at delayed jobs in sequence

        :rtype: generator of :class:`Job`
        """
        return self._common_iter(self.peek_delayed, 'NOT_FOUND')

    def peek_buried_iter(self):
        """Peek at buried jobs in sequence

        :rtype: generator of :class:`Job`
        """
        return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
    @contextmanager
    def using(self, tube):
        """Context-manager to insert jobs into a specific tube

        :param tube: Tube to insert to

        Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube

        .. seealso::

           :func:`use()`
              Change the default tube

           :func:`put_job()`
              Put a job into whatever the current tube is

           :func:`put_job_into()`
              Put a job into a specific tube
        """
        try:
            # Remember whatever tube was USEd before so it can be restored.
            current_tube = self.current_tube
            self.use(tube)
            yield BeanstalkInsertingProxy(self, tube)
        finally:
            # Restore the previous tube even if the body raised.
            self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.list_tubes | python | def list_tubes(self):
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes | Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L246-L255 | [
"def yaml_load(fo):\n # yaml.safe_load will never use the C loader; we have to detect it ourselves\n if hasattr(yaml, 'CSafeLoader'):\n return yaml.load(fo, Loader=yaml.CSafeLoader)\n else:\n return yaml.safe_load(fo)\n",
"def _receive_data_with_prefix(self, prefix, sock):\n buf = b''\n target_len = len(prefix) + 28\n while b'\\r\\n' not in buf:\n message = sock.recv(target_len - len(buf))\n if not message:\n break\n buf += message\n if b' ' not in buf:\n error = buf.rstrip()\n raise BeanstalkError(error)\n first_word, rest = buf.split(b' ', 1)\n if first_word != prefix:\n raise BeanstalkError(first_word)\n return self._receive_data(sock, rest)\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
    @classmethod
    def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
        """Construct a synchronous Beanstalk Client from a URI.

        The URI may be of the form beanstalk://host:port or beanstalkd://host:port

        IPv6 literals must be wrapped in brackets as per RFC 2732.

        :param uri: URI to parse
        :type uri: str
        :param socket_timeout: Timeout to set on the socket (seconds)
        :param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
        :rtype: BeanstalkClient
        :raises ValueError: if the URI scheme is not beanstalk/beanstalkd
        """
        parts = six.moves.urllib.parse.urlparse(uri)
        if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
            raise ValueError('Invalid scheme %s' % parts.scheme)
        # Bracketed IPv6 literal, optionally followed by :port (RFC 2732).
        ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
        if ipv6_md:
            host = ipv6_md.group(1)
            # group(2) still carries its leading ':' when present.
            port = ipv6_md.group(2) or '11300'
            port = port.lstrip(':')
        elif ':' in parts.netloc:
            # rsplit: split on the LAST colon, so host:port parses correctly.
            host, port = parts.netloc.rsplit(':', 1)
        else:
            host = parts.netloc
            port = 11300
        port = int(port)
        return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
    def __repr__(self):
        # Unambiguous constructor-style representation: ClassName('host', port)
        return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port)  # pragma: no cover

    def __str__(self):
        # Human-readable summary including the live tube/watchlist state.
        return '{0} - watching:{1}, current:{2}'.format(  # pragma: no cover
            repr(self), self._watchlist, self.current_tube  # pragma: no cover
        )  # pragma: no cover
    @property
    def _socket(self):
        """Lazily-created TCP connection to the server.

        On (re)connect, replays the previously configured USE/WATCH state
        via :func:`_re_establish_use_watch`.
        """
        if self.socket is None:
            self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
            self._re_establish_use_watch()
        return self.socket
    def _re_establish_use_watch(self):
        """Call after a close/re-connect.

        Automatically re-establishes the USE and WATCH configs previously setup.
        """
        if self.current_tube != self.desired_tube:
            self.use(self.desired_tube)
        if self._watchlist != self.desired_watchlist:
            # Assigning through the watchlist property watches/ignores the diff.
            self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
    @contextmanager
    def _sock_ctx(self):
        """Yield the (lazily-connected) socket for one command exchange."""
        yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
    def _receive_id(self, sock):
        """Read a '<STATUS> <id>' reply; return (status bytes, id as int)."""
        status, gid = self._receive_name(sock)
        return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.stats | python | def stats(self):
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats | Return a dictionary with a bunch of instance-wide statistics
:rtype: dict | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L257-L266 | [
"def yaml_load(fo):\n # yaml.safe_load will never use the C loader; we have to detect it ourselves\n if hasattr(yaml, 'CSafeLoader'):\n return yaml.load(fo, Loader=yaml.CSafeLoader)\n else:\n return yaml.safe_load(fo)\n",
"def _receive_data_with_prefix(self, prefix, sock):\n buf = b''\n target_len = len(prefix) + 28\n while b'\\r\\n' not in buf:\n message = sock.recv(target_len - len(buf))\n if not message:\n break\n buf += message\n if b' ' not in buf:\n error = buf.rstrip()\n raise BeanstalkError(error)\n first_word, rest = buf.split(b' ', 1)\n if first_word != prefix:\n raise BeanstalkError(first_word)\n return self._receive_data(sock, rest)\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.put_job | python | def put_job(self, data, pri=65536, delay=0, ttr=120):
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket) | Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L268-L298 | [
"def _receive_id(self, sock):\n status, gid = self._receive_name(sock)\n return status, int(gid)\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
    def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
        """Construct a synchronous Beanstalk Client. Does not connect!"""
        self.host = host
        self.port = port
        self.socket_timeout = socket_timeout
        # start from beanstalkd's defaults (no socket, watching/using "default")
        self._reset_state()
        # desired_* record what USE/WATCH state to re-establish after a reconnect
        self.desired_tube = 'default'
        self.desired_watchlist = set(['default'])
        # when True, job bodies read off the wire are decoded as UTF-8 str
        self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
    def _reset_state(self):
        """Reset cached connection state to a fresh beanstalkd connection's defaults."""
        # a new beanstalkd connection always starts watching and using "default"
        self._watchlist = set(['default'])
        self.current_tube = 'default'
        # True until the first explicit watch(); lets watch() drop "default"
        self.initial_watch = True
        self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
    @property
    def _socket(self):
        """Lazily-created TCP connection to the server.

        Connects on first use and re-plays any USE/WATCH state that was
        configured before the (re)connect.
        """
        if self.socket is None:
            self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
            # a fresh connection is back to "default"/"default"; replay our config
            self._re_establish_use_watch()
        return self.socket
    def _re_establish_use_watch(self):
        """Call after a close/re-connect.
        Automatically re-establishes the USE and WATCH configs previously setup.
        """
        # compare live state against the recorded desired_* state and only
        # issue commands for what actually diverged
        if self.current_tube != self.desired_tube:
            self.use(self.desired_tube)
        if self._watchlist != self.desired_watchlist:
            self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
    @contextmanager
    def _sock_ctx(self):
        """Context manager yielding the (lazily-connected) socket for one command."""
        yield self._socket
    def _receive_data_with_prefix(self, prefix, sock):
        """Read a "<prefix> <length>" status line plus its CRLF-delimited payload.

        Raises BeanstalkError carrying the server's reply when the status word
        is not *prefix*, or when no space-separated status arrives at all.
        """
        buf = b''
        # room for the prefix word plus a length field and CRLF
        target_len = len(prefix) + 28
        # accumulate until the status line terminator shows up (or EOF)
        while b'\r\n' not in buf:
            message = sock.recv(target_len - len(buf))
            if not message:
                break
            buf += message
        if b' ' not in buf:
            # single-word replies here are always errors (e.g. NOT_FOUND)
            error = buf.rstrip()
            raise BeanstalkError(error)
        first_word, rest = buf.split(b' ', 1)
        if first_word != prefix:
            raise BeanstalkError(first_word)
        # rest begins with the byte count; hand off for the body read
        return self._receive_data(sock, rest)
    def _receive_id_and_data_with_prefix(self, prefix, sock):
        """Read a "<prefix> <id> <length>" status line plus its payload.

        Like :func:`_receive_data_with_prefix` but the status line also
        carries a numeric job id; returns ``(id, payload)``.
        """
        buf = b''
        target_len = len(prefix) + 28
        while b'\r\n' not in buf:
            message = sock.recv(target_len - len(buf))
            if not message:
                break
            buf += message
        if b' ' not in buf:
            error = buf.rstrip()
            raise BeanstalkError(error)
        first_word, rest = buf.split(b' ', 1)
        if first_word != prefix:
            raise BeanstalkError(first_word)
        # second field is the job id; remainder starts with the byte count
        the_id, rest = rest.split(b' ', 1)
        return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
    @property
    def watchlist(self):
        """The set of tube names this connection is currently watching.

        :rtype: set
        """
        return self._watchlist
    @watchlist.setter
    def watchlist(self, tubes):
        """Set the watchlist to the given tubes
        :param tubes: A list of tubes to watch
        Automatically un-watches any tubes that are not on the target list
        """
        tubes = set(tubes)
        # each difference is a fresh set, so mutating self._watchlist inside
        # watch()/ignore() does not disturb the iteration
        for tube in tubes - self._watchlist:
            self.watch(tube)
        for tube in self._watchlist - tubes:
            self.ignore(tube)
    def watch(self, tube):
        """Add the given tube to the watchlist.
        :param tube: Name of the tube to add to the watchlist
        Note: Initially, all connections are watching a tube named "default". If
        you manually call :func:`watch()`, we will un-watch the "default" tube.
        To keep it in your list, first call :func:`watch()` with the other tubes, then
        call :func:`watch()` with "default".
        """
        with self._sock_ctx() as socket:
            # record intent even if already watched, so reconnects replay it
            self.desired_watchlist.add(tube)
            if tube not in self._watchlist:
                self._send_message('watch {0}'.format(tube), socket)
                self._receive_id(socket)
                self._watchlist.add(tube)
                # first explicit watch drops the implicit "default" watch
                # (unless "default" itself is what was requested)
                if self.initial_watch:
                    if tube != 'default':
                        self.ignore('default')
                    self.initial_watch = False
    def ignore(self, tube):
        """Remove the given tube from the watchlist.
        :param tube: Name of tube to remove from the watchlist
        :raises KeyError: if the tube is not currently watched
        If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
        to prevent the list from being empty. See :func:`watch()` for more information.
        """
        with self._sock_ctx() as socket:
            if tube not in self._watchlist:
                raise KeyError(tube)
            if tube != 'default':
                # NOTE(review): set.remove raises KeyError if desired_watchlist
                # ever diverges from _watchlist — confirm that invariant holds
                self.desired_watchlist.remove(tube)
            if tube in self._watchlist:
                self._send_message('ignore {0}'.format(tube), socket)
                self._receive_id(socket)
                self._watchlist.remove(tube)
            # mirror the server's behavior: an empty watchlist reverts to "default"
            if not self._watchlist:
                self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
    def reserve_job(self, timeout=5):
        """Reserve a job for this connection. Blocks for TIMEOUT seconds and raises TIMED_OUT if no job was available
        :param timeout: Time to wait for a job, in seconds.
        :type timeout: int
        :raises ValueError: if the timeout cannot be honored or no tube is watched
        """
        timeout = int(timeout)
        # a reserve that blocks longer than the socket timeout would surface
        # as a socket error instead of TIMED_OUT, so reject it up front
        if self.socket_timeout is not None:
            if timeout >= self.socket_timeout:
                raise ValueError('reserve_job timeout must be < socket timeout')
        if not self._watchlist:
            raise ValueError('Select a tube or two before reserving a job')
        with self._sock_ctx() as socket:
            self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
            job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
            return Job(job_id, job_data)
    def _peek_common(self, typ):
        """Common implementation for the peek_* functions

        :param typ: queue to peek at: 'ready', 'delayed' or 'buried'
        """
        with self._sock_ctx() as socket:
            self._send_message('peek-{0}'.format(typ), socket)
            job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
            return Job(job_id, job_data)
    def peek_ready(self):
        """Peek at the top job on the ready queue.
        :rtype: :class:`Job`
        """
        return self._peek_common('ready')
    def peek_delayed(self):
        """Peek at the top job on the delayed queue.

        :rtype: :class:`Job`
        """
        return self._peek_common('delayed')
    def peek_buried(self):
        """Peek at the top job on the buried queue.

        :rtype: :class:`Job`
        """
        return self._peek_common('buried')
    def _common_iter(self, kallable, error):
        """Yield results of *kallable* until it raises BeanstalkError(*error*).

        Any other BeanstalkError is re-raised.
        """
        while True:
            try:
                job = kallable()
            except BeanstalkError as e:
                # NOTE(review): the socket layer raises BeanstalkError with bytes
                # while callers pass str ('TIMED_OUT'); this comparison relies on
                # BeanstalkError normalizing .message — confirm in its definition
                if e.message != error:
                    raise
                break
            yield job
    def reserve_iter(self):
        """Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
        # timeout=0 so iteration stops as soon as the ready queue is drained
        return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
    def peek_ready_iter(self):
        """Peek at ready jobs in sequence; stops when the server reports NOT_FOUND"""
        return self._common_iter(self.peek_ready, 'NOT_FOUND')
    def peek_delayed_iter(self):
        """Peek at delayed jobs in sequence; stops when the server reports NOT_FOUND"""
        return self._common_iter(self.peek_delayed, 'NOT_FOUND')
    def peek_buried_iter(self):
        """Peek at buried jobs in sequence; stops when the server reports NOT_FOUND"""
        return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
    def use(self, tube):
        """Start producing jobs into the given tube.
        :param tube: Name of the tube to USE
        Subsequent calls to :func:`put_job` insert jobs into this tube.
        """
        with self._sock_ctx() as socket:
            # no-op if already using the requested tube
            if self.current_tube != tube:
                # record intent first so a reconnect replays the USE
                self.desired_tube = tube
                self._send_message('use {0}'.format(tube), socket)
                self._receive_name(socket)
                self.current_tube = tube
    @contextmanager
    def using(self, tube):
        """Context-manager to insert jobs into a specific tube
        :param tube: Tube to insert to
        Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
        .. seealso::
           :func:`use()`
              Change the default tube
           :func:`put_job()`
              Put a job into whatever the current tube is
           :func:`put_job_into()`
              Put a job into a specific tube
        """
        try:
            # remember what was USEd so we can restore it on exit
            current_tube = self.current_tube
            self.use(tube)
            yield BeanstalkInsertingProxy(self, tube)
        finally:
            self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.put_job_into | python | def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr) | Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L300-L324 | null | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.watchlist | python | def watchlist(self, tubes):
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube) | Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L331-L342 | [
"def watch(self, tube):\n \"\"\"Add the given tube to the watchlist.\n\n :param tube: Name of the tube to add to the watchlist\n\n Note: Initially, all connections are watching a tube named \"default\". If\n you manually call :func:`watch()`, we will un-watch the \"default\" tube.\n To keep it in your list, first call :func:`watch()` with the other tubes, then\n call :func:`watch()` with \"default\".\n \"\"\"\n with self._sock_ctx() as socket:\n self.desired_watchlist.add(tube)\n if tube not in self._watchlist:\n self._send_message('watch {0}'.format(tube), socket)\n self._receive_id(socket)\n self._watchlist.add(tube)\n if self.initial_watch:\n if tube != 'default':\n self.ignore('default')\n self.initial_watch = False\n",
"def ignore(self, tube):\n \"\"\"Remove the given tube from the watchlist.\n\n :param tube: Name of tube to remove from the watchlist\n\n If all tubes are :func:`ignore()` d, beanstalk will auto-add \"default\" to the watchlist\n to prevent the list from being empty. See :func:`watch()` for more unformation.\n \"\"\"\n with self._sock_ctx() as socket:\n if tube not in self._watchlist:\n raise KeyError(tube)\n if tube != 'default':\n self.desired_watchlist.remove(tube)\n if tube in self._watchlist:\n self._send_message('ignore {0}'.format(tube), socket)\n self._receive_id(socket)\n self._watchlist.remove(tube)\n if not self._watchlist:\n self._watchlist.add('default')\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.watch | python | def watch(self, tube):
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False | Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default". | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L344-L363 | [
"def _receive_id(self, sock):\n status, gid = self._receive_name(sock)\n return status, int(gid)\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n",
"def ignore(self, tube):\n \"\"\"Remove the given tube from the watchlist.\n\n :param tube: Name of tube to remove from the watchlist\n\n If all tubes are :func:`ignore()` d, beanstalk will auto-add \"default\" to the watchlist\n to prevent the list from being empty. See :func:`watch()` for more unformation.\n \"\"\"\n with self._sock_ctx() as socket:\n if tube not in self._watchlist:\n raise KeyError(tube)\n if tube != 'default':\n self.desired_watchlist.remove(tube)\n if tube in self._watchlist:\n self._send_message('ignore {0}'.format(tube), socket)\n self._receive_id(socket)\n self._watchlist.remove(tube)\n if not self._watchlist:\n self._watchlist.add('default')\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.ignore | python | def ignore(self, tube):
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default') | Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation. | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L365-L383 | [
"def _receive_id(self, sock):\n status, gid = self._receive_name(sock)\n return status, int(gid)\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.stats_job | python | def stats_job(self, job_id):
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status | Fetch statistics about a single job
:rtype: dict | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L385-L396 | [
"def yaml_load(fo):\n # yaml.safe_load will never use the C loader; we have to detect it ourselves\n if hasattr(yaml, 'CSafeLoader'):\n return yaml.load(fo, Loader=yaml.CSafeLoader)\n else:\n return yaml.safe_load(fo)\n",
"def _receive_data_with_prefix(self, prefix, sock):\n buf = b''\n target_len = len(prefix) + 28\n while b'\\r\\n' not in buf:\n message = sock.recv(target_len - len(buf))\n if not message:\n break\n buf += message\n if b' ' not in buf:\n error = buf.rstrip()\n raise BeanstalkError(error)\n first_word, rest = buf.split(b' ', 1)\n if first_word != prefix:\n raise BeanstalkError(first_word)\n return self._receive_data(sock, rest)\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.stats_tube | python | def stats_tube(self, tube_name):
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body) | Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L398-L407 | [
"def yaml_load(fo):\n # yaml.safe_load will never use the C loader; we have to detect it ourselves\n if hasattr(yaml, 'CSafeLoader'):\n return yaml.load(fo, Loader=yaml.CSafeLoader)\n else:\n return yaml.safe_load(fo)\n",
"def _receive_data_with_prefix(self, prefix, sock):\n buf = b''\n target_len = len(prefix) + 28\n while b'\\r\\n' not in buf:\n message = sock.recv(target_len - len(buf))\n if not message:\n break\n buf += message\n if b' ' not in buf:\n error = buf.rstrip()\n raise BeanstalkError(error)\n first_word, rest = buf.split(b' ', 1)\n if first_word != prefix:\n raise BeanstalkError(first_word)\n return self._receive_data(sock, rest)\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.reserve_job | python | def reserve_job(self, timeout=5):
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data) | Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L409-L424 | [
"def _receive_id_and_data_with_prefix(self, prefix, sock):\n buf = b''\n target_len = len(prefix) + 28\n while b'\\r\\n' not in buf:\n message = sock.recv(target_len - len(buf))\n if not message:\n break\n buf += message\n if b' ' not in buf:\n error = buf.rstrip()\n raise BeanstalkError(error)\n first_word, rest = buf.split(b' ', 1)\n if first_word != prefix:\n raise BeanstalkError(first_word)\n the_id, rest = rest.split(b' ', 1)\n return int(the_id), self._receive_data(sock, rest)\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient._peek_common | python | def _peek_common(self, typ):
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data) | Common implementation for the peek_* functions | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L426-L431 | [
"def _receive_id_and_data_with_prefix(self, prefix, sock):\n buf = b''\n target_len = len(prefix) + 28\n while b'\\r\\n' not in buf:\n message = sock.recv(target_len - len(buf))\n if not message:\n break\n buf += message\n if b' ' not in buf:\n error = buf.rstrip()\n raise BeanstalkError(error)\n first_word, rest = buf.split(b' ', 1)\n if first_word != prefix:\n raise BeanstalkError(first_word)\n the_id, rest = rest.split(b' ', 1)\n return int(the_id), self._receive_data(sock, rest)\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.delete_job | python | def delete_job(self, job_id):
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED') | Delete the given job id. The job must have been previously reserved by this connection | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L474-L480 | [
"def _receive_word(self, sock, *expected_words):\n message = sock.recv(1024).rstrip()\n if message not in expected_words:\n raise BeanstalkError(message)\n return message\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
class BeanstalkClient(object):
    """Simple wrapper around the Beanstalk API.

    :param host: Hostname or IP address to connect to
    :type host: str
    :param port: Port to connect to
    :type port: int
    :param socket_timeout: Timeout to set on the socket.
    :type socket_timeout: float
    :param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
    :type auto_decode: bool

    Doesn't provide any fanciness for writing consumers or producers. Just lets you
    invoke methods to call beanstalk functions.

    .. warning::

       Setting socket timeout to a value lower than the value you pass to blocking
       functions like :func:`reserve_job()` will cause errors!
    """

    def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
        """Construct a synchronous Beanstalk Client. Does not connect!"""
        self.host = host
        self.port = port
        self.socket_timeout = socket_timeout
        self._reset_state()
        # What the caller *wants* USEd/WATCHed; re-applied automatically after a
        # reconnect (see _re_establish_use_watch).
        self.desired_tube = 'default'
        self.desired_watchlist = set(['default'])
        self.auto_decode = auto_decode

    @classmethod
    def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
        """Construct a synchronous Beanstalk Client from a URI.

        The URI may be of the form beanstalk://host:port or beanstalkd://host:port

        IPv6 literals must be wrapped in brackets as per RFC 2732.
        """
        parts = six.moves.urllib.parse.urlparse(uri)
        if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
            raise ValueError('Invalid scheme %s' % parts.scheme)
        # RFC 2732 IPv6 literal, e.g. beanstalk://[::1]:11300
        ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
        if ipv6_md:
            host = ipv6_md.group(1)
            port = ipv6_md.group(2) or '11300'
            port = port.lstrip(':')
        elif ':' in parts.netloc:
            host, port = parts.netloc.rsplit(':', 1)
        else:
            host = parts.netloc
            port = 11300
        port = int(port)
        return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)

    def _reset_state(self):
        # Connection-scoped state. beanstalkd starts every fresh connection
        # using and watching the "default" tube.
        self._watchlist = set(['default'])
        self.current_tube = 'default'
        self.initial_watch = True
        self.socket = None

    def __repr__(self):
        return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port)  # pragma: no cover

    def __str__(self):
        return '{0} - watching:{1}, current:{2}'.format(  # pragma: no cover
            repr(self), self._watchlist, self.current_tube  # pragma: no cover
        )  # pragma: no cover

    @property
    def _socket(self):
        # Lazily connect on first use. After a (re-)connect, transparently
        # restore the USE/WATCH state the caller had requested.
        if self.socket is None:
            self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
            self._re_establish_use_watch()
        return self.socket

    def _re_establish_use_watch(self):
        """Call after a close/re-connect.

        Automatically re-establishes the USE and WATCH configs previously setup.
        """
        if self.current_tube != self.desired_tube:
            self.use(self.desired_tube)
        if self._watchlist != self.desired_watchlist:
            self.watchlist = self.desired_watchlist

    def close(self):
        """Close any open connection to the Beanstalk server.

        This object is still safe to use after calling :func:`close()`; it will
        automatically reconnect and re-establish any open watches / uses.

        It is a logic error to close the connection while you have a reserved job.
        """
        if self.socket is not None:
            self.socket.close()
        self._reset_state()

    @contextmanager
    def _sock_ctx(self):
        # Single choke-point for handing out the (lazily-created) socket.
        yield self._socket

    def _receive_data_with_prefix(self, prefix, sock):
        # Read until the "<PREFIX> <nbytes>\r\n" header is complete, then hand
        # off to _receive_data for the body.
        buf = b''
        target_len = len(prefix) + 28
        while b'\r\n' not in buf:
            message = sock.recv(target_len - len(buf))
            if not message:
                break
            buf += message
        if b' ' not in buf:
            # A single-word reply here is always an error (e.g. NOT_FOUND).
            error = buf.rstrip()
            raise BeanstalkError(error)
        first_word, rest = buf.split(b' ', 1)
        if first_word != prefix:
            raise BeanstalkError(first_word)
        return self._receive_data(sock, rest)

    def _receive_id_and_data_with_prefix(self, prefix, sock):
        # Like _receive_data_with_prefix, but for "<PREFIX> <id> <nbytes>\r\n"
        # replies such as RESERVED/FOUND.
        buf = b''
        target_len = len(prefix) + 28
        while b'\r\n' not in buf:
            message = sock.recv(target_len - len(buf))
            if not message:
                break
            buf += message
        if b' ' not in buf:
            error = buf.rstrip()
            raise BeanstalkError(error)
        first_word, rest = buf.split(b' ', 1)
        if first_word != prefix:
            raise BeanstalkError(first_word)
        the_id, rest = rest.split(b' ', 1)
        return int(the_id), self._receive_data(sock, rest)

    def _receive_data(self, sock, initial=None):
        # `initial` is "<nbytes>\r\n<start of body...>"; read until nbytes plus
        # the trailing CRLF have arrived, then strip that CRLF.
        if initial is None:
            initial = sock.recv(12)
        byte_length, rest = initial.split(b'\r\n', 1)
        byte_length = int(byte_length) + 2  # account for trailing \r\n
        buf = [rest]
        bytes_read = len(rest)
        while bytes_read < byte_length:
            message = sock.recv(min(4096, byte_length - bytes_read))
            if not message:
                break
            bytes_read += len(message)
            buf.append(message)
        bytez = b''.join(buf)[:-2]
        if self.auto_decode:
            return bytez.decode('utf-8')
        else:
            return bytez

    def _receive_id(self, sock):
        # Replies like "INSERTED <id>" / "KICKED <count>".
        status, gid = self._receive_name(sock)
        return status, int(gid)

    def _receive_name(self, sock):
        # Replies like "USING <tube>"; a reply with no space is an error word.
        message = sock.recv(1024)
        if b' ' in message:
            status, rest = message.split(b' ', 1)
            return status, rest.rstrip()
        else:
            raise BeanstalkError(message.rstrip())

    def _receive_word(self, sock, *expected_words):
        # Single-word reply; anything outside expected_words is a protocol error.
        message = sock.recv(1024).rstrip()
        if message not in expected_words:
            raise BeanstalkError(message)
        return message

    def _send_message(self, message, sock):
        # Append the protocol CRLF terminator if missing; str is sent as UTF-8.
        if isinstance(message, bytes):
            if not message.endswith(b'\r\n'):
                message += b'\r\n'
            return sock.sendall(message)
        else:
            if not message.endswith('\r\n'):
                message += '\r\n'
            return sock.sendall(message.encode('utf-8'))

    def list_tubes(self):
        """Return a list of tubes that this beanstalk instance knows about

        :rtype: list of tubes
        """
        with self._sock_ctx() as sock:
            self._send_message('list-tubes', sock)
            body = self._receive_data_with_prefix(b'OK', sock)
            tubes = yaml_load(body)
            return tubes

    def stats(self):
        """Return a dictionary with a bunch of instance-wide statistics

        :rtype: dict
        """
        with self._sock_ctx() as socket:
            self._send_message('stats', socket)
            body = self._receive_data_with_prefix(b'OK', socket)
            stats = yaml_load(body)
            return stats

    def put_job(self, data, pri=65536, delay=0, ttr=120):
        """Insert a new job into whatever queue is currently USEd

        :param data: Job body
        :type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8)
        :param pri: Priority for the job
        :type pri: int
        :param delay: Delay in seconds before the job should be placed on the ready queue
        :type delay: int
        :param ttr: Time to reserve (how long a worker may work on this job before we assume the
           worker is blocked and give the job to another worker)
        :type ttr: int

        .. seealso::

           :func:`put_job_into()`
              Put a job into a specific tube

           :func:`using()`
              Insert a job using an external guard
        """
        with self._sock_ctx() as socket:
            # Encode *before* measuring: beanstalkd's <bytes> field is the UTF-8
            # byte length. Using len() of a str would send a character count and
            # corrupt the protocol for any non-ASCII body.
            if not isinstance(data, bytes):
                data = data.encode('utf-8')
            message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
                pri=pri, delay=delay, ttr=ttr, datalen=len(data)
            ).encode('utf-8')
            message += data
            message += b'\r\n'
            self._send_message(message, socket)
            return self._receive_id(socket)

    def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
        """Insert a new job into a specific queue. Wrapper around :func:`put_job`.

        :param tube_name: Tube name
        :type tube_name: str
        :param data: Job body
        :type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8)
        :param pri: Priority for the job
        :type pri: int
        :param delay: Delay in seconds before the job should be placed on the ready queue
        :type delay: int
        :param ttr: Time to reserve (how long a worker may work on this job before we assume the
           worker is blocked and give the job to another worker)
        :type ttr: int

        .. seealso::

           :func:`put_job()`
              Put a job into whatever the current tube is

           :func:`using()`
              Insert a job using an external guard
        """
        with self.using(tube_name) as inserter:
            return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)

    @property
    def watchlist(self):
        return self._watchlist

    @watchlist.setter
    def watchlist(self, tubes):
        """Set the watchlist to the given tubes

        :param tubes: A list of tubes to watch

        Automatically un-watches any tubes that are not on the target list
        """
        tubes = set(tubes)
        for tube in tubes - self._watchlist:
            self.watch(tube)
        for tube in self._watchlist - tubes:
            self.ignore(tube)

    def watch(self, tube):
        """Add the given tube to the watchlist.

        :param tube: Name of the tube to add to the watchlist

        Note: Initially, all connections are watching a tube named "default". If
        you manually call :func:`watch()`, we will un-watch the "default" tube.
        To keep it in your list, first call :func:`watch()` with the other tubes, then
        call :func:`watch()` with "default".
        """
        with self._sock_ctx() as socket:
            self.desired_watchlist.add(tube)
            if tube not in self._watchlist:
                self._send_message('watch {0}'.format(tube), socket)
                self._receive_id(socket)
                self._watchlist.add(tube)
                # First explicit watch drops the implicit "default" (unless the
                # caller is watching "default" itself).
                if self.initial_watch:
                    if tube != 'default':
                        self.ignore('default')
                    self.initial_watch = False

    def ignore(self, tube):
        """Remove the given tube from the watchlist.

        :param tube: Name of tube to remove from the watchlist

        If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
        to prevent the list from being empty. See :func:`watch()` for more information.
        """
        with self._sock_ctx() as socket:
            if tube not in self._watchlist:
                raise KeyError(tube)
            if tube != 'default':
                self.desired_watchlist.remove(tube)
            if tube in self._watchlist:
                self._send_message('ignore {0}'.format(tube), socket)
                self._receive_id(socket)
                self._watchlist.remove(tube)
                if not self._watchlist:
                    # Mirror server behavior: an empty watchlist falls back to "default".
                    self._watchlist.add('default')

    def stats_job(self, job_id):
        """Fetch statistics about a single job

        :rtype: dict
        """
        with self._sock_ctx() as socket:
            if hasattr(job_id, 'job_id'):
                job_id = job_id.job_id
            self._send_message('stats-job {0}'.format(job_id), socket)
            body = self._receive_data_with_prefix(b'OK', socket)
            job_status = yaml_load(body)
            return job_status

    def stats_tube(self, tube_name):
        """Fetch statistics about a single tube

        :param tube_name: Tube to fetch stats about
        :rtype: dict
        """
        with self._sock_ctx() as socket:
            self._send_message('stats-tube {0}'.format(tube_name), socket)
            body = self._receive_data_with_prefix(b'OK', socket)
            return yaml_load(body)

    def reserve_job(self, timeout=5):
        """Reserve a job for this connection. Blocks for TIMEOUT seconds and raises TIMED_OUT
        if no job was available.

        :param timeout: Time to wait for a job, in seconds.
        :type timeout: int
        """
        timeout = int(timeout)
        if self.socket_timeout is not None:
            # The socket would time out before the server reply arrives otherwise.
            if timeout >= self.socket_timeout:
                raise ValueError('reserve_job timeout must be < socket timeout')
        if not self._watchlist:
            raise ValueError('Select a tube or two before reserving a job')
        with self._sock_ctx() as socket:
            self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
            job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
            return Job(job_id, job_data)

    def _peek_common(self, typ):
        """Common implementation for the peek_* functions"""
        with self._sock_ctx() as socket:
            self._send_message('peek-{0}'.format(typ), socket)
            job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
            return Job(job_id, job_data)

    def peek_ready(self):
        """Peek at the top job on the ready queue.

        :rtype: :class:`Job`
        """
        return self._peek_common('ready')

    def peek_delayed(self):
        """Peek at the top job on the delayed queue"""
        return self._peek_common('delayed')

    def peek_buried(self):
        """Peek at the top job on the buried queue"""
        return self._peek_common('buried')

    def _common_iter(self, kallable, error):
        # Yield results of kallable() until the server replies with `error`
        # (e.g. TIMED_OUT / NOT_FOUND); re-raise anything unexpected.
        while True:
            try:
                job = kallable()
            except BeanstalkError as e:
                if e.message != error:
                    raise
                break
            yield job

    def reserve_iter(self):
        """Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
        return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')

    def peek_ready_iter(self):
        """Peek at ready jobs in sequence"""
        return self._common_iter(self.peek_ready, 'NOT_FOUND')

    def peek_delayed_iter(self):
        """Peek at delayed jobs in sequence"""
        return self._common_iter(self.peek_delayed, 'NOT_FOUND')

    def peek_buried_iter(self):
        """Peek at buried jobs in sequence"""
        return self._common_iter(self.peek_buried, 'NOT_FOUND')

    def bury_job(self, job_id, pri=65536):
        """Mark the given job_id as buried. The job must have been previously reserved by this connection

        :param job_id: Job to bury
        :param pri: Priority for the newly-buried job. If not passed, will keep its current priority
        :type pri: int
        """
        if hasattr(job_id, 'job_id'):
            job_id = job_id.job_id
        with self._sock_ctx() as socket:
            self._send_message('bury {0} {1}'.format(job_id, pri), socket)
            return self._receive_word(socket, b'BURIED')

    def release_job(self, job_id, pri=65536, delay=0):
        """Put a job back on the queue to be processed (indicating that you've aborted it)

        You can only release a job which you have reserved using :func:`reserve_job()` or
        :func:`reserve_iter()`.

        :param job_id: Job ID to return
        :param pri: New priority (if not passed, will use old priority)
        :type pri: int
        :param delay: New delay for job (if not passed, will use 0)
        :type delay: int
        """
        if hasattr(job_id, 'job_id'):
            job_id = job_id.job_id
        with self._sock_ctx() as socket:
            self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
            # BURIED means the server ran out of memory growing the priority queue.
            return self._receive_word(socket, b'RELEASED', b'BURIED')

    def kick_job(self, job_id):
        """Kick the given job id. The job must either be in the DELAYED or BURIED state and will be
        immediately moved to the READY state."""
        if hasattr(job_id, 'job_id'):
            job_id = job_id.job_id
        with self._sock_ctx() as socket:
            self._send_message('kick-job {0}'.format(job_id), socket)
            self._receive_word(socket, b'KICKED')

    def use(self, tube):
        """Start producing jobs into the given tube.

        :param tube: Name of the tube to USE

        Subsequent calls to :func:`put_job` insert jobs into this tube.
        """
        with self._sock_ctx() as socket:
            if self.current_tube != tube:
                self.desired_tube = tube
                self._send_message('use {0}'.format(tube), socket)
                self._receive_name(socket)
                self.current_tube = tube

    @contextmanager
    def using(self, tube):
        """Context-manager to insert jobs into a specific tube

        :param tube: Tube to insert to

        Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube

        .. seealso::

           :func:`use()`
              Change the default tube

           :func:`put_job()`
              Put a job into whatever the current tube is

           :func:`put_job_into()`
              Put a job into a specific tube
        """
        try:
            current_tube = self.current_tube
            self.use(tube)
            yield BeanstalkInsertingProxy(self, tube)
        finally:
            # Always restore whatever tube was USEd before entering the guard.
            self.use(current_tube)

    def kick_jobs(self, num_jobs):
        """Kick some number of jobs from the buried queue onto the ready queue.

        :param num_jobs: Number of jobs to kick
        :type num_jobs: int

        If not that many jobs are in the buried queue, it will kick as many as it can."""
        with self._sock_ctx() as socket:
            self._send_message('kick {0}'.format(num_jobs), socket)
            return self._receive_id(socket)

    def pause_tube(self, tube, delay=3600):
        """Pause a tube for some number of seconds, preventing it from issuing jobs.

        :param delay: Time to pause for, in seconds
        :type delay: int

        There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.

        .. seealso::

           :func:`unpause_tube()`
        """
        with self._sock_ctx() as socket:
            delay = int(delay)
            self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
            return self._receive_word(socket, b'PAUSED')

    def unpause_tube(self, tube):
        """Unpause a tube which was previously paused with :func:`pause_tube()`.

        .. seealso::

           :func:`pause_tube()`
        """
        with self._sock_ctx() as socket:
            # A delay of 0 un-pauses the tube.
            self._send_message('pause-tube {0} 0'.format(tube), socket)
            return self._receive_word(socket, b'PAUSED')
|
def bury_job(self, job_id, pri=65536):
    """Mark the given job_id as buried. The job must have been previously reserved by this connection.

    :param job_id: Job id (or an object with a ``job_id`` attribute) to bury
    :param pri: Priority for the newly-buried job. If not passed, will keep its current priority
    :type pri: int
    """
    # Accept either a bare id or a Job-like object carrying .job_id.
    if hasattr(job_id, 'job_id'):
        job_id = job_id.job_id
    with self._sock_ctx() as socket:
        self._send_message('bury {0} {1}'.format(job_id, pri), socket)
        # Server replies BURIED on success; anything else raises BeanstalkError.
        return self._receive_word(socket, b'BURIED')
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
def release_job(self, job_id, pri=65536, delay=0):
    """Put a job back on the queue to be processed (indicating that you've aborted it).

    You can only release a job which you have reserved using :func:`reserve_job()` or
    :func:`reserve_iter()`.

    :param job_id: Job ID to return
    :param pri: New priority (if not passed, will use old priority)
    :type pri: int
    :param delay: New delay for job (if not passed, will use 0)
    :type delay: int
    """
    # Accept either a bare id or a Job-like object carrying .job_id.
    if hasattr(job_id, 'job_id'):
        job_id = job_id.job_id
    with self._sock_ctx() as socket:
        self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
        # BURIED is a valid (degraded) outcome: the server could not grow its
        # priority queue and buried the job instead of releasing it.
        return self._receive_word(socket, b'RELEASED', b'BURIED')
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.kick_job | python | def kick_job(self, job_id):
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED') | Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state. | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L512-L519 | [
"def _receive_word(self, sock, *expected_words):\n message = sock.recv(1024).rstrip()\n if message not in expected_words:\n raise BeanstalkError(message)\n return message\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.use | python | def use(self, tube):
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube | Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube. | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L521-L533 | [
"def _receive_name(self, sock):\n message = sock.recv(1024)\n if b' ' in message:\n status, rest = message.split(b' ', 1)\n return status, rest.rstrip()\n else:\n raise BeanstalkError(message.rstrip())\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more information.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT seconds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the top job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the top job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
def using(self, tube):
    """Context-manager to insert jobs into a specific tube.

    :param tube: Tube to insert to

    Yields a :class:`BeanstalkInsertingProxy` bound to *tube*; the previously
    USEd tube is restored on exit, even if the body raises.

    NOTE(review): in the enclosing class this generator is wrapped with
    ``@contextmanager`` (the decorator appears directly above it in the class
    body) — confirm when splicing this back into the class.
    """
    previous_tube = self.current_tube
    try:
        self.use(tube)
        yield BeanstalkInsertingProxy(self, tube)
    finally:
        # Always switch back to whatever tube was active before.
        self.use(previous_tube)
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L536-L559 | [
"def use(self, tube):\n \"\"\"Start producing jobs into the given tube.\n\n :param tube: Name of the tube to USE\n\n Subsequent calls to :func:`put_job` insert jobs into this tube.\n \"\"\"\n with self._sock_ctx() as socket:\n if self.current_tube != tube:\n self.desired_tube = tube\n self._send_message('use {0}'.format(tube), socket)\n self._receive_name(socket)\n self.current_tube = tube\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more information.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT seconds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the top job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the top job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
def kick_jobs(self, num_jobs):
    """Kick some number of jobs from the buried queue onto the ready queue.

    :param num_jobs: Number of jobs to kick
    :type num_jobs: int

    If fewer than ``num_jobs`` jobs are buried, the server kicks as many as
    it can.
    """
    with self._sock_ctx() as sock:
        self._send_message('kick {0}'.format(num_jobs), sock)
        return self._receive_id(sock)
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can. | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L561-L570 | [
"def _receive_id(self, sock):\n status, gid = self._receive_name(sock)\n return status, int(gid)\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.pause_tube | python | def pause_tube(self, tube, delay=3600):
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED') | Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()` | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L572-L587 | [
"def _receive_word(self, sock, *expected_words):\n message = sock.recv(1024).rstrip()\n if message not in expected_words:\n raise BeanstalkError(message)\n return message\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme)
ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':')
elif ':' in parts.netloc:
host, port = parts.netloc.rsplit(':', 1)
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
self._watchlist = set(['default'])
self.current_tube = 'default'
self.initial_watch = True
self.socket = None
def __repr__(self):
return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover
def __str__(self):
return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover
repr(self), self._watchlist, self.current_tube # pragma: no cover
) # pragma: no cover
@property
def _socket(self):
if self.socket is None:
self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
self._re_establish_use_watch()
return self.socket
def _re_establish_use_watch(self):
"""Call after a close/re-connect.
Automatically re-establishes the USE and WATCH configs previously setup.
"""
if self.current_tube != self.desired_tube:
self.use(self.desired_tube)
if self._watchlist != self.desired_watchlist:
self.watchlist = self.desired_watchlist
def close(self):
"""Close any open connection to the Beanstalk server.
This object is still safe to use after calling :func:`close()` ; it will automatically reconnect
and re-establish any open watches / uses.
It is a logic error to close the connection while you have a reserved job
"""
if self.socket is not None:
self.socket.close()
self._reset_state()
@contextmanager
def _sock_ctx(self):
yield self._socket
def _receive_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
return self._receive_data(sock, rest)
def _receive_id_and_data_with_prefix(self, prefix, sock):
buf = b''
target_len = len(prefix) + 28
while b'\r\n' not in buf:
message = sock.recv(target_len - len(buf))
if not message:
break
buf += message
if b' ' not in buf:
error = buf.rstrip()
raise BeanstalkError(error)
first_word, rest = buf.split(b' ', 1)
if first_word != prefix:
raise BeanstalkError(first_word)
the_id, rest = rest.split(b' ', 1)
return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
"""Return a list of tubes that this beanstalk instance knows about
:rtype: list of tubes
"""
with self._sock_ctx() as sock:
self._send_message('list-tubes', sock)
body = self._receive_data_with_prefix(b'OK', sock)
tubes = yaml_load(body)
return tubes
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
return self._watchlist
@watchlist.setter
def watchlist(self, tubes):
"""Set the watchlist to the given tubes
:param tubes: A list of tubes to watch
Automatically un-watches any tubes that are not on the target list
"""
tubes = set(tubes)
for tube in tubes - self._watchlist:
self.watch(tube)
for tube in self._watchlist - tubes:
self.ignore(tube)
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more unformation.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default')
def stats_job(self, job_id):
"""Fetch statistics about a single job
:rtype: dict
"""
with self._sock_ctx() as socket:
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
self._send_message('stats-job {0}'.format(job_id), socket)
body = self._receive_data_with_prefix(b'OK', socket)
job_status = yaml_load(body)
return job_status
def stats_tube(self, tube_name):
"""Fetch statistics about a single tube
:param tube_name: Tube to fetch stats about
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats-tube {0}'.format(tube_name), socket)
body = self._receive_data_with_prefix(b'OK', socket)
return yaml_load(body)
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
"""Common implementation for the peek_* functions"""
with self._sock_ctx() as socket:
self._send_message('peek-{0}'.format(typ), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket)
return Job(job_id, job_data)
def peek_ready(self):
"""Peek at the job job on the ready queue.
:rtype: :class:`Job`
"""
return self._peek_common('ready')
def peek_delayed(self):
"""Peek at the job job on the delayed queue"""
return self._peek_common('delayed')
def peek_buried(self):
"""Peek at the top job on the buried queue"""
return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
"""Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')
def peek_ready_iter(self):
"""Peek at ready jobs in sequence"""
return self._common_iter(self.peek_ready, 'NOT_FOUND')
def peek_delayed_iter(self):
"""Peek at delayed jobs in sequence"""
return self._common_iter(self.peek_delayed, 'NOT_FOUND')
def peek_buried_iter(self):
"""Peek at buried jobs in sequence"""
return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
"""Delete the given job id. The job must have been previously reserved by this connection"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('delete {0}'.format(job_id), socket)
self._receive_word(socket, b'DELETED')
def bury_job(self, job_id, pri=65536):
"""Mark the given job_id as buried. The job must have been previously reserved by this connection
:param job_id: Job to bury
:param pri: Priority for the newly-buried job. If not passed, will keep its current priority
:type pri: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('bury {0} {1}'.format(job_id, pri), socket)
return self._receive_word(socket, b'BURIED')
def release_job(self, job_id, pri=65536, delay=0):
"""Put a job back on the queue to be processed (indicating that you've aborted it)
You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.
:param job_id: Job ID to return
:param pri: New priority (if not passed, will use old priority)
:type pri: int
:param delay: New delay for job (if not passed, will use 0)
:type delay: int
"""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket)
return self._receive_word(socket, b'RELEASED', b'BURIED')
def kick_job(self, job_id):
"""Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
the READY state."""
if hasattr(job_id, 'job_id'):
job_id = job_id.job_id
with self._sock_ctx() as socket:
self._send_message('kick-job {0}'.format(job_id), socket)
self._receive_word(socket, b'KICKED')
def use(self, tube):
"""Start producing jobs into the given tube.
:param tube: Name of the tube to USE
Subsequent calls to :func:`put_job` insert jobs into this tube.
"""
with self._sock_ctx() as socket:
if self.current_tube != tube:
self.desired_tube = tube
self._send_message('use {0}'.format(tube), socket)
self._receive_name(socket)
self.current_tube = tube
@contextmanager
def using(self, tube):
"""Context-manager to insert jobs into a specific tube
:param tube: Tube to insert to
Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube
.. seealso::
:func:`use()`
Change the default tube
:func:`put_job()`
Put a job into whatever the current tube is
:func:`put_job_into()`
Put a job into a specific tube
"""
try:
current_tube = self.current_tube
self.use(tube)
yield BeanstalkInsertingProxy(self, tube)
finally:
self.use(current_tube)
def kick_jobs(self, num_jobs):
"""Kick some number of jobs from the buried queue onto the ready queue.
:param num_jobs: Number of jobs to kick
:type num_jobs: int
If not that many jobs are in the buried queue, it will kick as many as it can."""
with self._sock_ctx() as socket:
self._send_message('kick {0}'.format(num_jobs), socket)
return self._receive_id(socket)
def unpause_tube(self, tube):
"""Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()`
"""
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED')
|
EasyPost/pystalk | pystalk/client.py | BeanstalkClient.unpause_tube | python | def unpause_tube(self, tube):
with self._sock_ctx() as socket:
self._send_message('pause-tube {0} 0'.format(tube), socket)
return self._receive_word(socket, b'PAUSED') | Unpause a tube which was previously paused with :func:`pause_tube()`.
.. seealso::
:func:`pause_tube()` | train | https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L589-L598 | [
"def _receive_word(self, sock, *expected_words):\n message = sock.recv(1024).rstrip()\n if message not in expected_words:\n raise BeanstalkError(message)\n return message\n",
"def _send_message(self, message, sock):\n if isinstance(message, bytes):\n if not message.endswith(b'\\r\\n'):\n message += b'\\r\\n'\n return sock.sendall(message)\n else:\n if not message.endswith('\\r\\n'):\n message += '\\r\\n'\n return sock.sendall(message.encode('utf-8'))\n"
] | class BeanstalkClient(object):
"""Simple wrapper around the Beanstalk API.
:param host: Hostname or IP address to connect to
:type host: str
:param port: Port to connect to
:type port: int
:param socket_timeout: Timeout to set on the socket.
:type socket_timeout: float
:param auto_decode: Attempt to decode job bodies as UTF-8 when reading them
:type auto_decode: bool
Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk
functions.
.. warning::
Setting socket timeout to a value lower than the value you pass to blocking functions like
:func:`reserve_job()` will cause errors!
"""
def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client. Does not connect!"""
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self._reset_state()
self.desired_tube = 'default'
self.desired_watchlist = set(['default'])
self.auto_decode = auto_decode
@classmethod
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
    """Construct a synchronous Beanstalk Client from a URI.

    Accepted schemes are ``beanstalk://host:port`` and
    ``beanstalkd://host:port``. IPv6 literals must be wrapped in
    brackets as per RFC 2732.
    """
    parsed = six.moves.urllib.parse.urlparse(uri)
    if parsed.scheme.lower() not in ('beanstalk', 'beanstalkd'):
        raise ValueError('Invalid scheme %s' % parsed.scheme)
    bracketed = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parsed.netloc)
    if bracketed:
        # Bracketed IPv6 literal, optionally followed by ":port".
        host = bracketed.group(1)
        port = (bracketed.group(2) or ':11300').lstrip(':')
    elif ':' in parsed.netloc:
        host, port = parsed.netloc.rsplit(':', 1)
    else:
        host = parsed.netloc
        port = 11300
    return cls(host, int(port), socket_timeout=socket_timeout, auto_decode=auto_decode)
def _reset_state(self):
    # Drop all per-connection state; the next use will reconnect lazily.
    self.socket = None
    self.initial_watch = True
    self.current_tube = 'default'
    self._watchlist = set(['default'])

def __repr__(self):
    return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port)  # pragma: no cover

def __str__(self):
    return '{0} - watching:{1}, current:{2}'.format(  # pragma: no cover
        repr(self), self._watchlist, self.current_tube  # pragma: no cover
    )  # pragma: no cover
@property
def _socket(self):
    # Lazily open the TCP connection on first use, then replay any
    # previously-configured USE/WATCH state on the fresh connection.
    if self.socket is None:
        self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout)
        self._re_establish_use_watch()
    return self.socket

def _re_establish_use_watch(self):
    """Call after a close/re-connect.

    Automatically re-establishes the USE and WATCH configs previously setup.
    """
    if self.desired_tube != self.current_tube:
        self.use(self.desired_tube)
    if self.desired_watchlist != self._watchlist:
        self.watchlist = self.desired_watchlist

def close(self):
    """Close any open connection to the Beanstalk server.

    This object is still safe to use after calling :func:`close()`; it will
    automatically reconnect and re-establish any open watches / uses.

    It is a logic error to close the connection while you have a reserved job.
    """
    if self.socket is not None:
        self.socket.close()
    self._reset_state()

@contextmanager
def _sock_ctx(self):
    # Single place to hand out the (lazily created) socket.
    yield self._socket
def _receive_prefixed_header(self, prefix, sock):
    """Read the first reply line, validate its leading word against *prefix*.

    Returns the remainder of the buffered bytes after the prefix word.
    Raises :class:`BeanstalkError` with the server's word on mismatch.
    """
    buf = b''
    target_len = len(prefix) + 28
    while b'\r\n' not in buf:
        message = sock.recv(target_len - len(buf))
        if not message:
            break
        buf += message
    if b' ' not in buf:
        # No space means the whole line is an error word (e.g. NOT_FOUND).
        raise BeanstalkError(buf.rstrip())
    first_word, rest = buf.split(b' ', 1)
    if first_word != prefix:
        raise BeanstalkError(first_word)
    return rest

def _receive_data_with_prefix(self, prefix, sock):
    # "<PREFIX> <bytes>\r\n<data>\r\n" -> data
    rest = self._receive_prefixed_header(prefix, sock)
    return self._receive_data(sock, rest)

def _receive_id_and_data_with_prefix(self, prefix, sock):
    # "<PREFIX> <id> <bytes>\r\n<data>\r\n" -> (id, data)
    rest = self._receive_prefixed_header(prefix, sock)
    the_id, rest = rest.split(b' ', 1)
    return int(the_id), self._receive_data(sock, rest)
def _receive_data(self, sock, initial=None):
if initial is None:
initial = sock.recv(12)
byte_length, rest = initial.split(b'\r\n', 1)
byte_length = int(byte_length) + 2
buf = [rest]
bytes_read = len(rest)
while bytes_read < byte_length:
message = sock.recv(min(4096, byte_length - bytes_read))
if not message:
break
bytes_read += len(message)
buf.append(message)
bytez = b''.join(buf)[:-2]
if self.auto_decode:
return bytez.decode('utf-8')
else:
return bytez
def _receive_id(self, sock):
status, gid = self._receive_name(sock)
return status, int(gid)
def _receive_name(self, sock):
message = sock.recv(1024)
if b' ' in message:
status, rest = message.split(b' ', 1)
return status, rest.rstrip()
else:
raise BeanstalkError(message.rstrip())
def _receive_word(self, sock, *expected_words):
message = sock.recv(1024).rstrip()
if message not in expected_words:
raise BeanstalkError(message)
return message
def _send_message(self, message, sock):
if isinstance(message, bytes):
if not message.endswith(b'\r\n'):
message += b'\r\n'
return sock.sendall(message)
else:
if not message.endswith('\r\n'):
message += '\r\n'
return sock.sendall(message.encode('utf-8'))
def list_tubes(self):
    """Return a list of tubes that this beanstalk instance knows about

    :rtype: list of tubes
    """
    with self._sock_ctx() as sock:
        self._send_message('list-tubes', sock)
        return yaml_load(self._receive_data_with_prefix(b'OK', sock))

def stats(self):
    """Return a dictionary with a bunch of instance-wide statistics

    :rtype: dict
    """
    with self._sock_ctx() as sock:
        self._send_message('stats', sock)
        return yaml_load(self._receive_data_with_prefix(b'OK', sock))
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket)
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
    """Insert a new job into a specific queue. Wrapper around :func:`put_job`.

    :param tube_name: Tube name
    :type tube_name: str
    :param data: Job body
    :type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
    :param pri: Priority for the job
    :type pri: int
    :param delay: Delay in seconds before the job should be placed on the ready queue
    :type delay: int
    :param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
        and give the job to another worker
    :type ttr: int

    .. seealso::

       :func:`put_job()`
          Put a job into whatever the current tube is

       :func:`using()`
          Insert a job using an external guard
    """
    with self.using(tube_name) as inserter:
        return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
@property
def watchlist(self):
    # Tubes this connection is currently watching.
    return self._watchlist

@watchlist.setter
def watchlist(self, tubes):
    """Set the watchlist to the given tubes

    :param tubes: A list of tubes to watch

    Automatically un-watches any tubes that are not on the target list
    """
    desired = set(tubes)
    for missing in desired - self._watchlist:
        self.watch(missing)
    for extra in self._watchlist - desired:
        self.ignore(extra)
def watch(self, tube):
    """Add the given tube to the watchlist.

    :param tube: Name of the tube to add to the watchlist

    Note: Initially, all connections are watching a tube named "default". If
    you manually call :func:`watch()`, we will un-watch the "default" tube.
    To keep it in your list, first call :func:`watch()` with the other tubes, then
    call :func:`watch()` with "default".
    """
    with self._sock_ctx() as sock:
        self.desired_watchlist.add(tube)
        if tube not in self._watchlist:
            self._send_message('watch {0}'.format(tube), sock)
            self._receive_id(sock)
            self._watchlist.add(tube)
            if self.initial_watch:
                # First explicit watch: drop the implicit "default" tube.
                if tube != 'default':
                    self.ignore('default')
                self.initial_watch = False

def ignore(self, tube):
    """Remove the given tube from the watchlist.

    :param tube: Name of tube to remove from the watchlist

    If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
    to prevent the list from being empty. See :func:`watch()` for more unformation.
    """
    with self._sock_ctx() as sock:
        if tube not in self._watchlist:
            raise KeyError(tube)
        if tube != 'default':
            self.desired_watchlist.remove(tube)
        if tube in self._watchlist:
            self._send_message('ignore {0}'.format(tube), sock)
            self._receive_id(sock)
            self._watchlist.remove(tube)
            if not self._watchlist:
                # Mirror the server behaviour: an empty watchlist
                # falls back to "default".
                self._watchlist.add('default')
def stats_job(self, job_id):
    """Fetch statistics about a single job

    :rtype: dict
    """
    # Accept either a raw id or a Job-like object carrying .job_id.
    if hasattr(job_id, 'job_id'):
        job_id = job_id.job_id
    with self._sock_ctx() as sock:
        self._send_message('stats-job {0}'.format(job_id), sock)
        return yaml_load(self._receive_data_with_prefix(b'OK', sock))

def stats_tube(self, tube_name):
    """Fetch statistics about a single tube

    :param tube_name: Tube to fetch stats about
    :rtype: dict
    """
    with self._sock_ctx() as sock:
        self._send_message('stats-tube {0}'.format(tube_name), sock)
        return yaml_load(self._receive_data_with_prefix(b'OK', sock))
def reserve_job(self, timeout=5):
"""Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available
:param timeout: Time to wait for a job, in seconds.
:type timeout: int
"""
timeout = int(timeout)
if self.socket_timeout is not None:
if timeout >= self.socket_timeout:
raise ValueError('reserve_job timeout must be < socket timeout')
if not self._watchlist:
raise ValueError('Select a tube or two before reserving a job')
with self._sock_ctx() as socket:
self._send_message('reserve-with-timeout {0}'.format(timeout), socket)
job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket)
return Job(job_id, job_data)
def _peek_common(self, typ):
    """Common implementation for the peek_* functions"""
    with self._sock_ctx() as sock:
        self._send_message('peek-{0}'.format(typ), sock)
        jid, body = self._receive_id_and_data_with_prefix(b'FOUND', sock)
        return Job(jid, body)

def peek_ready(self):
    """Peek at the top job on the ready queue.

    :rtype: :class:`Job`
    """
    return self._peek_common('ready')

def peek_delayed(self):
    """Peek at the top job on the delayed queue"""
    return self._peek_common('delayed')

def peek_buried(self):
    """Peek at the top job on the buried queue"""
    return self._peek_common('buried')
def _common_iter(self, kallable, error):
while True:
try:
job = kallable()
except BeanstalkError as e:
if e.message != error:
raise
break
yield job
def reserve_iter(self):
    """Reserve jobs as an iterator. Ends iteration when there are no more jobs immediately available"""
    return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT')

def peek_ready_iter(self):
    """Peek at ready jobs in sequence"""
    return self._common_iter(self.peek_ready, 'NOT_FOUND')

def peek_delayed_iter(self):
    """Peek at delayed jobs in sequence"""
    return self._common_iter(self.peek_delayed, 'NOT_FOUND')

def peek_buried_iter(self):
    """Peek at buried jobs in sequence"""
    return self._common_iter(self.peek_buried, 'NOT_FOUND')
def delete_job(self, job_id):
    """Delete the given job id. The job must have been previously reserved by this connection"""
    if hasattr(job_id, 'job_id'):
        job_id = job_id.job_id
    with self._sock_ctx() as sock:
        self._send_message('delete {0}'.format(job_id), sock)
        self._receive_word(sock, b'DELETED')

def bury_job(self, job_id, pri=65536):
    """Mark the given job_id as buried. The job must have been previously reserved by this connection

    :param job_id: Job to bury
    :param pri: Priority for the newly-buried job. If not passed, will keep its current priority
    :type pri: int
    """
    if hasattr(job_id, 'job_id'):
        job_id = job_id.job_id
    with self._sock_ctx() as sock:
        self._send_message('bury {0} {1}'.format(job_id, pri), sock)
        return self._receive_word(sock, b'BURIED')

def release_job(self, job_id, pri=65536, delay=0):
    """Put a job back on the queue to be processed (indicating that you've aborted it)

    You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`.

    :param job_id: Job ID to return
    :param pri: New priority (if not passed, will use old priority)
    :type pri: int
    :param delay: New delay for job (if not passed, will use 0)
    :type delay: int
    """
    if hasattr(job_id, 'job_id'):
        job_id = job_id.job_id
    with self._sock_ctx() as sock:
        self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), sock)
        return self._receive_word(sock, b'RELEASED', b'BURIED')

def kick_job(self, job_id):
    """Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to
    the READY state."""
    if hasattr(job_id, 'job_id'):
        job_id = job_id.job_id
    with self._sock_ctx() as sock:
        self._send_message('kick-job {0}'.format(job_id), sock)
        self._receive_word(sock, b'KICKED')
def use(self, tube):
    """Start producing jobs into the given tube.

    :param tube: Name of the tube to USE

    Subsequent calls to :func:`put_job` insert jobs into this tube.
    """
    with self._sock_ctx() as sock:
        if self.current_tube != tube:
            self.desired_tube = tube
            self._send_message('use {0}'.format(tube), sock)
            self._receive_name(sock)
            self.current_tube = tube

@contextmanager
def using(self, tube):
    """Context-manager to insert jobs into a specific tube

    :param tube: Tube to insert to

    Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube

    .. seealso::

       :func:`use()`
          Change the default tube

       :func:`put_job()`
          Put a job into whatever the current tube is

       :func:`put_job_into()`
          Put a job into a specific tube
    """
    previous = self.current_tube
    try:
        self.use(tube)
        yield BeanstalkInsertingProxy(self, tube)
    finally:
        # Always restore the tube that was in use before the guard.
        self.use(previous)

def kick_jobs(self, num_jobs):
    """Kick some number of jobs from the buried queue onto the ready queue.

    :param num_jobs: Number of jobs to kick
    :type num_jobs: int

    If not that many jobs are in the buried queue, it will kick as many as it can."""
    with self._sock_ctx() as sock:
        self._send_message('kick {0}'.format(num_jobs), sock)
        return self._receive_id(sock)

def pause_tube(self, tube, delay=3600):
    """Pause a tube for some number of seconds, preventing it from issuing jobs.

    :param delay: Time to pause for, in seconds
    :type delay: int

    There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.

    .. seealso::

       :func:`unpause_tube()`
    """
    with self._sock_ctx() as sock:
        self._send_message('pause-tube {0} {1}'.format(tube, int(delay)), sock)
        return self._receive_word(sock, b'PAUSED')
|
def rem2ics():
    """Command line tool to convert from Remind to iCalendar syntax."""
    # pylint: disable=maybe-no-member
    from argparse import ArgumentParser, FileType
    from dateutil.parser import parse
    from sys import stdin, stdout

    parser = ArgumentParser(description='Converter from Remind to iCalendar syntax.')
    parser.add_argument('-s', '--startdate', type=lambda s: parse(s).date(),
                        default=date.today() - timedelta(weeks=12),
                        help='Start offset for remind call (default: -12 weeks)')
    parser.add_argument('-m', '--month', type=int, default=15,
                        help='Number of month to generate calendar beginning wit startdate (default: 15)')
    parser.add_argument('-a', '--alarm', type=int, default=-10,
                        help='Trigger time for the alarm before the event in minutes (default: -10)')
    parser.add_argument('-z', '--zone',
                        help='Timezone of Remind file (default: local timezone)')
    parser.add_argument('infile', nargs='?', default=expanduser('~/.reminders'),
                        help='The Remind file to process (default: ~/.reminders)')
    parser.add_argument('outfile', nargs='?', type=FileType('w'), default=stdout,
                        help='Output iCalendar file (default: stdout)')
    args = parser.parse_args()

    zone = timezone(args.zone) if args.zone else None
    remind = Remind(args.infile, zone, args.startdate, args.month,
                    timedelta(minutes=args.alarm))
    if args.infile == '-':
        # "-" means: read Remind commands from stdin instead of a file.
        cal = remind.stdin_to_vobject(stdin.read())
        if cal:
            args.outfile.write(cal.serialize())
    else:
        args.outfile.write(remind.to_vobject().serialize())
"def to_vobject(self, filename=None, uid=None):\n \"\"\"Return iCal object of Remind lines\n If filename and UID are specified, the vObject only contains that event.\n If only a filename is specified, the vObject contains all events in the file.\n Otherwise the vObject contains all all objects of all files associated with the Remind object.\n\n filename -- the remind file\n uid -- the UID of the Remind line\n \"\"\"\n self._update()\n\n cal = iCalendar()\n if uid:\n self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))\n elif filename:\n for event in self._reminders[filename].values():\n self._gen_vevent(event, cal.add('vevent'))\n else:\n for filename in self._reminders:\n for event in self._reminders[filename].values():\n self._gen_vevent(event, cal.add('vevent'))\n return cal\n",
"def stdin_to_vobject(self, lines):\n \"\"\"Return iCal object of the Remind commands in lines\"\"\"\n cal = iCalendar()\n for event in self._parse_remind('-', lines)['-'].values():\n self._gen_vevent(event, cal.add('vevent'))\n return cal\n"
] | # Python library to convert between Remind and iCalendar
#
# Copyright (C) 2013-2018 Jochen Sprickerhof
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Python library to convert between Remind and iCalendar"""
from datetime import date, datetime, timedelta
from dateutil import rrule
from hashlib import md5
from os.path import getmtime, expanduser
from pytz import timezone
from socket import getfqdn
from subprocess import Popen, PIPE
from threading import Lock
from tzlocal import get_localzone
from vobject import readOne, iCalendar
class Remind(object):
"""Represents a collection of Remind files"""
def __init__(self, filename=expanduser('~/.reminders'), localtz=None,
             startdate=date.today() - timedelta(weeks=12), month=15,
             alarm=timedelta(minutes=-10)):
    """Constructor

    filename -- the remind file (included files will be used as well)
    localtz -- the timezone of the remind file
    startdate -- the date to start parsing, will be passed to remind
    month -- how many month to parse, will be passed to remind -s
    alarm -- alarm lead time attached to generated timed events
    """
    self._filename = filename
    self._localtz = localtz if localtz else get_localzone()
    self._startdate = startdate
    self._month = month
    self._alarm = alarm
    self._lock = Lock()
    self._reminders = {}
    self._mtime = 0
    # Parse the file(s) immediately so the object starts populated.
    self._update()
def _parse_remind(self, filename, lines=''):
    """Calls remind and parses the output into a dict

    filename -- the remind file (included files will be used as well)
    lines -- used as stdin to remind (filename will be set to -)
    """
    files = {}
    reminders = {}
    if lines:
        # A filename of '-' makes remind read the commands from stdin.
        filename = '-'
        files[filename] = lines
        reminders[filename] = {}
    cmd = ['remind', '-l', '-s%d' % self._month, '-b1', '-y', '-r',
           filename, str(self._startdate)]
    try:
        rem = Popen(cmd, stdin=PIPE, stdout=PIPE).communicate(input=lines.encode('utf-8'))[0].decode('utf-8')
    except OSError:
        raise OSError('Error running: %s' % ' '.join(cmd))
    rem = rem.splitlines()
    # The output comes in pairs of lines: a file-info line followed by
    # the event line itself.
    for (fileinfo, line) in zip(rem[::2], rem[1::2]):
        fileinfo = fileinfo.split()
        src_filename = fileinfo[3]
        if src_filename not in files:
            # There is a race condition with the remind call above here.
            # This could be solved by parsing the remind -de output,
            # but I don't see an easy way to do that.
            files[src_filename] = open(src_filename).readlines()
            reminders[src_filename] = {}
            mtime = getmtime(src_filename)
            if mtime > self._mtime:
                self._mtime = mtime
        text = files[src_filename][int(fileinfo[2]) - 1]
        event = self._parse_remind_line(line, text)
        # Repeated occurrences share a UID; accumulate their start dates.
        if event['uid'] in reminders[src_filename]:
            reminders[src_filename][event['uid']]['dtstart'] += event['dtstart']
            reminders[src_filename][event['uid']]['line'] += line
        else:
            reminders[src_filename][event['uid']] = event
            reminders[src_filename][event['uid']]['line'] = line
    # Find included files without reminders and add them to the file list
    for source in files.values():
        for line in source:
            if line.startswith('include'):
                new_file = line.split(' ')[1].strip()
                if new_file not in reminders:
                    reminders[new_file] = {}
                    mtime = getmtime(new_file)
                    if mtime > self._mtime:
                        self._mtime = mtime
    return reminders
@staticmethod
def _gen_description(text):
"""Convert from Remind MSG to iCal description
Opposite of _gen_msg()
"""
return text[text.rfind('%"') + 3:].replace('%_', '\n').replace('["["]', '[').strip()
def _parse_remind_line(self, line, text):
    """Parse a line of remind output into a dict

    line -- the remind output
    text -- the original remind input
    """
    event = {}
    line = line.split(None, 6)
    dat = [int(f) for f in line[0].split('/')]
    if line[4] != '*':
        # Timed event: field 4 is the start time in minutes since midnight.
        start = divmod(int(line[4]), 60)
        event['dtstart'] = [datetime(dat[0], dat[1], dat[2], start[0], start[1], tzinfo=self._localtz)]
        if line[3] != '*':
            # Field 3 is the duration in minutes, when present.
            event['duration'] = timedelta(minutes=int(line[3]))
    else:
        # All-day event.
        event['dtstart'] = [date(dat[0], dat[1], dat[2])]
    msg = ' '.join(line[5:]) if line[4] == '*' else line[6]
    msg = msg.strip().replace('%_', '\n').replace('["["]', '[')
    if ' at ' in msg:
        # "<summary> at <location>" convention; split on the last occurrence.
        (event['msg'], event['location']) = msg.rsplit(' at ', 1)
    else:
        event['msg'] = msg
    if '%"' in text:
        event['description'] = Remind._gen_description(text)
    tags = line[2].split(',')
    # iCalendar CLASS values are carried as Remind tags.
    classes = ['PUBLIC', 'PRIVATE', 'CONFIDENTIAL']
    for tag in tags[:-1]:
        if tag in classes:
            event['class'] = tag
    event['categories'] = [tag for tag in tags[:-1] if tag not in classes]
    # The last tag holds the UID (after a fixed-width prefix).
    event['uid'] = '%s@%s' % (tags[-1][7:], getfqdn())
    return event
@staticmethod
def _interval(dates):
"""Return the distance between all dates and 0 if they are different"""
interval = (dates[1] - dates[0]).days
last = dates[0]
for dat in dates[1:]:
if (dat - last).days != interval:
return 0
last = dat
return interval
@staticmethod
def _gen_dtend_rrule(dtstarts, vevent):
    """Generate an rdate or rrule from a list of dates and add it to the vevent"""
    interval = Remind._interval(dtstarts)
    if interval > 0 and interval % 7 == 0:
        # Evenly spaced by whole weeks -> WEEKLY rule.
        rset = rrule.rruleset()
        rset.rrule(rrule.rrule(freq=rrule.WEEKLY, interval=interval // 7, count=len(dtstarts)))
        vevent.rruleset = rset
    elif interval > 1:
        # Evenly spaced by a fixed number of days -> DAILY rule with interval.
        rset = rrule.rruleset()
        rset.rrule(rrule.rrule(freq=rrule.DAILY, interval=interval, count=len(dtstarts)))
        vevent.rruleset = rset
    elif interval > 0:
        # Consecutive days.
        if isinstance(dtstarts[0], datetime):
            rset = rrule.rruleset()
            rset.rrule(rrule.rrule(freq=rrule.DAILY, count=len(dtstarts)))
            vevent.rruleset = rset
        else:
            # Consecutive all-day dates collapse to one multi-day event
            # (DTEND is exclusive, hence +1 day on the last date).
            vevent.add('dtend').value = dtstarts[-1] + timedelta(days=1)
    else:
        # Irregular spacing -> explicit RDATEs.
        rset = rrule.rruleset()
        if isinstance(dtstarts[0], datetime):
            for dat in dtstarts:
                rset.rdate(dat)
        else:
            for dat in dtstarts:
                rset.rdate(datetime(dat.year, dat.month, dat.day))
        # temporary set dtstart to a different date, so it's not
        # removed from rset by python-vobject works around bug in
        # Android:
        # https://github.com/rfc2822/davdroid/issues/340
        vevent.dtstart.value = dtstarts[0] - timedelta(days=1)
        vevent.rruleset = rset
        vevent.dtstart.value = dtstarts[0]
        if not isinstance(dtstarts[0], datetime):
            vevent.add('dtend').value = dtstarts[0] + timedelta(days=1)
def _gen_vevent(self, event, vevent):
    """Generate vevent from given event"""
    vevent.add('dtstart').value = event['dtstart'][0]
    vevent.add('dtstamp').value = datetime.fromtimestamp(self._mtime)
    vevent.add('summary').value = event['msg']
    vevent.add('uid').value = event['uid']
    if 'class' in event:
        vevent.add('class').value = event['class']
    if 'categories' in event and len(event['categories']) > 0:
        vevent.add('categories').value = event['categories']
    if 'location' in event:
        vevent.add('location').value = event['location']
    if 'description' in event:
        vevent.add('description').value = event['description']
    if isinstance(event['dtstart'][0], datetime):
        # Timed event: optionally attach a display alarm, then set
        # either DURATION or a zero-length DTEND.
        if self._alarm != timedelta():
            valarm = vevent.add('valarm')
            valarm.add('trigger').value = self._alarm
            valarm.add('action').value = 'DISPLAY'
            valarm.add('description').value = event['msg']
        if 'duration' in event:
            vevent.add('duration').value = event['duration']
        else:
            vevent.add('dtend').value = event['dtstart'][0]
    elif len(event['dtstart']) == 1:
        # Single all-day event: DTEND is exclusive, so add one day.
        vevent.add('dtend').value = event['dtstart'][0] + timedelta(days=1)
    if len(event['dtstart']) > 1:
        # Multiple occurrences become an rrule/rdate set.
        Remind._gen_dtend_rrule(event['dtstart'], vevent)
def _update(self):
    """Reload Remind files if the mtime is newer"""
    needs_reload = not self._reminders
    with self._lock:
        if not needs_reload:
            for fname in self._reminders:
                if getmtime(fname) > self._mtime:
                    needs_reload = True
                    break
        if needs_reload:
            self._reminders = self._parse_remind(self._filename)

def get_filesnames(self):
    """All filenames parsed by remind (including included files)"""
    self._update()
    return list(self._reminders.keys())
@staticmethod
def _get_uid(line):
"""UID of a remind line"""
return '%s@%s' % (md5(line[:-1].encode('utf-8')).hexdigest(), getfqdn())
def get_uids(self, filename=None):
    """UIDs of all reminders in the file excluding included files

    If a filename is specified, only it's UIDs are return, otherwise all.

    filename -- the remind file
    """
    self._update()
    if filename:
        if filename not in self._reminders:
            return []
        return self._reminders[filename].keys()
    return [uid for uids in self._reminders.values() for uid in uids]

def to_vobject_etag(self, filename, uid):
    """Return iCal object and etag of one Remind entry

    filename -- the remind file
    uid -- the UID of the Remind line
    """
    # to_vobjects returns (uid, cal, etag) tuples; keep (cal, etag).
    return self.to_vobjects(filename, [uid])[0][1:3]
def to_vobjects(self, filename, uids=None):
    """Return iCal objects and etags of all Remind entries in uids

    filename -- the remind file
    uids -- the UIDs of the Remind lines (all if None)
    """
    self._update()
    entries = uids if uids else self._reminders[filename]
    items = []
    for uid in entries:
        cal = iCalendar()
        self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))
        digest = md5()
        digest.update(self._reminders[filename][uid]['line'].encode("utf-8"))
        items.append((uid, cal, '"%s"' % digest.hexdigest()))
    return items

def to_vobject(self, filename=None, uid=None):
    """Return iCal object of Remind lines

    If filename and UID are specified, the vObject only contains that event.
    If only a filename is specified, the vObject contains all events in the file.
    Otherwise the vObject contains all all objects of all files associated with the Remind object.

    filename -- the remind file
    uid -- the UID of the Remind line
    """
    self._update()
    cal = iCalendar()
    if uid:
        events = [self._reminders[filename][uid]]
    elif filename:
        events = self._reminders[filename].values()
    else:
        events = [event for per_file in self._reminders.values()
                  for event in per_file.values()]
    for event in events:
        self._gen_vevent(event, cal.add('vevent'))
    return cal

def stdin_to_vobject(self, lines):
    """Return iCal object of the Remind commands in lines"""
    cal = iCalendar()
    for event in self._parse_remind('-', lines)['-'].values():
        self._gen_vevent(event, cal.add('vevent'))
    return cal
@staticmethod
def _parse_rdate(rdates):
"""Convert from iCal rdate to Remind trigdate syntax"""
trigdates = [rdate.strftime("trigdate()=='%Y-%m-%d'") for rdate in rdates]
return 'SATISFY [%s]' % '||'.join(trigdates)
@staticmethod
def _parse_rruleset(rruleset):
    """Convert from iCal rrule to Remind recurrence syntax"""
    # pylint: disable=protected-access
    if rruleset._rrule[0]._freq == 0:
        return []
    rep = []
    if rruleset._rrule[0]._byweekday and len(rruleset._rrule[0]._byweekday) > 1:
        # Weekday-restricted rules repeat daily and skip omitted days below.
        rep.append('*1')
    elif rruleset._rrule[0]._freq == rrule.DAILY:
        rep.append('*%d' % rruleset._rrule[0]._interval)
    elif rruleset._rrule[0]._freq == rrule.WEEKLY:
        rep.append('*%d' % (7 * rruleset._rrule[0]._interval))
    else:
        # Other frequencies are expanded to an explicit date list.
        return Remind._parse_rdate(rruleset._rrule[0])
    if rruleset._rrule[0]._byweekday and len(rruleset._rrule[0]._byweekday) > 1:
        # OMIT the weekdays that are NOT in the BYDAY list.
        daynums = set(range(7)) - set(rruleset._rrule[0]._byweekday)
        weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
        days = [weekdays[day] for day in daynums]
        rep.append('SKIP OMIT %s' % ' '.join(days))
    if rruleset._rrule[0]._until:
        rep.append(rruleset._rrule[0]._until.strftime('UNTIL %b %d %Y').replace(' 0', ' '))
    elif rruleset._rrule[0]._count:
        # COUNT has no Remind equivalent; use the last occurrence as UNTIL.
        rep.append(rruleset[-1].strftime('UNTIL %b %d %Y').replace(' 0', ' '))
    return rep
@staticmethod
def _event_duration(vevent):
"""unify dtend and duration to the duration of the given vevent"""
if hasattr(vevent, 'dtend'):
return vevent.dtend.value - vevent.dtstart.value
elif hasattr(vevent, 'duration') and vevent.duration.value:
return vevent.duration.value
return timedelta(0)
@staticmethod
def _gen_msg(vevent, label, tail, sep):
"""Generate a Remind MSG from the given vevent.
Opposite of _gen_description()
"""
rem = ['MSG']
msg = []
if label:
msg.append(label)
if hasattr(vevent, 'summary') and vevent.summary.value:
msg.append(Remind._rem_clean(vevent.summary.value))
else:
msg.append('empty reminder')
if hasattr(vevent, 'location') and vevent.location.value:
msg.append('at %s' % Remind._rem_clean(vevent.location.value))
has_desc = hasattr(vevent, 'description') and vevent.description.value
if tail or has_desc:
rem.append('%%"%s%%"' % ' '.join(msg))
else:
rem.append(' '.join(msg))
if tail:
rem.append(tail)
if has_desc:
rem[-1] += sep + Remind._rem_clean(vevent.description.value)
return ' '.join(rem)
@staticmethod
def _rem_clean(rem):
"""Strip, transform newlines, and escape '[' in string so it's
acceptable as a remind entry."""
return rem.strip().replace('\n', '%_').replace('[', '["["]')
@staticmethod
def _abbr_tag(tag):
"""Transform a string so it's acceptable as a remind tag. """
return tag.replace(" ", "")[:48]
def to_remind(self, vevent, label=None, priority=None, tags=None, tail=None,
              sep=" ", postdate=None, posttime=None):
    """Generate a Remind command from the given vevent"""
    remind = ['REM']
    trigdates = None
    if hasattr(vevent, 'rrule'):
        # A list means an inline repeat spec; a str means a SATISFY clause.
        trigdates = Remind._parse_rruleset(vevent.rruleset)
    dtstart = vevent.dtstart.value
    # If we don't get timezone information, handle it as a naive datetime.
    # See https://github.com/jspricke/python-remind/issues/2 for reference.
    if isinstance(dtstart, datetime) and dtstart.tzinfo:
        dtstart = dtstart.astimezone(self._localtz)
    dtend = None
    if hasattr(vevent, 'dtend'):
        dtend = vevent.dtend.value
        if isinstance(dtend, datetime) and dtend.tzinfo:
            dtend = dtend.astimezone(self._localtz)
    if not hasattr(vevent, 'rdate') and not isinstance(trigdates, str):
        remind.append(dtstart.strftime('%b %d %Y').replace(' 0', ' '))
    if postdate:
        remind.append(postdate)
    if priority:
        remind.append('PRIORITY %s' % priority)
    if isinstance(trigdates, list):
        remind.extend(trigdates)
    duration = Remind._event_duration(vevent)
    if type(dtstart) is date and duration.days > 1:
        # Multi-day all-day event: repeat daily until the (exclusive) end.
        remind.append('*1')
        if dtend is not None:
            dtend -= timedelta(days=1)
            remind.append(dtend.strftime('UNTIL %b %d %Y').replace(' 0', ' '))
    if isinstance(dtstart, datetime):
        remind.append(dtstart.strftime('AT %H:%M').replace(' 0', ' '))
        if posttime:
            remind.append(posttime)
        if duration.total_seconds() > 0:
            remind.append('DURATION %d:%02d' % divmod(duration.total_seconds() / 60, 60))
    if hasattr(vevent, 'rdate'):
        remind.append(Remind._parse_rdate(vevent.rdate.value))
    elif isinstance(trigdates, str):
        remind.append(trigdates)
    if hasattr(vevent, 'class'):
        remind.append('TAG %s' % Remind._abbr_tag(vevent.getChildValue('class')))
    if tags:
        remind.extend(['TAG %s' % Remind._abbr_tag(tag) for tag in tags])
    if hasattr(vevent, 'categories_list'):
        for categories in vevent.categories_list:
            for category in categories.value:
                remind.append('TAG %s' % Remind._abbr_tag(category))
    remind.append(Remind._gen_msg(vevent, label, tail, sep))
    return ' '.join(remind) + '\n'
def to_reminders(self, ical, label=None, priority=None, tags=None,
tail=None, sep=" ", postdate=None, posttime=None):
"""Return Remind commands for all events of a iCalendar"""
if not hasattr(ical, 'vevent_list'):
return ''
reminders = [self.to_remind(vevent, label, priority, tags, tail, sep,
postdate, posttime)
for vevent in ical.vevent_list]
return ''.join(reminders)
def append(self, ical, filename=None):
"""Append a Remind command generated from the iCalendar to the file"""
return self.append_vobject(readOne(ical), filename)
def append_vobject(self, ical, filename=None):
"""Append a Remind command generated from the iCalendar to the file"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
with self._lock:
outdat = self.to_reminders(ical)
open(filename, 'a').write(outdat)
return Remind._get_uid(outdat)
def remove(self, uid, filename=None):
"""Remove the Remind command with the uid from the file"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(filename).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
del rem[index]
open(filename, 'w').writelines(rem)
break
def replace(self, uid, ical, filename=None):
"""Update the Remind command with the uid in the file with the new iCalendar"""
return self.replace_vobject(uid, readOne(ical), filename)
def replace_vobject(self, uid, ical, filename=None):
"""Update the Remind command with the uid in the file with the new iCalendar"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(filename).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
rem[index] = self.to_reminders(ical)
new_uid = self._get_uid(rem[index])
open(filename, 'w').writelines(rem)
return new_uid
def move_vobject(self, uid, from_file, to_file):
"""Move the Remind command with the uid from from_file to to_file"""
if from_file not in self._reminders or to_file not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(from_file).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
del rem[index]
open(from_file, 'w').writelines(rem)
open(to_file, 'a').write(line)
break
def get_meta(self):
"""Meta tags of the vObject collection"""
return {'tag': 'VCALENDAR', 'C:supported-calendar-component-set': 'VEVENT'}
def last_modified(self):
"""Last time the Remind files where parsed"""
self._update()
return self._mtime
def ics2rem():
"""Command line tool to convert from iCalendar to Remind"""
from argparse import ArgumentParser, FileType
from sys import stdin, stdout
parser = ArgumentParser(description='Converter from iCalendar to Remind syntax.')
parser.add_argument('-l', '--label', help='Label for every Remind entry')
parser.add_argument('-p', '--priority', type=int,
help='Priority for every Remind entry (0..9999)')
parser.add_argument('-t', '--tag', action='append',
help='Tag(s) for every Remind entry')
parser.add_argument('--tail',
help='Text to append to every remind summary, following final %%"')
parser.add_argument('--sep', default=" ",
help='String to separate summary (and tail) from description')
parser.add_argument('--postdate',
help='String to follow the date in every Remind entry. '
'Useful for entering "back" and "delta" fields (see man remind).')
parser.add_argument('--posttime',
help='String to follow the time in every timed Remind entry. '
'Useful for entering "tdelta" and "trepeat" fields (see man remind).')
parser.add_argument('-z', '--zone',
help='Timezone of Remind file (default: local timezone)')
parser.add_argument('infile', nargs='?', type=FileType('r'), default=stdin,
help='Input iCalendar file (default: stdin)')
parser.add_argument('outfile', nargs='?', type=FileType('w'), default=stdout,
help='Output Remind file (default: stdout)')
args = parser.parse_args()
zone = timezone(args.zone) if args.zone else None
vobject = readOne(args.infile.read())
rem = Remind(localtz=zone).to_reminders(
vobject, args.label, args.priority, args.tag, args.tail, args.sep,
args.postdate, args.posttime)
args.outfile.write(rem)
|
jspricke/python-remind | remind.py | ics2rem | python | def ics2rem():
from argparse import ArgumentParser, FileType
from sys import stdin, stdout
parser = ArgumentParser(description='Converter from iCalendar to Remind syntax.')
parser.add_argument('-l', '--label', help='Label for every Remind entry')
parser.add_argument('-p', '--priority', type=int,
help='Priority for every Remind entry (0..9999)')
parser.add_argument('-t', '--tag', action='append',
help='Tag(s) for every Remind entry')
parser.add_argument('--tail',
help='Text to append to every remind summary, following final %%"')
parser.add_argument('--sep', default=" ",
help='String to separate summary (and tail) from description')
parser.add_argument('--postdate',
help='String to follow the date in every Remind entry. '
'Useful for entering "back" and "delta" fields (see man remind).')
parser.add_argument('--posttime',
help='String to follow the time in every timed Remind entry. '
'Useful for entering "tdelta" and "trepeat" fields (see man remind).')
parser.add_argument('-z', '--zone',
help='Timezone of Remind file (default: local timezone)')
parser.add_argument('infile', nargs='?', type=FileType('r'), default=stdin,
help='Input iCalendar file (default: stdin)')
parser.add_argument('outfile', nargs='?', type=FileType('w'), default=stdout,
help='Output Remind file (default: stdout)')
args = parser.parse_args()
zone = timezone(args.zone) if args.zone else None
vobject = readOne(args.infile.read())
rem = Remind(localtz=zone).to_reminders(
vobject, args.label, args.priority, args.tag, args.tail, args.sep,
args.postdate, args.posttime)
args.outfile.write(rem) | Command line tool to convert from iCalendar to Remind | train | https://github.com/jspricke/python-remind/blob/dda2aa8fc20b87b9c9fcbca2b67bce73911d05d1/remind.py#L629-L664 | [
"def to_reminders(self, ical, label=None, priority=None, tags=None,\n tail=None, sep=\" \", postdate=None, posttime=None):\n \"\"\"Return Remind commands for all events of a iCalendar\"\"\"\n if not hasattr(ical, 'vevent_list'):\n return ''\n\n reminders = [self.to_remind(vevent, label, priority, tags, tail, sep,\n postdate, posttime)\n for vevent in ical.vevent_list]\n return ''.join(reminders)\n"
] | # Python library to convert between Remind and iCalendar
#
# Copyright (C) 2013-2018 Jochen Sprickerhof
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Python library to convert between Remind and iCalendar"""
from datetime import date, datetime, timedelta
from dateutil import rrule
from hashlib import md5
from os.path import getmtime, expanduser
from pytz import timezone
from socket import getfqdn
from subprocess import Popen, PIPE
from threading import Lock
from tzlocal import get_localzone
from vobject import readOne, iCalendar
class Remind(object):
"""Represents a collection of Remind files"""
def __init__(self, filename=expanduser('~/.reminders'), localtz=None,
startdate=date.today() - timedelta(weeks=12), month=15,
alarm=timedelta(minutes=-10)):
"""Constructor
filename -- the remind file (included files will be used as well)
localtz -- the timezone of the remind file
startdate -- the date to start parsing, will be passed to remind
month -- how many month to parse, will be passed to remind -s
"""
self._localtz = localtz if localtz else get_localzone()
self._filename = filename
self._startdate = startdate
self._month = month
self._lock = Lock()
self._reminders = {}
self._mtime = 0
self._alarm = alarm
self._update()
def _parse_remind(self, filename, lines=''):
"""Calls remind and parses the output into a dict
filename -- the remind file (included files will be used as well)
lines -- used as stdin to remind (filename will be set to -)
"""
files = {}
reminders = {}
if lines:
filename = '-'
files[filename] = lines
reminders[filename] = {}
cmd = ['remind', '-l', '-s%d' % self._month, '-b1', '-y', '-r',
filename, str(self._startdate)]
try:
rem = Popen(cmd, stdin=PIPE, stdout=PIPE).communicate(input=lines.encode('utf-8'))[0].decode('utf-8')
except OSError:
raise OSError('Error running: %s' % ' '.join(cmd))
rem = rem.splitlines()
for (fileinfo, line) in zip(rem[::2], rem[1::2]):
fileinfo = fileinfo.split()
src_filename = fileinfo[3]
if src_filename not in files:
# There is a race condition with the remind call above here.
# This could be solved by parsing the remind -de output,
# but I don't see an easy way to do that.
files[src_filename] = open(src_filename).readlines()
reminders[src_filename] = {}
mtime = getmtime(src_filename)
if mtime > self._mtime:
self._mtime = mtime
text = files[src_filename][int(fileinfo[2]) - 1]
event = self._parse_remind_line(line, text)
if event['uid'] in reminders[src_filename]:
reminders[src_filename][event['uid']]['dtstart'] += event['dtstart']
reminders[src_filename][event['uid']]['line'] += line
else:
reminders[src_filename][event['uid']] = event
reminders[src_filename][event['uid']]['line'] = line
# Find included files without reminders and add them to the file list
for source in files.values():
for line in source:
if line.startswith('include'):
new_file = line.split(' ')[1].strip()
if new_file not in reminders:
reminders[new_file] = {}
mtime = getmtime(new_file)
if mtime > self._mtime:
self._mtime = mtime
return reminders
@staticmethod
def _gen_description(text):
"""Convert from Remind MSG to iCal description
Opposite of _gen_msg()
"""
return text[text.rfind('%"') + 3:].replace('%_', '\n').replace('["["]', '[').strip()
def _parse_remind_line(self, line, text):
"""Parse a line of remind output into a dict
line -- the remind output
text -- the original remind input
"""
event = {}
line = line.split(None, 6)
dat = [int(f) for f in line[0].split('/')]
if line[4] != '*':
start = divmod(int(line[4]), 60)
event['dtstart'] = [datetime(dat[0], dat[1], dat[2], start[0], start[1], tzinfo=self._localtz)]
if line[3] != '*':
event['duration'] = timedelta(minutes=int(line[3]))
else:
event['dtstart'] = [date(dat[0], dat[1], dat[2])]
msg = ' '.join(line[5:]) if line[4] == '*' else line[6]
msg = msg.strip().replace('%_', '\n').replace('["["]', '[')
if ' at ' in msg:
(event['msg'], event['location']) = msg.rsplit(' at ', 1)
else:
event['msg'] = msg
if '%"' in text:
event['description'] = Remind._gen_description(text)
tags = line[2].split(',')
classes = ['PUBLIC', 'PRIVATE', 'CONFIDENTIAL']
for tag in tags[:-1]:
if tag in classes:
event['class'] = tag
event['categories'] = [tag for tag in tags[:-1] if tag not in classes]
event['uid'] = '%s@%s' % (tags[-1][7:], getfqdn())
return event
@staticmethod
def _interval(dates):
"""Return the distance between all dates and 0 if they are different"""
interval = (dates[1] - dates[0]).days
last = dates[0]
for dat in dates[1:]:
if (dat - last).days != interval:
return 0
last = dat
return interval
@staticmethod
def _gen_dtend_rrule(dtstarts, vevent):
"""Generate an rdate or rrule from a list of dates and add it to the vevent"""
interval = Remind._interval(dtstarts)
if interval > 0 and interval % 7 == 0:
rset = rrule.rruleset()
rset.rrule(rrule.rrule(freq=rrule.WEEKLY, interval=interval // 7, count=len(dtstarts)))
vevent.rruleset = rset
elif interval > 1:
rset = rrule.rruleset()
rset.rrule(rrule.rrule(freq=rrule.DAILY, interval=interval, count=len(dtstarts)))
vevent.rruleset = rset
elif interval > 0:
if isinstance(dtstarts[0], datetime):
rset = rrule.rruleset()
rset.rrule(rrule.rrule(freq=rrule.DAILY, count=len(dtstarts)))
vevent.rruleset = rset
else:
vevent.add('dtend').value = dtstarts[-1] + timedelta(days=1)
else:
rset = rrule.rruleset()
if isinstance(dtstarts[0], datetime):
for dat in dtstarts:
rset.rdate(dat)
else:
for dat in dtstarts:
rset.rdate(datetime(dat.year, dat.month, dat.day))
# temporary set dtstart to a different date, so it's not
# removed from rset by python-vobject works around bug in
# Android:
# https://github.com/rfc2822/davdroid/issues/340
vevent.dtstart.value = dtstarts[0] - timedelta(days=1)
vevent.rruleset = rset
vevent.dtstart.value = dtstarts[0]
if not isinstance(dtstarts[0], datetime):
vevent.add('dtend').value = dtstarts[0] + timedelta(days=1)
def _gen_vevent(self, event, vevent):
"""Generate vevent from given event"""
vevent.add('dtstart').value = event['dtstart'][0]
vevent.add('dtstamp').value = datetime.fromtimestamp(self._mtime)
vevent.add('summary').value = event['msg']
vevent.add('uid').value = event['uid']
if 'class' in event:
vevent.add('class').value = event['class']
if 'categories' in event and len(event['categories']) > 0:
vevent.add('categories').value = event['categories']
if 'location' in event:
vevent.add('location').value = event['location']
if 'description' in event:
vevent.add('description').value = event['description']
if isinstance(event['dtstart'][0], datetime):
if self._alarm != timedelta():
valarm = vevent.add('valarm')
valarm.add('trigger').value = self._alarm
valarm.add('action').value = 'DISPLAY'
valarm.add('description').value = event['msg']
if 'duration' in event:
vevent.add('duration').value = event['duration']
else:
vevent.add('dtend').value = event['dtstart'][0]
elif len(event['dtstart']) == 1:
vevent.add('dtend').value = event['dtstart'][0] + timedelta(days=1)
if len(event['dtstart']) > 1:
Remind._gen_dtend_rrule(event['dtstart'], vevent)
def _update(self):
"""Reload Remind files if the mtime is newer"""
update = not self._reminders
with self._lock:
for fname in self._reminders:
if getmtime(fname) > self._mtime:
update = True
break
if update:
self._reminders = self._parse_remind(self._filename)
def get_filesnames(self):
"""All filenames parsed by remind (including included files)"""
self._update()
return list(self._reminders.keys())
@staticmethod
def _get_uid(line):
"""UID of a remind line"""
return '%s@%s' % (md5(line[:-1].encode('utf-8')).hexdigest(), getfqdn())
def get_uids(self, filename=None):
"""UIDs of all reminders in the file excluding included files
If a filename is specified, only it's UIDs are return, otherwise all.
filename -- the remind file
"""
self._update()
if filename:
if filename not in self._reminders:
return []
return self._reminders[filename].keys()
return [uid for uids in self._reminders.values() for uid in uids]
def to_vobject_etag(self, filename, uid):
"""Return iCal object and etag of one Remind entry
filename -- the remind file
uid -- the UID of the Remind line
"""
return self.to_vobjects(filename, [uid])[0][1:3]
def to_vobjects(self, filename, uids=None):
"""Return iCal objects and etags of all Remind entries in uids
filename -- the remind file
uids -- the UIDs of the Remind lines (all if None)
"""
self._update()
if not uids:
uids = self._reminders[filename]
items = []
for uid in uids:
cal = iCalendar()
self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))
etag = md5()
etag.update(self._reminders[filename][uid]['line'].encode("utf-8"))
items.append((uid, cal, '"%s"' % etag.hexdigest()))
return items
def to_vobject(self, filename=None, uid=None):
"""Return iCal object of Remind lines
If filename and UID are specified, the vObject only contains that event.
If only a filename is specified, the vObject contains all events in the file.
Otherwise the vObject contains all all objects of all files associated with the Remind object.
filename -- the remind file
uid -- the UID of the Remind line
"""
self._update()
cal = iCalendar()
if uid:
self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))
elif filename:
for event in self._reminders[filename].values():
self._gen_vevent(event, cal.add('vevent'))
else:
for filename in self._reminders:
for event in self._reminders[filename].values():
self._gen_vevent(event, cal.add('vevent'))
return cal
def stdin_to_vobject(self, lines):
"""Return iCal object of the Remind commands in lines"""
cal = iCalendar()
for event in self._parse_remind('-', lines)['-'].values():
self._gen_vevent(event, cal.add('vevent'))
return cal
@staticmethod
def _parse_rdate(rdates):
"""Convert from iCal rdate to Remind trigdate syntax"""
trigdates = [rdate.strftime("trigdate()=='%Y-%m-%d'") for rdate in rdates]
return 'SATISFY [%s]' % '||'.join(trigdates)
@staticmethod
def _parse_rruleset(rruleset):
"""Convert from iCal rrule to Remind recurrence syntax"""
# pylint: disable=protected-access
if rruleset._rrule[0]._freq == 0:
return []
rep = []
if rruleset._rrule[0]._byweekday and len(rruleset._rrule[0]._byweekday) > 1:
rep.append('*1')
elif rruleset._rrule[0]._freq == rrule.DAILY:
rep.append('*%d' % rruleset._rrule[0]._interval)
elif rruleset._rrule[0]._freq == rrule.WEEKLY:
rep.append('*%d' % (7 * rruleset._rrule[0]._interval))
else:
return Remind._parse_rdate(rruleset._rrule[0])
if rruleset._rrule[0]._byweekday and len(rruleset._rrule[0]._byweekday) > 1:
daynums = set(range(7)) - set(rruleset._rrule[0]._byweekday)
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
days = [weekdays[day] for day in daynums]
rep.append('SKIP OMIT %s' % ' '.join(days))
if rruleset._rrule[0]._until:
rep.append(rruleset._rrule[0]._until.strftime('UNTIL %b %d %Y').replace(' 0', ' '))
elif rruleset._rrule[0]._count:
rep.append(rruleset[-1].strftime('UNTIL %b %d %Y').replace(' 0', ' '))
return rep
@staticmethod
def _event_duration(vevent):
"""unify dtend and duration to the duration of the given vevent"""
if hasattr(vevent, 'dtend'):
return vevent.dtend.value - vevent.dtstart.value
elif hasattr(vevent, 'duration') and vevent.duration.value:
return vevent.duration.value
return timedelta(0)
@staticmethod
def _gen_msg(vevent, label, tail, sep):
"""Generate a Remind MSG from the given vevent.
Opposite of _gen_description()
"""
rem = ['MSG']
msg = []
if label:
msg.append(label)
if hasattr(vevent, 'summary') and vevent.summary.value:
msg.append(Remind._rem_clean(vevent.summary.value))
else:
msg.append('empty reminder')
if hasattr(vevent, 'location') and vevent.location.value:
msg.append('at %s' % Remind._rem_clean(vevent.location.value))
has_desc = hasattr(vevent, 'description') and vevent.description.value
if tail or has_desc:
rem.append('%%"%s%%"' % ' '.join(msg))
else:
rem.append(' '.join(msg))
if tail:
rem.append(tail)
if has_desc:
rem[-1] += sep + Remind._rem_clean(vevent.description.value)
return ' '.join(rem)
@staticmethod
def _rem_clean(rem):
"""Strip, transform newlines, and escape '[' in string so it's
acceptable as a remind entry."""
return rem.strip().replace('\n', '%_').replace('[', '["["]')
@staticmethod
def _abbr_tag(tag):
"""Transform a string so it's acceptable as a remind tag. """
return tag.replace(" ", "")[:48]
def to_remind(self, vevent, label=None, priority=None, tags=None, tail=None,
sep=" ", postdate=None, posttime=None):
"""Generate a Remind command from the given vevent"""
remind = ['REM']
trigdates = None
if hasattr(vevent, 'rrule'):
trigdates = Remind._parse_rruleset(vevent.rruleset)
dtstart = vevent.dtstart.value
# If we don't get timezone information, handle it as a naive datetime.
# See https://github.com/jspricke/python-remind/issues/2 for reference.
if isinstance(dtstart, datetime) and dtstart.tzinfo:
dtstart = dtstart.astimezone(self._localtz)
dtend = None
if hasattr(vevent, 'dtend'):
dtend = vevent.dtend.value
if isinstance(dtend, datetime) and dtend.tzinfo:
dtend = dtend.astimezone(self._localtz)
if not hasattr(vevent, 'rdate') and not isinstance(trigdates, str):
remind.append(dtstart.strftime('%b %d %Y').replace(' 0', ' '))
if postdate:
remind.append(postdate)
if priority:
remind.append('PRIORITY %s' % priority)
if isinstance(trigdates, list):
remind.extend(trigdates)
duration = Remind._event_duration(vevent)
if type(dtstart) is date and duration.days > 1:
remind.append('*1')
if dtend is not None:
dtend -= timedelta(days=1)
remind.append(dtend.strftime('UNTIL %b %d %Y').replace(' 0', ' '))
if isinstance(dtstart, datetime):
remind.append(dtstart.strftime('AT %H:%M').replace(' 0', ' '))
if posttime:
remind.append(posttime)
if duration.total_seconds() > 0:
remind.append('DURATION %d:%02d' % divmod(duration.total_seconds() / 60, 60))
if hasattr(vevent, 'rdate'):
remind.append(Remind._parse_rdate(vevent.rdate.value))
elif isinstance(trigdates, str):
remind.append(trigdates)
if hasattr(vevent, 'class'):
remind.append('TAG %s' % Remind._abbr_tag(vevent.getChildValue('class')))
if tags:
remind.extend(['TAG %s' % Remind._abbr_tag(tag) for tag in tags])
if hasattr(vevent, 'categories_list'):
for categories in vevent.categories_list:
for category in categories.value:
remind.append('TAG %s' % Remind._abbr_tag(category))
remind.append(Remind._gen_msg(vevent, label, tail, sep))
return ' '.join(remind) + '\n'
def to_reminders(self, ical, label=None, priority=None, tags=None,
tail=None, sep=" ", postdate=None, posttime=None):
"""Return Remind commands for all events of a iCalendar"""
if not hasattr(ical, 'vevent_list'):
return ''
reminders = [self.to_remind(vevent, label, priority, tags, tail, sep,
postdate, posttime)
for vevent in ical.vevent_list]
return ''.join(reminders)
def append(self, ical, filename=None):
"""Append a Remind command generated from the iCalendar to the file"""
return self.append_vobject(readOne(ical), filename)
def append_vobject(self, ical, filename=None):
"""Append a Remind command generated from the iCalendar to the file"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
with self._lock:
outdat = self.to_reminders(ical)
open(filename, 'a').write(outdat)
return Remind._get_uid(outdat)
def remove(self, uid, filename=None):
"""Remove the Remind command with the uid from the file"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(filename).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
del rem[index]
open(filename, 'w').writelines(rem)
break
def replace(self, uid, ical, filename=None):
"""Update the Remind command with the uid in the file with the new iCalendar"""
return self.replace_vobject(uid, readOne(ical), filename)
def replace_vobject(self, uid, ical, filename=None):
"""Update the Remind command with the uid in the file with the new iCalendar"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(filename).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
rem[index] = self.to_reminders(ical)
new_uid = self._get_uid(rem[index])
open(filename, 'w').writelines(rem)
return new_uid
def move_vobject(self, uid, from_file, to_file):
"""Move the Remind command with the uid from from_file to to_file"""
if from_file not in self._reminders or to_file not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(from_file).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
del rem[index]
open(from_file, 'w').writelines(rem)
open(to_file, 'a').write(line)
break
def get_meta(self):
"""Meta tags of the vObject collection"""
return {'tag': 'VCALENDAR', 'C:supported-calendar-component-set': 'VEVENT'}
def last_modified(self):
"""Last time the Remind files where parsed"""
self._update()
return self._mtime
def rem2ics():
"""Command line tool to convert from Remind to iCalendar"""
# pylint: disable=maybe-no-member
from argparse import ArgumentParser, FileType
from dateutil.parser import parse
from sys import stdin, stdout
parser = ArgumentParser(description='Converter from Remind to iCalendar syntax.')
parser.add_argument('-s', '--startdate', type=lambda s: parse(s).date(),
default=date.today() - timedelta(weeks=12),
help='Start offset for remind call (default: -12 weeks)')
parser.add_argument('-m', '--month', type=int, default=15,
help='Number of month to generate calendar beginning wit startdate (default: 15)')
parser.add_argument('-a', '--alarm', type=int, default=-10,
help='Trigger time for the alarm before the event in minutes (default: -10)')
parser.add_argument('-z', '--zone',
help='Timezone of Remind file (default: local timezone)')
parser.add_argument('infile', nargs='?', default=expanduser('~/.reminders'),
help='The Remind file to process (default: ~/.reminders)')
parser.add_argument('outfile', nargs='?', type=FileType('w'), default=stdout,
help='Output iCalendar file (default: stdout)')
args = parser.parse_args()
zone = timezone(args.zone) if args.zone else None
if args.infile == '-':
remind = Remind(args.infile, zone, args.startdate, args.month, timedelta(minutes=args.alarm))
vobject = remind.stdin_to_vobject(stdin.read())
if vobject:
args.outfile.write(vobject.serialize())
else:
remind = Remind(args.infile, zone, args.startdate, args.month, timedelta(minutes=args.alarm))
args.outfile.write(remind.to_vobject().serialize())
|
jspricke/python-remind | remind.py | Remind._parse_remind | python | def _parse_remind(self, filename, lines=''):
files = {}
reminders = {}
if lines:
filename = '-'
files[filename] = lines
reminders[filename] = {}
cmd = ['remind', '-l', '-s%d' % self._month, '-b1', '-y', '-r',
filename, str(self._startdate)]
try:
rem = Popen(cmd, stdin=PIPE, stdout=PIPE).communicate(input=lines.encode('utf-8'))[0].decode('utf-8')
except OSError:
raise OSError('Error running: %s' % ' '.join(cmd))
rem = rem.splitlines()
for (fileinfo, line) in zip(rem[::2], rem[1::2]):
fileinfo = fileinfo.split()
src_filename = fileinfo[3]
if src_filename not in files:
# There is a race condition with the remind call above here.
# This could be solved by parsing the remind -de output,
# but I don't see an easy way to do that.
files[src_filename] = open(src_filename).readlines()
reminders[src_filename] = {}
mtime = getmtime(src_filename)
if mtime > self._mtime:
self._mtime = mtime
text = files[src_filename][int(fileinfo[2]) - 1]
event = self._parse_remind_line(line, text)
if event['uid'] in reminders[src_filename]:
reminders[src_filename][event['uid']]['dtstart'] += event['dtstart']
reminders[src_filename][event['uid']]['line'] += line
else:
reminders[src_filename][event['uid']] = event
reminders[src_filename][event['uid']]['line'] = line
# Find included files without reminders and add them to the file list
for source in files.values():
for line in source:
if line.startswith('include'):
new_file = line.split(' ')[1].strip()
if new_file not in reminders:
reminders[new_file] = {}
mtime = getmtime(new_file)
if mtime > self._mtime:
self._mtime = mtime
return reminders | Calls remind and parses the output into a dict
filename -- the remind file (included files will be used as well)
lines -- used as stdin to remind (filename will be set to -) | train | https://github.com/jspricke/python-remind/blob/dda2aa8fc20b87b9c9fcbca2b67bce73911d05d1/remind.py#L54-L109 | [
"def _parse_remind_line(self, line, text):\n \"\"\"Parse a line of remind output into a dict\n\n line -- the remind output\n text -- the original remind input\n \"\"\"\n event = {}\n line = line.split(None, 6)\n dat = [int(f) for f in line[0].split('/')]\n if line[4] != '*':\n start = divmod(int(line[4]), 60)\n event['dtstart'] = [datetime(dat[0], dat[1], dat[2], start[0], start[1], tzinfo=self._localtz)]\n if line[3] != '*':\n event['duration'] = timedelta(minutes=int(line[3]))\n else:\n event['dtstart'] = [date(dat[0], dat[1], dat[2])]\n\n msg = ' '.join(line[5:]) if line[4] == '*' else line[6]\n msg = msg.strip().replace('%_', '\\n').replace('[\"[\"]', '[')\n\n if ' at ' in msg:\n (event['msg'], event['location']) = msg.rsplit(' at ', 1)\n else:\n event['msg'] = msg\n\n if '%\"' in text:\n event['description'] = Remind._gen_description(text)\n\n tags = line[2].split(',')\n\n classes = ['PUBLIC', 'PRIVATE', 'CONFIDENTIAL']\n\n for tag in tags[:-1]:\n if tag in classes:\n event['class'] = tag\n\n event['categories'] = [tag for tag in tags[:-1] if tag not in classes]\n\n event['uid'] = '%s@%s' % (tags[-1][7:], getfqdn())\n\n return event\n"
] | class Remind(object):
"""Represents a collection of Remind files"""
def __init__(self, filename=expanduser('~/.reminders'), localtz=None,
startdate=date.today() - timedelta(weeks=12), month=15,
alarm=timedelta(minutes=-10)):
"""Constructor
filename -- the remind file (included files will be used as well)
localtz -- the timezone of the remind file
startdate -- the date to start parsing, will be passed to remind
month -- how many month to parse, will be passed to remind -s
"""
self._localtz = localtz if localtz else get_localzone()
self._filename = filename
self._startdate = startdate
self._month = month
self._lock = Lock()
self._reminders = {}
self._mtime = 0
self._alarm = alarm
self._update()
@staticmethod
def _gen_description(text):
"""Convert from Remind MSG to iCal description
Opposite of _gen_msg()
"""
return text[text.rfind('%"') + 3:].replace('%_', '\n').replace('["["]', '[').strip()
def _parse_remind_line(self, line, text):
"""Parse a line of remind output into a dict
line -- the remind output
text -- the original remind input
"""
event = {}
line = line.split(None, 6)
dat = [int(f) for f in line[0].split('/')]
if line[4] != '*':
start = divmod(int(line[4]), 60)
event['dtstart'] = [datetime(dat[0], dat[1], dat[2], start[0], start[1], tzinfo=self._localtz)]
if line[3] != '*':
event['duration'] = timedelta(minutes=int(line[3]))
else:
event['dtstart'] = [date(dat[0], dat[1], dat[2])]
msg = ' '.join(line[5:]) if line[4] == '*' else line[6]
msg = msg.strip().replace('%_', '\n').replace('["["]', '[')
if ' at ' in msg:
(event['msg'], event['location']) = msg.rsplit(' at ', 1)
else:
event['msg'] = msg
if '%"' in text:
event['description'] = Remind._gen_description(text)
tags = line[2].split(',')
classes = ['PUBLIC', 'PRIVATE', 'CONFIDENTIAL']
for tag in tags[:-1]:
if tag in classes:
event['class'] = tag
event['categories'] = [tag for tag in tags[:-1] if tag not in classes]
event['uid'] = '%s@%s' % (tags[-1][7:], getfqdn())
return event
@staticmethod
def _interval(dates):
"""Return the distance between all dates and 0 if they are different"""
interval = (dates[1] - dates[0]).days
last = dates[0]
for dat in dates[1:]:
if (dat - last).days != interval:
return 0
last = dat
return interval
@staticmethod
def _gen_dtend_rrule(dtstarts, vevent):
"""Generate an rdate or rrule from a list of dates and add it to the vevent"""
interval = Remind._interval(dtstarts)
if interval > 0 and interval % 7 == 0:
rset = rrule.rruleset()
rset.rrule(rrule.rrule(freq=rrule.WEEKLY, interval=interval // 7, count=len(dtstarts)))
vevent.rruleset = rset
elif interval > 1:
rset = rrule.rruleset()
rset.rrule(rrule.rrule(freq=rrule.DAILY, interval=interval, count=len(dtstarts)))
vevent.rruleset = rset
elif interval > 0:
if isinstance(dtstarts[0], datetime):
rset = rrule.rruleset()
rset.rrule(rrule.rrule(freq=rrule.DAILY, count=len(dtstarts)))
vevent.rruleset = rset
else:
vevent.add('dtend').value = dtstarts[-1] + timedelta(days=1)
else:
rset = rrule.rruleset()
if isinstance(dtstarts[0], datetime):
for dat in dtstarts:
rset.rdate(dat)
else:
for dat in dtstarts:
rset.rdate(datetime(dat.year, dat.month, dat.day))
# temporary set dtstart to a different date, so it's not
# removed from rset by python-vobject works around bug in
# Android:
# https://github.com/rfc2822/davdroid/issues/340
vevent.dtstart.value = dtstarts[0] - timedelta(days=1)
vevent.rruleset = rset
vevent.dtstart.value = dtstarts[0]
if not isinstance(dtstarts[0], datetime):
vevent.add('dtend').value = dtstarts[0] + timedelta(days=1)
def _gen_vevent(self, event, vevent):
"""Generate vevent from given event"""
vevent.add('dtstart').value = event['dtstart'][0]
vevent.add('dtstamp').value = datetime.fromtimestamp(self._mtime)
vevent.add('summary').value = event['msg']
vevent.add('uid').value = event['uid']
if 'class' in event:
vevent.add('class').value = event['class']
if 'categories' in event and len(event['categories']) > 0:
vevent.add('categories').value = event['categories']
if 'location' in event:
vevent.add('location').value = event['location']
if 'description' in event:
vevent.add('description').value = event['description']
if isinstance(event['dtstart'][0], datetime):
if self._alarm != timedelta():
valarm = vevent.add('valarm')
valarm.add('trigger').value = self._alarm
valarm.add('action').value = 'DISPLAY'
valarm.add('description').value = event['msg']
if 'duration' in event:
vevent.add('duration').value = event['duration']
else:
vevent.add('dtend').value = event['dtstart'][0]
elif len(event['dtstart']) == 1:
vevent.add('dtend').value = event['dtstart'][0] + timedelta(days=1)
if len(event['dtstart']) > 1:
Remind._gen_dtend_rrule(event['dtstart'], vevent)
def _update(self):
"""Reload Remind files if the mtime is newer"""
update = not self._reminders
with self._lock:
for fname in self._reminders:
if getmtime(fname) > self._mtime:
update = True
break
if update:
self._reminders = self._parse_remind(self._filename)
def get_filesnames(self):
"""All filenames parsed by remind (including included files)"""
self._update()
return list(self._reminders.keys())
@staticmethod
def _get_uid(line):
"""UID of a remind line"""
return '%s@%s' % (md5(line[:-1].encode('utf-8')).hexdigest(), getfqdn())
def get_uids(self, filename=None):
"""UIDs of all reminders in the file excluding included files
If a filename is specified, only it's UIDs are return, otherwise all.
filename -- the remind file
"""
self._update()
if filename:
if filename not in self._reminders:
return []
return self._reminders[filename].keys()
return [uid for uids in self._reminders.values() for uid in uids]
def to_vobject_etag(self, filename, uid):
"""Return iCal object and etag of one Remind entry
filename -- the remind file
uid -- the UID of the Remind line
"""
return self.to_vobjects(filename, [uid])[0][1:3]
def to_vobjects(self, filename, uids=None):
"""Return iCal objects and etags of all Remind entries in uids
filename -- the remind file
uids -- the UIDs of the Remind lines (all if None)
"""
self._update()
if not uids:
uids = self._reminders[filename]
items = []
for uid in uids:
cal = iCalendar()
self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))
etag = md5()
etag.update(self._reminders[filename][uid]['line'].encode("utf-8"))
items.append((uid, cal, '"%s"' % etag.hexdigest()))
return items
def to_vobject(self, filename=None, uid=None):
"""Return iCal object of Remind lines
If filename and UID are specified, the vObject only contains that event.
If only a filename is specified, the vObject contains all events in the file.
Otherwise the vObject contains all all objects of all files associated with the Remind object.
filename -- the remind file
uid -- the UID of the Remind line
"""
self._update()
cal = iCalendar()
if uid:
self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))
elif filename:
for event in self._reminders[filename].values():
self._gen_vevent(event, cal.add('vevent'))
else:
for filename in self._reminders:
for event in self._reminders[filename].values():
self._gen_vevent(event, cal.add('vevent'))
return cal
def stdin_to_vobject(self, lines):
"""Return iCal object of the Remind commands in lines"""
cal = iCalendar()
for event in self._parse_remind('-', lines)['-'].values():
self._gen_vevent(event, cal.add('vevent'))
return cal
@staticmethod
def _parse_rdate(rdates):
"""Convert from iCal rdate to Remind trigdate syntax"""
trigdates = [rdate.strftime("trigdate()=='%Y-%m-%d'") for rdate in rdates]
return 'SATISFY [%s]' % '||'.join(trigdates)
@staticmethod
def _parse_rruleset(rruleset):
"""Convert from iCal rrule to Remind recurrence syntax"""
# pylint: disable=protected-access
if rruleset._rrule[0]._freq == 0:
return []
rep = []
if rruleset._rrule[0]._byweekday and len(rruleset._rrule[0]._byweekday) > 1:
rep.append('*1')
elif rruleset._rrule[0]._freq == rrule.DAILY:
rep.append('*%d' % rruleset._rrule[0]._interval)
elif rruleset._rrule[0]._freq == rrule.WEEKLY:
rep.append('*%d' % (7 * rruleset._rrule[0]._interval))
else:
return Remind._parse_rdate(rruleset._rrule[0])
if rruleset._rrule[0]._byweekday and len(rruleset._rrule[0]._byweekday) > 1:
daynums = set(range(7)) - set(rruleset._rrule[0]._byweekday)
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
days = [weekdays[day] for day in daynums]
rep.append('SKIP OMIT %s' % ' '.join(days))
if rruleset._rrule[0]._until:
rep.append(rruleset._rrule[0]._until.strftime('UNTIL %b %d %Y').replace(' 0', ' '))
elif rruleset._rrule[0]._count:
rep.append(rruleset[-1].strftime('UNTIL %b %d %Y').replace(' 0', ' '))
return rep
@staticmethod
def _event_duration(vevent):
"""unify dtend and duration to the duration of the given vevent"""
if hasattr(vevent, 'dtend'):
return vevent.dtend.value - vevent.dtstart.value
elif hasattr(vevent, 'duration') and vevent.duration.value:
return vevent.duration.value
return timedelta(0)
@staticmethod
def _gen_msg(vevent, label, tail, sep):
"""Generate a Remind MSG from the given vevent.
Opposite of _gen_description()
"""
rem = ['MSG']
msg = []
if label:
msg.append(label)
if hasattr(vevent, 'summary') and vevent.summary.value:
msg.append(Remind._rem_clean(vevent.summary.value))
else:
msg.append('empty reminder')
if hasattr(vevent, 'location') and vevent.location.value:
msg.append('at %s' % Remind._rem_clean(vevent.location.value))
has_desc = hasattr(vevent, 'description') and vevent.description.value
if tail or has_desc:
rem.append('%%"%s%%"' % ' '.join(msg))
else:
rem.append(' '.join(msg))
if tail:
rem.append(tail)
if has_desc:
rem[-1] += sep + Remind._rem_clean(vevent.description.value)
return ' '.join(rem)
@staticmethod
def _rem_clean(rem):
"""Strip, transform newlines, and escape '[' in string so it's
acceptable as a remind entry."""
return rem.strip().replace('\n', '%_').replace('[', '["["]')
@staticmethod
def _abbr_tag(tag):
"""Transform a string so it's acceptable as a remind tag. """
return tag.replace(" ", "")[:48]
def to_remind(self, vevent, label=None, priority=None, tags=None, tail=None,
sep=" ", postdate=None, posttime=None):
"""Generate a Remind command from the given vevent"""
remind = ['REM']
trigdates = None
if hasattr(vevent, 'rrule'):
trigdates = Remind._parse_rruleset(vevent.rruleset)
dtstart = vevent.dtstart.value
# If we don't get timezone information, handle it as a naive datetime.
# See https://github.com/jspricke/python-remind/issues/2 for reference.
if isinstance(dtstart, datetime) and dtstart.tzinfo:
dtstart = dtstart.astimezone(self._localtz)
dtend = None
if hasattr(vevent, 'dtend'):
dtend = vevent.dtend.value
if isinstance(dtend, datetime) and dtend.tzinfo:
dtend = dtend.astimezone(self._localtz)
if not hasattr(vevent, 'rdate') and not isinstance(trigdates, str):
remind.append(dtstart.strftime('%b %d %Y').replace(' 0', ' '))
if postdate:
remind.append(postdate)
if priority:
remind.append('PRIORITY %s' % priority)
if isinstance(trigdates, list):
remind.extend(trigdates)
duration = Remind._event_duration(vevent)
if type(dtstart) is date and duration.days > 1:
remind.append('*1')
if dtend is not None:
dtend -= timedelta(days=1)
remind.append(dtend.strftime('UNTIL %b %d %Y').replace(' 0', ' '))
if isinstance(dtstart, datetime):
remind.append(dtstart.strftime('AT %H:%M').replace(' 0', ' '))
if posttime:
remind.append(posttime)
if duration.total_seconds() > 0:
remind.append('DURATION %d:%02d' % divmod(duration.total_seconds() / 60, 60))
if hasattr(vevent, 'rdate'):
remind.append(Remind._parse_rdate(vevent.rdate.value))
elif isinstance(trigdates, str):
remind.append(trigdates)
if hasattr(vevent, 'class'):
remind.append('TAG %s' % Remind._abbr_tag(vevent.getChildValue('class')))
if tags:
remind.extend(['TAG %s' % Remind._abbr_tag(tag) for tag in tags])
if hasattr(vevent, 'categories_list'):
for categories in vevent.categories_list:
for category in categories.value:
remind.append('TAG %s' % Remind._abbr_tag(category))
remind.append(Remind._gen_msg(vevent, label, tail, sep))
return ' '.join(remind) + '\n'
def to_reminders(self, ical, label=None, priority=None, tags=None,
tail=None, sep=" ", postdate=None, posttime=None):
"""Return Remind commands for all events of a iCalendar"""
if not hasattr(ical, 'vevent_list'):
return ''
reminders = [self.to_remind(vevent, label, priority, tags, tail, sep,
postdate, posttime)
for vevent in ical.vevent_list]
return ''.join(reminders)
def append(self, ical, filename=None):
"""Append a Remind command generated from the iCalendar to the file"""
return self.append_vobject(readOne(ical), filename)
def append_vobject(self, ical, filename=None):
"""Append a Remind command generated from the iCalendar to the file"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
with self._lock:
outdat = self.to_reminders(ical)
open(filename, 'a').write(outdat)
return Remind._get_uid(outdat)
def remove(self, uid, filename=None):
"""Remove the Remind command with the uid from the file"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(filename).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
del rem[index]
open(filename, 'w').writelines(rem)
break
def replace(self, uid, ical, filename=None):
"""Update the Remind command with the uid in the file with the new iCalendar"""
return self.replace_vobject(uid, readOne(ical), filename)
def replace_vobject(self, uid, ical, filename=None):
"""Update the Remind command with the uid in the file with the new iCalendar"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(filename).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
rem[index] = self.to_reminders(ical)
new_uid = self._get_uid(rem[index])
open(filename, 'w').writelines(rem)
return new_uid
def move_vobject(self, uid, from_file, to_file):
"""Move the Remind command with the uid from from_file to to_file"""
if from_file not in self._reminders or to_file not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(from_file).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
del rem[index]
open(from_file, 'w').writelines(rem)
open(to_file, 'a').write(line)
break
def get_meta(self):
"""Meta tags of the vObject collection"""
return {'tag': 'VCALENDAR', 'C:supported-calendar-component-set': 'VEVENT'}
def last_modified(self):
"""Last time the Remind files where parsed"""
self._update()
return self._mtime
|
jspricke/python-remind | remind.py | Remind._parse_remind_line | python | def _parse_remind_line(self, line, text):
event = {}
line = line.split(None, 6)
dat = [int(f) for f in line[0].split('/')]
if line[4] != '*':
start = divmod(int(line[4]), 60)
event['dtstart'] = [datetime(dat[0], dat[1], dat[2], start[0], start[1], tzinfo=self._localtz)]
if line[3] != '*':
event['duration'] = timedelta(minutes=int(line[3]))
else:
event['dtstart'] = [date(dat[0], dat[1], dat[2])]
msg = ' '.join(line[5:]) if line[4] == '*' else line[6]
msg = msg.strip().replace('%_', '\n').replace('["["]', '[')
if ' at ' in msg:
(event['msg'], event['location']) = msg.rsplit(' at ', 1)
else:
event['msg'] = msg
if '%"' in text:
event['description'] = Remind._gen_description(text)
tags = line[2].split(',')
classes = ['PUBLIC', 'PRIVATE', 'CONFIDENTIAL']
for tag in tags[:-1]:
if tag in classes:
event['class'] = tag
event['categories'] = [tag for tag in tags[:-1] if tag not in classes]
event['uid'] = '%s@%s' % (tags[-1][7:], getfqdn())
return event | Parse a line of remind output into a dict
line -- the remind output
text -- the original remind input | train | https://github.com/jspricke/python-remind/blob/dda2aa8fc20b87b9c9fcbca2b67bce73911d05d1/remind.py#L118-L158 | null | class Remind(object):
"""Represents a collection of Remind files"""
def __init__(self, filename=expanduser('~/.reminders'), localtz=None,
startdate=date.today() - timedelta(weeks=12), month=15,
alarm=timedelta(minutes=-10)):
"""Constructor
filename -- the remind file (included files will be used as well)
localtz -- the timezone of the remind file
startdate -- the date to start parsing, will be passed to remind
month -- how many month to parse, will be passed to remind -s
"""
self._localtz = localtz if localtz else get_localzone()
self._filename = filename
self._startdate = startdate
self._month = month
self._lock = Lock()
self._reminders = {}
self._mtime = 0
self._alarm = alarm
self._update()
def _parse_remind(self, filename, lines=''):
"""Calls remind and parses the output into a dict
filename -- the remind file (included files will be used as well)
lines -- used as stdin to remind (filename will be set to -)
"""
files = {}
reminders = {}
if lines:
filename = '-'
files[filename] = lines
reminders[filename] = {}
cmd = ['remind', '-l', '-s%d' % self._month, '-b1', '-y', '-r',
filename, str(self._startdate)]
try:
rem = Popen(cmd, stdin=PIPE, stdout=PIPE).communicate(input=lines.encode('utf-8'))[0].decode('utf-8')
except OSError:
raise OSError('Error running: %s' % ' '.join(cmd))
rem = rem.splitlines()
for (fileinfo, line) in zip(rem[::2], rem[1::2]):
fileinfo = fileinfo.split()
src_filename = fileinfo[3]
if src_filename not in files:
# There is a race condition with the remind call above here.
# This could be solved by parsing the remind -de output,
# but I don't see an easy way to do that.
files[src_filename] = open(src_filename).readlines()
reminders[src_filename] = {}
mtime = getmtime(src_filename)
if mtime > self._mtime:
self._mtime = mtime
text = files[src_filename][int(fileinfo[2]) - 1]
event = self._parse_remind_line(line, text)
if event['uid'] in reminders[src_filename]:
reminders[src_filename][event['uid']]['dtstart'] += event['dtstart']
reminders[src_filename][event['uid']]['line'] += line
else:
reminders[src_filename][event['uid']] = event
reminders[src_filename][event['uid']]['line'] = line
# Find included files without reminders and add them to the file list
for source in files.values():
for line in source:
if line.startswith('include'):
new_file = line.split(' ')[1].strip()
if new_file not in reminders:
reminders[new_file] = {}
mtime = getmtime(new_file)
if mtime > self._mtime:
self._mtime = mtime
return reminders
@staticmethod
def _gen_description(text):
"""Convert from Remind MSG to iCal description
Opposite of _gen_msg()
"""
return text[text.rfind('%"') + 3:].replace('%_', '\n').replace('["["]', '[').strip()
@staticmethod
def _interval(dates):
"""Return the distance between all dates and 0 if they are different"""
interval = (dates[1] - dates[0]).days
last = dates[0]
for dat in dates[1:]:
if (dat - last).days != interval:
return 0
last = dat
return interval
@staticmethod
def _gen_dtend_rrule(dtstarts, vevent):
"""Generate an rdate or rrule from a list of dates and add it to the vevent"""
interval = Remind._interval(dtstarts)
if interval > 0 and interval % 7 == 0:
rset = rrule.rruleset()
rset.rrule(rrule.rrule(freq=rrule.WEEKLY, interval=interval // 7, count=len(dtstarts)))
vevent.rruleset = rset
elif interval > 1:
rset = rrule.rruleset()
rset.rrule(rrule.rrule(freq=rrule.DAILY, interval=interval, count=len(dtstarts)))
vevent.rruleset = rset
elif interval > 0:
if isinstance(dtstarts[0], datetime):
rset = rrule.rruleset()
rset.rrule(rrule.rrule(freq=rrule.DAILY, count=len(dtstarts)))
vevent.rruleset = rset
else:
vevent.add('dtend').value = dtstarts[-1] + timedelta(days=1)
else:
rset = rrule.rruleset()
if isinstance(dtstarts[0], datetime):
for dat in dtstarts:
rset.rdate(dat)
else:
for dat in dtstarts:
rset.rdate(datetime(dat.year, dat.month, dat.day))
# temporary set dtstart to a different date, so it's not
# removed from rset by python-vobject works around bug in
# Android:
# https://github.com/rfc2822/davdroid/issues/340
vevent.dtstart.value = dtstarts[0] - timedelta(days=1)
vevent.rruleset = rset
vevent.dtstart.value = dtstarts[0]
if not isinstance(dtstarts[0], datetime):
vevent.add('dtend').value = dtstarts[0] + timedelta(days=1)
def _gen_vevent(self, event, vevent):
"""Generate vevent from given event"""
vevent.add('dtstart').value = event['dtstart'][0]
vevent.add('dtstamp').value = datetime.fromtimestamp(self._mtime)
vevent.add('summary').value = event['msg']
vevent.add('uid').value = event['uid']
if 'class' in event:
vevent.add('class').value = event['class']
if 'categories' in event and len(event['categories']) > 0:
vevent.add('categories').value = event['categories']
if 'location' in event:
vevent.add('location').value = event['location']
if 'description' in event:
vevent.add('description').value = event['description']
if isinstance(event['dtstart'][0], datetime):
if self._alarm != timedelta():
valarm = vevent.add('valarm')
valarm.add('trigger').value = self._alarm
valarm.add('action').value = 'DISPLAY'
valarm.add('description').value = event['msg']
if 'duration' in event:
vevent.add('duration').value = event['duration']
else:
vevent.add('dtend').value = event['dtstart'][0]
elif len(event['dtstart']) == 1:
vevent.add('dtend').value = event['dtstart'][0] + timedelta(days=1)
if len(event['dtstart']) > 1:
Remind._gen_dtend_rrule(event['dtstart'], vevent)
def _update(self):
"""Reload Remind files if the mtime is newer"""
update = not self._reminders
with self._lock:
for fname in self._reminders:
if getmtime(fname) > self._mtime:
update = True
break
if update:
self._reminders = self._parse_remind(self._filename)
def get_filesnames(self):
"""All filenames parsed by remind (including included files)"""
self._update()
return list(self._reminders.keys())
@staticmethod
def _get_uid(line):
"""UID of a remind line"""
return '%s@%s' % (md5(line[:-1].encode('utf-8')).hexdigest(), getfqdn())
def get_uids(self, filename=None):
"""UIDs of all reminders in the file excluding included files
If a filename is specified, only it's UIDs are return, otherwise all.
filename -- the remind file
"""
self._update()
if filename:
if filename not in self._reminders:
return []
return self._reminders[filename].keys()
return [uid for uids in self._reminders.values() for uid in uids]
def to_vobject_etag(self, filename, uid):
"""Return iCal object and etag of one Remind entry
filename -- the remind file
uid -- the UID of the Remind line
"""
return self.to_vobjects(filename, [uid])[0][1:3]
def to_vobjects(self, filename, uids=None):
"""Return iCal objects and etags of all Remind entries in uids
filename -- the remind file
uids -- the UIDs of the Remind lines (all if None)
"""
self._update()
if not uids:
uids = self._reminders[filename]
items = []
for uid in uids:
cal = iCalendar()
self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))
etag = md5()
etag.update(self._reminders[filename][uid]['line'].encode("utf-8"))
items.append((uid, cal, '"%s"' % etag.hexdigest()))
return items
def to_vobject(self, filename=None, uid=None):
"""Return iCal object of Remind lines
If filename and UID are specified, the vObject only contains that event.
If only a filename is specified, the vObject contains all events in the file.
Otherwise the vObject contains all all objects of all files associated with the Remind object.
filename -- the remind file
uid -- the UID of the Remind line
"""
self._update()
cal = iCalendar()
if uid:
self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))
elif filename:
for event in self._reminders[filename].values():
self._gen_vevent(event, cal.add('vevent'))
else:
for filename in self._reminders:
for event in self._reminders[filename].values():
self._gen_vevent(event, cal.add('vevent'))
return cal
def stdin_to_vobject(self, lines):
"""Return iCal object of the Remind commands in lines"""
cal = iCalendar()
for event in self._parse_remind('-', lines)['-'].values():
self._gen_vevent(event, cal.add('vevent'))
return cal
@staticmethod
def _parse_rdate(rdates):
"""Convert from iCal rdate to Remind trigdate syntax"""
trigdates = [rdate.strftime("trigdate()=='%Y-%m-%d'") for rdate in rdates]
return 'SATISFY [%s]' % '||'.join(trigdates)
@staticmethod
def _parse_rruleset(rruleset):
"""Convert from iCal rrule to Remind recurrence syntax"""
# pylint: disable=protected-access
if rruleset._rrule[0]._freq == 0:
return []
rep = []
if rruleset._rrule[0]._byweekday and len(rruleset._rrule[0]._byweekday) > 1:
rep.append('*1')
elif rruleset._rrule[0]._freq == rrule.DAILY:
rep.append('*%d' % rruleset._rrule[0]._interval)
elif rruleset._rrule[0]._freq == rrule.WEEKLY:
rep.append('*%d' % (7 * rruleset._rrule[0]._interval))
else:
return Remind._parse_rdate(rruleset._rrule[0])
if rruleset._rrule[0]._byweekday and len(rruleset._rrule[0]._byweekday) > 1:
daynums = set(range(7)) - set(rruleset._rrule[0]._byweekday)
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
days = [weekdays[day] for day in daynums]
rep.append('SKIP OMIT %s' % ' '.join(days))
if rruleset._rrule[0]._until:
rep.append(rruleset._rrule[0]._until.strftime('UNTIL %b %d %Y').replace(' 0', ' '))
elif rruleset._rrule[0]._count:
rep.append(rruleset[-1].strftime('UNTIL %b %d %Y').replace(' 0', ' '))
return rep
@staticmethod
def _event_duration(vevent):
"""unify dtend and duration to the duration of the given vevent"""
if hasattr(vevent, 'dtend'):
return vevent.dtend.value - vevent.dtstart.value
elif hasattr(vevent, 'duration') and vevent.duration.value:
return vevent.duration.value
return timedelta(0)
@staticmethod
def _gen_msg(vevent, label, tail, sep):
"""Generate a Remind MSG from the given vevent.
Opposite of _gen_description()
"""
rem = ['MSG']
msg = []
if label:
msg.append(label)
if hasattr(vevent, 'summary') and vevent.summary.value:
msg.append(Remind._rem_clean(vevent.summary.value))
else:
msg.append('empty reminder')
if hasattr(vevent, 'location') and vevent.location.value:
msg.append('at %s' % Remind._rem_clean(vevent.location.value))
has_desc = hasattr(vevent, 'description') and vevent.description.value
if tail or has_desc:
rem.append('%%"%s%%"' % ' '.join(msg))
else:
rem.append(' '.join(msg))
if tail:
rem.append(tail)
if has_desc:
rem[-1] += sep + Remind._rem_clean(vevent.description.value)
return ' '.join(rem)
@staticmethod
def _rem_clean(rem):
"""Strip, transform newlines, and escape '[' in string so it's
acceptable as a remind entry."""
return rem.strip().replace('\n', '%_').replace('[', '["["]')
@staticmethod
def _abbr_tag(tag):
"""Transform a string so it's acceptable as a remind tag. """
return tag.replace(" ", "")[:48]
def to_remind(self, vevent, label=None, priority=None, tags=None, tail=None,
sep=" ", postdate=None, posttime=None):
"""Generate a Remind command from the given vevent"""
remind = ['REM']
trigdates = None
if hasattr(vevent, 'rrule'):
trigdates = Remind._parse_rruleset(vevent.rruleset)
dtstart = vevent.dtstart.value
# If we don't get timezone information, handle it as a naive datetime.
# See https://github.com/jspricke/python-remind/issues/2 for reference.
if isinstance(dtstart, datetime) and dtstart.tzinfo:
dtstart = dtstart.astimezone(self._localtz)
dtend = None
if hasattr(vevent, 'dtend'):
dtend = vevent.dtend.value
if isinstance(dtend, datetime) and dtend.tzinfo:
dtend = dtend.astimezone(self._localtz)
if not hasattr(vevent, 'rdate') and not isinstance(trigdates, str):
remind.append(dtstart.strftime('%b %d %Y').replace(' 0', ' '))
if postdate:
remind.append(postdate)
if priority:
remind.append('PRIORITY %s' % priority)
if isinstance(trigdates, list):
remind.extend(trigdates)
duration = Remind._event_duration(vevent)
if type(dtstart) is date and duration.days > 1:
remind.append('*1')
if dtend is not None:
dtend -= timedelta(days=1)
remind.append(dtend.strftime('UNTIL %b %d %Y').replace(' 0', ' '))
if isinstance(dtstart, datetime):
remind.append(dtstart.strftime('AT %H:%M').replace(' 0', ' '))
if posttime:
remind.append(posttime)
if duration.total_seconds() > 0:
remind.append('DURATION %d:%02d' % divmod(duration.total_seconds() / 60, 60))
if hasattr(vevent, 'rdate'):
remind.append(Remind._parse_rdate(vevent.rdate.value))
elif isinstance(trigdates, str):
remind.append(trigdates)
if hasattr(vevent, 'class'):
remind.append('TAG %s' % Remind._abbr_tag(vevent.getChildValue('class')))
if tags:
remind.extend(['TAG %s' % Remind._abbr_tag(tag) for tag in tags])
if hasattr(vevent, 'categories_list'):
for categories in vevent.categories_list:
for category in categories.value:
remind.append('TAG %s' % Remind._abbr_tag(category))
remind.append(Remind._gen_msg(vevent, label, tail, sep))
return ' '.join(remind) + '\n'
def to_reminders(self, ical, label=None, priority=None, tags=None,
tail=None, sep=" ", postdate=None, posttime=None):
"""Return Remind commands for all events of a iCalendar"""
if not hasattr(ical, 'vevent_list'):
return ''
reminders = [self.to_remind(vevent, label, priority, tags, tail, sep,
postdate, posttime)
for vevent in ical.vevent_list]
return ''.join(reminders)
def append(self, ical, filename=None):
"""Append a Remind command generated from the iCalendar to the file"""
return self.append_vobject(readOne(ical), filename)
def append_vobject(self, ical, filename=None):
"""Append a Remind command generated from the iCalendar to the file"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
with self._lock:
outdat = self.to_reminders(ical)
open(filename, 'a').write(outdat)
return Remind._get_uid(outdat)
def remove(self, uid, filename=None):
"""Remove the Remind command with the uid from the file"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(filename).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
del rem[index]
open(filename, 'w').writelines(rem)
break
def replace(self, uid, ical, filename=None):
"""Update the Remind command with the uid in the file with the new iCalendar"""
return self.replace_vobject(uid, readOne(ical), filename)
def replace_vobject(self, uid, ical, filename=None):
"""Update the Remind command with the uid in the file with the new iCalendar"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(filename).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
rem[index] = self.to_reminders(ical)
new_uid = self._get_uid(rem[index])
open(filename, 'w').writelines(rem)
return new_uid
def move_vobject(self, uid, from_file, to_file):
"""Move the Remind command with the uid from from_file to to_file"""
if from_file not in self._reminders or to_file not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(from_file).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
del rem[index]
open(from_file, 'w').writelines(rem)
open(to_file, 'a').write(line)
break
def get_meta(self):
"""Meta tags of the vObject collection"""
return {'tag': 'VCALENDAR', 'C:supported-calendar-component-set': 'VEVENT'}
def last_modified(self):
"""Last time the Remind files where parsed"""
self._update()
return self._mtime
|
jspricke/python-remind | remind.py | Remind._interval | python | def _interval(dates):
interval = (dates[1] - dates[0]).days
last = dates[0]
for dat in dates[1:]:
if (dat - last).days != interval:
return 0
last = dat
return interval | Return the distance between all dates and 0 if they are different | train | https://github.com/jspricke/python-remind/blob/dda2aa8fc20b87b9c9fcbca2b67bce73911d05d1/remind.py#L161-L169 | null | class Remind(object):
"""Represents a collection of Remind files"""
def __init__(self, filename=expanduser('~/.reminders'), localtz=None,
startdate=date.today() - timedelta(weeks=12), month=15,
alarm=timedelta(minutes=-10)):
"""Constructor
filename -- the remind file (included files will be used as well)
localtz -- the timezone of the remind file
startdate -- the date to start parsing, will be passed to remind
month -- how many month to parse, will be passed to remind -s
"""
self._localtz = localtz if localtz else get_localzone()
self._filename = filename
self._startdate = startdate
self._month = month
self._lock = Lock()
self._reminders = {}
self._mtime = 0
self._alarm = alarm
self._update()
def _parse_remind(self, filename, lines=''):
"""Calls remind and parses the output into a dict
filename -- the remind file (included files will be used as well)
lines -- used as stdin to remind (filename will be set to -)
"""
files = {}
reminders = {}
if lines:
filename = '-'
files[filename] = lines
reminders[filename] = {}
cmd = ['remind', '-l', '-s%d' % self._month, '-b1', '-y', '-r',
filename, str(self._startdate)]
try:
rem = Popen(cmd, stdin=PIPE, stdout=PIPE).communicate(input=lines.encode('utf-8'))[0].decode('utf-8')
except OSError:
raise OSError('Error running: %s' % ' '.join(cmd))
rem = rem.splitlines()
for (fileinfo, line) in zip(rem[::2], rem[1::2]):
fileinfo = fileinfo.split()
src_filename = fileinfo[3]
if src_filename not in files:
# There is a race condition with the remind call above here.
# This could be solved by parsing the remind -de output,
# but I don't see an easy way to do that.
files[src_filename] = open(src_filename).readlines()
reminders[src_filename] = {}
mtime = getmtime(src_filename)
if mtime > self._mtime:
self._mtime = mtime
text = files[src_filename][int(fileinfo[2]) - 1]
event = self._parse_remind_line(line, text)
if event['uid'] in reminders[src_filename]:
reminders[src_filename][event['uid']]['dtstart'] += event['dtstart']
reminders[src_filename][event['uid']]['line'] += line
else:
reminders[src_filename][event['uid']] = event
reminders[src_filename][event['uid']]['line'] = line
# Find included files without reminders and add them to the file list
for source in files.values():
for line in source:
if line.startswith('include'):
new_file = line.split(' ')[1].strip()
if new_file not in reminders:
reminders[new_file] = {}
mtime = getmtime(new_file)
if mtime > self._mtime:
self._mtime = mtime
return reminders
@staticmethod
def _gen_description(text):
"""Convert from Remind MSG to iCal description
Opposite of _gen_msg()
"""
return text[text.rfind('%"') + 3:].replace('%_', '\n').replace('["["]', '[').strip()
def _parse_remind_line(self, line, text):
"""Parse a line of remind output into a dict
line -- the remind output
text -- the original remind input
"""
event = {}
line = line.split(None, 6)
dat = [int(f) for f in line[0].split('/')]
if line[4] != '*':
start = divmod(int(line[4]), 60)
event['dtstart'] = [datetime(dat[0], dat[1], dat[2], start[0], start[1], tzinfo=self._localtz)]
if line[3] != '*':
event['duration'] = timedelta(minutes=int(line[3]))
else:
event['dtstart'] = [date(dat[0], dat[1], dat[2])]
msg = ' '.join(line[5:]) if line[4] == '*' else line[6]
msg = msg.strip().replace('%_', '\n').replace('["["]', '[')
if ' at ' in msg:
(event['msg'], event['location']) = msg.rsplit(' at ', 1)
else:
event['msg'] = msg
if '%"' in text:
event['description'] = Remind._gen_description(text)
tags = line[2].split(',')
classes = ['PUBLIC', 'PRIVATE', 'CONFIDENTIAL']
for tag in tags[:-1]:
if tag in classes:
event['class'] = tag
event['categories'] = [tag for tag in tags[:-1] if tag not in classes]
event['uid'] = '%s@%s' % (tags[-1][7:], getfqdn())
return event
@staticmethod
def _gen_dtend_rrule(dtstarts, vevent):
    """Add recurrence (rrule/rdate) or a dtend to the vevent.

    Fix: the decorator was applied twice (@staticmethod stacked on
    @staticmethod); double-wrapping breaks invocation on Python < 3.10,
    so a single application is used here.

    dtstarts -- all trigger dates/datetimes of one reminder (len > 1)
    vevent -- the vevent component to extend
    """
    interval = Remind._interval(dtstarts)
    if interval > 0 and interval % 7 == 0:
        # Evenly spaced by whole weeks -> WEEKLY rrule.
        rset = rrule.rruleset()
        rset.rrule(rrule.rrule(freq=rrule.WEEKLY, interval=interval // 7, count=len(dtstarts)))
        vevent.rruleset = rset
    elif interval > 1:
        # Evenly spaced by N days -> DAILY rrule with interval N.
        rset = rrule.rruleset()
        rset.rrule(rrule.rrule(freq=rrule.DAILY, interval=interval, count=len(dtstarts)))
        vevent.rruleset = rset
    elif interval > 0:
        if isinstance(dtstarts[0], datetime):
            rset = rrule.rruleset()
            rset.rrule(rrule.rrule(freq=rrule.DAILY, count=len(dtstarts)))
            vevent.rruleset = rset
        else:
            # Consecutive all-day events: one event spanning the whole run.
            vevent.add('dtend').value = dtstarts[-1] + timedelta(days=1)
    else:
        # Irregular spacing: enumerate every date as an RDATE.
        rset = rrule.rruleset()
        if isinstance(dtstarts[0], datetime):
            for dat in dtstarts:
                rset.rdate(dat)
        else:
            for dat in dtstarts:
                rset.rdate(datetime(dat.year, dat.month, dat.day))
        # Temporarily set dtstart to a different date, so it's not
        # removed from rset by python-vobject; works around a bug in
        # Android: https://github.com/rfc2822/davdroid/issues/340
        vevent.dtstart.value = dtstarts[0] - timedelta(days=1)
        vevent.rruleset = rset
        vevent.dtstart.value = dtstarts[0]
        if not isinstance(dtstarts[0], datetime):
            vevent.add('dtend').value = dtstarts[0] + timedelta(days=1)
def _gen_vevent(self, event, vevent):
    """Populate the given (empty) vevent component from an event dict.

    event -- dict produced by _parse_remind_line
    vevent -- vobject vevent component to fill
    """
    vevent.add('dtstart').value = event['dtstart'][0]
    vevent.add('dtstamp').value = datetime.fromtimestamp(self._mtime)
    vevent.add('summary').value = event['msg']
    vevent.add('uid').value = event['uid']
    if 'class' in event:
        vevent.add('class').value = event['class']
    if 'categories' in event and len(event['categories']) > 0:
        vevent.add('categories').value = event['categories']
    if 'location' in event:
        vevent.add('location').value = event['location']
    if 'description' in event:
        vevent.add('description').value = event['description']
    if isinstance(event['dtstart'][0], datetime):
        # Timed event: optionally attach a display alarm.
        if self._alarm != timedelta():
            valarm = vevent.add('valarm')
            valarm.add('trigger').value = self._alarm
            valarm.add('action').value = 'DISPLAY'
            valarm.add('description').value = event['msg']
        if 'duration' in event:
            vevent.add('duration').value = event['duration']
        else:
            # No duration known: zero-length event.
            vevent.add('dtend').value = event['dtstart'][0]
    elif len(event['dtstart']) == 1:
        # Single all-day event: dtend is the exclusive next day.
        vevent.add('dtend').value = event['dtstart'][0] + timedelta(days=1)
    if len(event['dtstart']) > 1:
        # Multiple trigger dates: encode as rrule/rdate.
        Remind._gen_dtend_rrule(event['dtstart'], vevent)
def _update(self):
    """Re-parse the Remind files if any changed on disk since the last parse.

    No-op when all tracked files have an mtime older than self._mtime.
    """
    update = not self._reminders  # always parse on first use
    with self._lock:
        for fname in self._reminders:
            if getmtime(fname) > self._mtime:
                update = True
                break
        if update:
            self._reminders = self._parse_remind(self._filename)
def get_filesnames(self):
    """Return all filenames parsed by remind (including included files)."""
    self._update()
    return list(self._reminders)
@staticmethod
def _get_uid(line):
    """Derive a stable UID for a Remind line: md5 of the line (newline stripped) at the local FQDN."""
    digest = md5(line[:-1].encode('utf-8')).hexdigest()
    return '%s@%s' % (digest, getfqdn())
def get_uids(self, filename=None):
    """UIDs of all reminders in the file, excluding included files.

    If a filename is specified, only its UIDs are returned (empty list for
    unknown files); otherwise the UIDs of every tracked file.

    filename -- the remind file
    """
    self._update()
    if filename:
        if filename not in self._reminders:
            return []
        return self._reminders[filename].keys()
    return [uid for uids in self._reminders.values() for uid in uids]
def to_vobject_etag(self, filename, uid):
    """Return the iCal object and etag of one Remind entry.

    filename -- the remind file
    uid -- the UID of the Remind line
    """
    (_, vobject, etag) = self.to_vobjects(filename, [uid])[0]
    return (vobject, etag)
def to_vobjects(self, filename, uids=None):
    """Return (uid, iCal object, etag) triples for Remind entries.

    filename -- the remind file
    uids -- the UIDs of the Remind lines (all entries in the file if None)
    """
    self._update()
    if not uids:
        uids = self._reminders[filename]
    items = []
    for uid in uids:
        cal = iCalendar()
        self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))
        # etag is the quoted md5 of the raw Remind line(s) of the entry.
        etag = md5()
        etag.update(self._reminders[filename][uid]['line'].encode("utf-8"))
        items.append((uid, cal, '"%s"' % etag.hexdigest()))
    return items
def to_vobject(self, filename=None, uid=None):
    """Return an iCal object of Remind lines.

    If filename and UID are specified, the vObject only contains that event.
    If only a filename is specified, the vObject contains all events in the
    file. Otherwise the vObject contains all events of all files associated
    with this Remind object.

    filename -- the remind file
    uid -- the UID of the Remind line
    """
    self._update()
    cal = iCalendar()
    if uid:
        self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))
    elif filename:
        for event in self._reminders[filename].values():
            self._gen_vevent(event, cal.add('vevent'))
    else:
        for filename in self._reminders:
            for event in self._reminders[filename].values():
                self._gen_vevent(event, cal.add('vevent'))
    return cal
def stdin_to_vobject(self, lines):
    """Return an iCal object of the Remind commands given as a string.

    lines -- Remind file content, fed to remind via stdin
    """
    cal = iCalendar()
    for event in self._parse_remind('-', lines)['-'].values():
        self._gen_vevent(event, cal.add('vevent'))
    return cal
@staticmethod
def _parse_rdate(rdates):
    """Convert iCal rdates into a Remind SATISFY expression on trigdate()."""
    joined = '||'.join(rdate.strftime("trigdate()=='%Y-%m-%d'") for rdate in rdates)
    return 'SATISFY [%s]' % joined
@staticmethod
def _parse_rruleset(rruleset):
    """Convert an iCal rruleset into Remind recurrence syntax.

    Returns a list of Remind keywords (e.g. ['*7', 'UNTIL ...']) for
    daily/weekly rules, a SATISFY string for other frequencies, or []
    when there is no recurrence. Reads dateutil's private fields.
    """
    # pylint: disable=protected-access
    if rruleset._rrule[0]._freq == 0:
        return []
    rep = []
    if rruleset._rrule[0]._byweekday and len(rruleset._rrule[0]._byweekday) > 1:
        # Several weekdays: repeat daily and skip the unwanted days below.
        rep.append('*1')
    elif rruleset._rrule[0]._freq == rrule.DAILY:
        rep.append('*%d' % rruleset._rrule[0]._interval)
    elif rruleset._rrule[0]._freq == rrule.WEEKLY:
        rep.append('*%d' % (7 * rruleset._rrule[0]._interval))
    else:
        # Monthly/yearly etc. have no direct Remind form: expand to rdates.
        return Remind._parse_rdate(rruleset._rrule[0])
    if rruleset._rrule[0]._byweekday and len(rruleset._rrule[0]._byweekday) > 1:
        daynums = set(range(7)) - set(rruleset._rrule[0]._byweekday)
        weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
        days = [weekdays[day] for day in daynums]
        rep.append('SKIP OMIT %s' % ' '.join(days))
    if rruleset._rrule[0]._until:
        rep.append(rruleset._rrule[0]._until.strftime('UNTIL %b %d %Y').replace(' 0', ' '))
    elif rruleset._rrule[0]._count:
        # COUNT-limited rule: use the last occurrence as the UNTIL date.
        rep.append(rruleset[-1].strftime('UNTIL %b %d %Y').replace(' 0', ' '))
    return rep
@staticmethod
def _event_duration(vevent):
    """Unify dtend and duration into a single timedelta for the vevent."""
    if hasattr(vevent, 'dtend'):
        return vevent.dtend.value - vevent.dtstart.value
    if hasattr(vevent, 'duration') and vevent.duration.value:
        return vevent.duration.value
    return timedelta(0)
@staticmethod
def _gen_msg(vevent, label, tail, sep):
    """Build the Remind MSG clause from a vevent (opposite of _gen_description).

    label -- optional prefix for the summary
    tail -- optional text appended after the summary
    sep -- separator between the tail and the description
    """
    rem = ['MSG']
    msg = []
    if label:
        msg.append(label)
    if hasattr(vevent, 'summary') and vevent.summary.value:
        msg.append(Remind._rem_clean(vevent.summary.value))
    else:
        msg.append('empty reminder')
    if hasattr(vevent, 'location') and vevent.location.value:
        msg.append('at %s' % Remind._rem_clean(vevent.location.value))
    has_desc = hasattr(vevent, 'description') and vevent.description.value
    if tail or has_desc:
        # Delimit the summary with %"..."% so the extra text stays out of it.
        rem.append('%%"%s%%"' % ' '.join(msg))
    else:
        rem.append(' '.join(msg))
    if tail:
        rem.append(tail)
    if has_desc:
        rem[-1] += sep + Remind._rem_clean(vevent.description.value)
    return ' '.join(rem)
@staticmethod
def _rem_clean(rem):
    """Sanitize free text for a Remind entry: trim, encode newlines as %_, escape '['."""
    cleaned = rem.strip()
    cleaned = cleaned.replace('\n', '%_')
    return cleaned.replace('[', '["["]')
@staticmethod
def _abbr_tag(tag):
    """Make a string usable as a Remind TAG: drop spaces, cap at 48 chars."""
    compact = tag.replace(" ", "")
    return compact[:48]
def to_remind(self, vevent, label=None, priority=None, tags=None, tail=None,
              sep=" ", postdate=None, posttime=None):
    """Generate a Remind command line from the given vevent.

    vevent -- the vobject event to convert
    label -- prefix for the MSG body
    priority -- value for a PRIORITY clause
    tags -- extra TAG entries
    tail -- text appended after the summary in the MSG body
    sep -- separator between tail and the description
    postdate -- raw text inserted after the date clause
    posttime -- raw text inserted after the AT clause
    """
    remind = ['REM']
    trigdates = None
    if hasattr(vevent, 'rrule'):
        trigdates = Remind._parse_rruleset(vevent.rruleset)
    dtstart = vevent.dtstart.value
    # If we don't get timezone information, handle it as a naive datetime.
    # See https://github.com/jspricke/python-remind/issues/2 for reference.
    if isinstance(dtstart, datetime) and dtstart.tzinfo:
        dtstart = dtstart.astimezone(self._localtz)
    dtend = None
    if hasattr(vevent, 'dtend'):
        dtend = vevent.dtend.value
        if isinstance(dtend, datetime) and dtend.tzinfo:
            dtend = dtend.astimezone(self._localtz)
    if not hasattr(vevent, 'rdate') and not isinstance(trigdates, str):
        # Plain (non-SATISFY) events carry an explicit start date.
        remind.append(dtstart.strftime('%b %d %Y').replace(' 0', ' '))
    if postdate:
        remind.append(postdate)
    if priority:
        remind.append('PRIORITY %s' % priority)
    if isinstance(trigdates, list):
        remind.extend(trigdates)
    duration = Remind._event_duration(vevent)
    if type(dtstart) is date and duration.days > 1:
        # Multi-day all-day event: repeat daily until the (inclusive) end.
        remind.append('*1')
        if dtend is not None:
            dtend -= timedelta(days=1)
            remind.append(dtend.strftime('UNTIL %b %d %Y').replace(' 0', ' '))
    if isinstance(dtstart, datetime):
        remind.append(dtstart.strftime('AT %H:%M').replace(' 0', ' '))
        if posttime:
            remind.append(posttime)
        if duration.total_seconds() > 0:
            remind.append('DURATION %d:%02d' % divmod(duration.total_seconds() / 60, 60))
    if hasattr(vevent, 'rdate'):
        remind.append(Remind._parse_rdate(vevent.rdate.value))
    elif isinstance(trigdates, str):
        remind.append(trigdates)
    if hasattr(vevent, 'class'):
        remind.append('TAG %s' % Remind._abbr_tag(vevent.getChildValue('class')))
    if tags:
        remind.extend(['TAG %s' % Remind._abbr_tag(tag) for tag in tags])
    if hasattr(vevent, 'categories_list'):
        for categories in vevent.categories_list:
            for category in categories.value:
                remind.append('TAG %s' % Remind._abbr_tag(category))
    remind.append(Remind._gen_msg(vevent, label, tail, sep))
    return ' '.join(remind) + '\n'
def to_reminders(self, ical, label=None, priority=None, tags=None,
                 tail=None, sep=" ", postdate=None, posttime=None):
    """Return the concatenated Remind commands for all events of an iCalendar.

    Returns '' when the calendar has no vevent components.
    """
    if not hasattr(ical, 'vevent_list'):
        return ''
    parts = (self.to_remind(vevent, label, priority, tags, tail, sep,
                            postdate, posttime)
             for vevent in ical.vevent_list)
    return ''.join(parts)
def append(self, ical, filename=None):
    """Append a Remind command generated from the iCalendar string to the file.

    ical -- iCalendar source text (parsed with vobject's readOne)
    filename -- target remind file (main file if None)
    """
    return self.append_vobject(readOne(ical), filename)
def append_vobject(self, ical, filename=None):
    """Append Remind commands generated from the iCalendar to the file.

    ical -- parsed vobject iCalendar
    filename -- target remind file; defaults to the main file, must be a
                known file otherwise (returns None when it is not)

    Returns the UID of the appended entry.
    """
    if not filename:
        filename = self._filename
    elif filename not in self._reminders:
        return
    with self._lock:
        outdat = self.to_reminders(ical)
        # Context manager so the handle is closed deterministically
        # (the original leaked the file object).
        with open(filename, 'a') as outfile:
            outfile.write(outdat)
    return Remind._get_uid(outdat)
def remove(self, uid, filename=None):
    """Remove the Remind command with the given UID from the file.

    uid -- UID as produced by _get_uid ('<md5>@<fqdn>')
    filename -- remind file; defaults to the main file, must be a known
                file otherwise (silently returns when it is not)
    """
    if not filename:
        filename = self._filename
    elif filename not in self._reminders:
        return
    uid = uid.split('@')[0]
    with self._lock:
        # Context managers replace the original's leaked file handles.
        with open(filename) as infile:
            rem = infile.readlines()
        for (index, line) in enumerate(rem):
            # Match by md5 of the line without its trailing newline.
            if uid == md5(line[:-1].encode('utf-8')).hexdigest():
                del rem[index]
                with open(filename, 'w') as outfile:
                    outfile.writelines(rem)
                break
def replace(self, uid, ical, filename=None):
    """Update the Remind command with the given UID using the iCalendar string.

    ical is parsed with vobject's readOne; see replace_vobject for details.
    """
    return self.replace_vobject(uid, readOne(ical), filename)
def replace_vobject(self, uid, ical, filename=None):
    """Replace the Remind command with the given UID by the new iCalendar.

    uid -- UID as produced by _get_uid ('<md5>@<fqdn>')
    ical -- parsed vobject iCalendar
    filename -- remind file; defaults to the main file

    Returns the UID of the replacement entry, or None when the UID (or an
    explicit unknown filename) is not found.
    """
    if not filename:
        filename = self._filename
    elif filename not in self._reminders:
        return
    uid = uid.split('@')[0]
    with self._lock:
        # Context managers replace the original's leaked file handles.
        with open(filename) as infile:
            rem = infile.readlines()
        for (index, line) in enumerate(rem):
            if uid == md5(line[:-1].encode('utf-8')).hexdigest():
                rem[index] = self.to_reminders(ical)
                new_uid = self._get_uid(rem[index])
                with open(filename, 'w') as outfile:
                    outfile.writelines(rem)
                return new_uid
def move_vobject(self, uid, from_file, to_file):
    """Move the Remind command with the given UID from from_file to to_file.

    Both files must already be tracked in self._reminders; otherwise this
    is a silent no-op.
    """
    if from_file not in self._reminders or to_file not in self._reminders:
        return
    uid = uid.split('@')[0]
    with self._lock:
        # Context managers replace the original's leaked file handles.
        with open(from_file) as infile:
            rem = infile.readlines()
        for (index, line) in enumerate(rem):
            if uid == md5(line[:-1].encode('utf-8')).hexdigest():
                del rem[index]
                with open(from_file, 'w') as outfile:
                    outfile.writelines(rem)
                with open(to_file, 'a') as outfile:
                    outfile.write(line)
                break
def get_meta(self):
    """Static collection metadata advertised for the vObject collection."""
    return {
        'tag': 'VCALENDAR',
        'C:supported-calendar-component-set': 'VEVENT',
    }
def last_modified(self):
    """Return the timestamp recorded when the Remind files were last parsed.

    Re-parses first if anything changed on disk.
    """
    self._update()
    return self._mtime
|
jspricke/python-remind | remind.py | Remind._gen_dtend_rrule | python | def _gen_dtend_rrule(dtstarts, vevent):
interval = Remind._interval(dtstarts)
if interval > 0 and interval % 7 == 0:
rset = rrule.rruleset()
rset.rrule(rrule.rrule(freq=rrule.WEEKLY, interval=interval // 7, count=len(dtstarts)))
vevent.rruleset = rset
elif interval > 1:
rset = rrule.rruleset()
rset.rrule(rrule.rrule(freq=rrule.DAILY, interval=interval, count=len(dtstarts)))
vevent.rruleset = rset
elif interval > 0:
if isinstance(dtstarts[0], datetime):
rset = rrule.rruleset()
rset.rrule(rrule.rrule(freq=rrule.DAILY, count=len(dtstarts)))
vevent.rruleset = rset
else:
vevent.add('dtend').value = dtstarts[-1] + timedelta(days=1)
else:
rset = rrule.rruleset()
if isinstance(dtstarts[0], datetime):
for dat in dtstarts:
rset.rdate(dat)
else:
for dat in dtstarts:
rset.rdate(datetime(dat.year, dat.month, dat.day))
# temporary set dtstart to a different date, so it's not
# removed from rset by python-vobject works around bug in
# Android:
# https://github.com/rfc2822/davdroid/issues/340
vevent.dtstart.value = dtstarts[0] - timedelta(days=1)
vevent.rruleset = rset
vevent.dtstart.value = dtstarts[0]
if not isinstance(dtstarts[0], datetime):
vevent.add('dtend').value = dtstarts[0] + timedelta(days=1) | Generate an rdate or rrule from a list of dates and add it to the vevent | train | https://github.com/jspricke/python-remind/blob/dda2aa8fc20b87b9c9fcbca2b67bce73911d05d1/remind.py#L172-L206 | null | class Remind(object):
"""Represents a collection of Remind files"""
def __init__(self, filename=expanduser('~/.reminders'), localtz=None,
startdate=date.today() - timedelta(weeks=12), month=15,
alarm=timedelta(minutes=-10)):
"""Constructor
filename -- the remind file (included files will be used as well)
localtz -- the timezone of the remind file
startdate -- the date to start parsing, will be passed to remind
month -- how many month to parse, will be passed to remind -s
"""
self._localtz = localtz if localtz else get_localzone()
self._filename = filename
self._startdate = startdate
self._month = month
self._lock = Lock()
self._reminders = {}
self._mtime = 0
self._alarm = alarm
self._update()
def _parse_remind(self, filename, lines=''):
"""Calls remind and parses the output into a dict
filename -- the remind file (included files will be used as well)
lines -- used as stdin to remind (filename will be set to -)
"""
files = {}
reminders = {}
if lines:
filename = '-'
files[filename] = lines
reminders[filename] = {}
cmd = ['remind', '-l', '-s%d' % self._month, '-b1', '-y', '-r',
filename, str(self._startdate)]
try:
rem = Popen(cmd, stdin=PIPE, stdout=PIPE).communicate(input=lines.encode('utf-8'))[0].decode('utf-8')
except OSError:
raise OSError('Error running: %s' % ' '.join(cmd))
rem = rem.splitlines()
for (fileinfo, line) in zip(rem[::2], rem[1::2]):
fileinfo = fileinfo.split()
src_filename = fileinfo[3]
if src_filename not in files:
# There is a race condition with the remind call above here.
# This could be solved by parsing the remind -de output,
# but I don't see an easy way to do that.
files[src_filename] = open(src_filename).readlines()
reminders[src_filename] = {}
mtime = getmtime(src_filename)
if mtime > self._mtime:
self._mtime = mtime
text = files[src_filename][int(fileinfo[2]) - 1]
event = self._parse_remind_line(line, text)
if event['uid'] in reminders[src_filename]:
reminders[src_filename][event['uid']]['dtstart'] += event['dtstart']
reminders[src_filename][event['uid']]['line'] += line
else:
reminders[src_filename][event['uid']] = event
reminders[src_filename][event['uid']]['line'] = line
# Find included files without reminders and add them to the file list
for source in files.values():
for line in source:
if line.startswith('include'):
new_file = line.split(' ')[1].strip()
if new_file not in reminders:
reminders[new_file] = {}
mtime = getmtime(new_file)
if mtime > self._mtime:
self._mtime = mtime
return reminders
@staticmethod
def _gen_description(text):
"""Convert from Remind MSG to iCal description
Opposite of _gen_msg()
"""
return text[text.rfind('%"') + 3:].replace('%_', '\n').replace('["["]', '[').strip()
def _parse_remind_line(self, line, text):
"""Parse a line of remind output into a dict
line -- the remind output
text -- the original remind input
"""
event = {}
line = line.split(None, 6)
dat = [int(f) for f in line[0].split('/')]
if line[4] != '*':
start = divmod(int(line[4]), 60)
event['dtstart'] = [datetime(dat[0], dat[1], dat[2], start[0], start[1], tzinfo=self._localtz)]
if line[3] != '*':
event['duration'] = timedelta(minutes=int(line[3]))
else:
event['dtstart'] = [date(dat[0], dat[1], dat[2])]
msg = ' '.join(line[5:]) if line[4] == '*' else line[6]
msg = msg.strip().replace('%_', '\n').replace('["["]', '[')
if ' at ' in msg:
(event['msg'], event['location']) = msg.rsplit(' at ', 1)
else:
event['msg'] = msg
if '%"' in text:
event['description'] = Remind._gen_description(text)
tags = line[2].split(',')
classes = ['PUBLIC', 'PRIVATE', 'CONFIDENTIAL']
for tag in tags[:-1]:
if tag in classes:
event['class'] = tag
event['categories'] = [tag for tag in tags[:-1] if tag not in classes]
event['uid'] = '%s@%s' % (tags[-1][7:], getfqdn())
return event
@staticmethod
def _interval(dates):
"""Return the distance between all dates and 0 if they are different"""
interval = (dates[1] - dates[0]).days
last = dates[0]
for dat in dates[1:]:
if (dat - last).days != interval:
return 0
last = dat
return interval
@staticmethod
def _gen_vevent(self, event, vevent):
"""Generate vevent from given event"""
vevent.add('dtstart').value = event['dtstart'][0]
vevent.add('dtstamp').value = datetime.fromtimestamp(self._mtime)
vevent.add('summary').value = event['msg']
vevent.add('uid').value = event['uid']
if 'class' in event:
vevent.add('class').value = event['class']
if 'categories' in event and len(event['categories']) > 0:
vevent.add('categories').value = event['categories']
if 'location' in event:
vevent.add('location').value = event['location']
if 'description' in event:
vevent.add('description').value = event['description']
if isinstance(event['dtstart'][0], datetime):
if self._alarm != timedelta():
valarm = vevent.add('valarm')
valarm.add('trigger').value = self._alarm
valarm.add('action').value = 'DISPLAY'
valarm.add('description').value = event['msg']
if 'duration' in event:
vevent.add('duration').value = event['duration']
else:
vevent.add('dtend').value = event['dtstart'][0]
elif len(event['dtstart']) == 1:
vevent.add('dtend').value = event['dtstart'][0] + timedelta(days=1)
if len(event['dtstart']) > 1:
Remind._gen_dtend_rrule(event['dtstart'], vevent)
def _update(self):
"""Reload Remind files if the mtime is newer"""
update = not self._reminders
with self._lock:
for fname in self._reminders:
if getmtime(fname) > self._mtime:
update = True
break
if update:
self._reminders = self._parse_remind(self._filename)
def get_filesnames(self):
"""All filenames parsed by remind (including included files)"""
self._update()
return list(self._reminders.keys())
@staticmethod
def _get_uid(line):
"""UID of a remind line"""
return '%s@%s' % (md5(line[:-1].encode('utf-8')).hexdigest(), getfqdn())
def get_uids(self, filename=None):
"""UIDs of all reminders in the file excluding included files
If a filename is specified, only it's UIDs are return, otherwise all.
filename -- the remind file
"""
self._update()
if filename:
if filename not in self._reminders:
return []
return self._reminders[filename].keys()
return [uid for uids in self._reminders.values() for uid in uids]
def to_vobject_etag(self, filename, uid):
"""Return iCal object and etag of one Remind entry
filename -- the remind file
uid -- the UID of the Remind line
"""
return self.to_vobjects(filename, [uid])[0][1:3]
def to_vobjects(self, filename, uids=None):
"""Return iCal objects and etags of all Remind entries in uids
filename -- the remind file
uids -- the UIDs of the Remind lines (all if None)
"""
self._update()
if not uids:
uids = self._reminders[filename]
items = []
for uid in uids:
cal = iCalendar()
self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))
etag = md5()
etag.update(self._reminders[filename][uid]['line'].encode("utf-8"))
items.append((uid, cal, '"%s"' % etag.hexdigest()))
return items
def to_vobject(self, filename=None, uid=None):
"""Return iCal object of Remind lines
If filename and UID are specified, the vObject only contains that event.
If only a filename is specified, the vObject contains all events in the file.
Otherwise the vObject contains all all objects of all files associated with the Remind object.
filename -- the remind file
uid -- the UID of the Remind line
"""
self._update()
cal = iCalendar()
if uid:
self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))
elif filename:
for event in self._reminders[filename].values():
self._gen_vevent(event, cal.add('vevent'))
else:
for filename in self._reminders:
for event in self._reminders[filename].values():
self._gen_vevent(event, cal.add('vevent'))
return cal
def stdin_to_vobject(self, lines):
"""Return iCal object of the Remind commands in lines"""
cal = iCalendar()
for event in self._parse_remind('-', lines)['-'].values():
self._gen_vevent(event, cal.add('vevent'))
return cal
@staticmethod
def _parse_rdate(rdates):
"""Convert from iCal rdate to Remind trigdate syntax"""
trigdates = [rdate.strftime("trigdate()=='%Y-%m-%d'") for rdate in rdates]
return 'SATISFY [%s]' % '||'.join(trigdates)
@staticmethod
def _parse_rruleset(rruleset):
"""Convert from iCal rrule to Remind recurrence syntax"""
# pylint: disable=protected-access
if rruleset._rrule[0]._freq == 0:
return []
rep = []
if rruleset._rrule[0]._byweekday and len(rruleset._rrule[0]._byweekday) > 1:
rep.append('*1')
elif rruleset._rrule[0]._freq == rrule.DAILY:
rep.append('*%d' % rruleset._rrule[0]._interval)
elif rruleset._rrule[0]._freq == rrule.WEEKLY:
rep.append('*%d' % (7 * rruleset._rrule[0]._interval))
else:
return Remind._parse_rdate(rruleset._rrule[0])
if rruleset._rrule[0]._byweekday and len(rruleset._rrule[0]._byweekday) > 1:
daynums = set(range(7)) - set(rruleset._rrule[0]._byweekday)
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
days = [weekdays[day] for day in daynums]
rep.append('SKIP OMIT %s' % ' '.join(days))
if rruleset._rrule[0]._until:
rep.append(rruleset._rrule[0]._until.strftime('UNTIL %b %d %Y').replace(' 0', ' '))
elif rruleset._rrule[0]._count:
rep.append(rruleset[-1].strftime('UNTIL %b %d %Y').replace(' 0', ' '))
return rep
@staticmethod
def _event_duration(vevent):
"""unify dtend and duration to the duration of the given vevent"""
if hasattr(vevent, 'dtend'):
return vevent.dtend.value - vevent.dtstart.value
elif hasattr(vevent, 'duration') and vevent.duration.value:
return vevent.duration.value
return timedelta(0)
@staticmethod
def _gen_msg(vevent, label, tail, sep):
"""Generate a Remind MSG from the given vevent.
Opposite of _gen_description()
"""
rem = ['MSG']
msg = []
if label:
msg.append(label)
if hasattr(vevent, 'summary') and vevent.summary.value:
msg.append(Remind._rem_clean(vevent.summary.value))
else:
msg.append('empty reminder')
if hasattr(vevent, 'location') and vevent.location.value:
msg.append('at %s' % Remind._rem_clean(vevent.location.value))
has_desc = hasattr(vevent, 'description') and vevent.description.value
if tail or has_desc:
rem.append('%%"%s%%"' % ' '.join(msg))
else:
rem.append(' '.join(msg))
if tail:
rem.append(tail)
if has_desc:
rem[-1] += sep + Remind._rem_clean(vevent.description.value)
return ' '.join(rem)
@staticmethod
def _rem_clean(rem):
"""Strip, transform newlines, and escape '[' in string so it's
acceptable as a remind entry."""
return rem.strip().replace('\n', '%_').replace('[', '["["]')
@staticmethod
def _abbr_tag(tag):
"""Transform a string so it's acceptable as a remind tag. """
return tag.replace(" ", "")[:48]
def to_remind(self, vevent, label=None, priority=None, tags=None, tail=None,
sep=" ", postdate=None, posttime=None):
"""Generate a Remind command from the given vevent"""
remind = ['REM']
trigdates = None
if hasattr(vevent, 'rrule'):
trigdates = Remind._parse_rruleset(vevent.rruleset)
dtstart = vevent.dtstart.value
# If we don't get timezone information, handle it as a naive datetime.
# See https://github.com/jspricke/python-remind/issues/2 for reference.
if isinstance(dtstart, datetime) and dtstart.tzinfo:
dtstart = dtstart.astimezone(self._localtz)
dtend = None
if hasattr(vevent, 'dtend'):
dtend = vevent.dtend.value
if isinstance(dtend, datetime) and dtend.tzinfo:
dtend = dtend.astimezone(self._localtz)
if not hasattr(vevent, 'rdate') and not isinstance(trigdates, str):
remind.append(dtstart.strftime('%b %d %Y').replace(' 0', ' '))
if postdate:
remind.append(postdate)
if priority:
remind.append('PRIORITY %s' % priority)
if isinstance(trigdates, list):
remind.extend(trigdates)
duration = Remind._event_duration(vevent)
if type(dtstart) is date and duration.days > 1:
remind.append('*1')
if dtend is not None:
dtend -= timedelta(days=1)
remind.append(dtend.strftime('UNTIL %b %d %Y').replace(' 0', ' '))
if isinstance(dtstart, datetime):
remind.append(dtstart.strftime('AT %H:%M').replace(' 0', ' '))
if posttime:
remind.append(posttime)
if duration.total_seconds() > 0:
remind.append('DURATION %d:%02d' % divmod(duration.total_seconds() / 60, 60))
if hasattr(vevent, 'rdate'):
remind.append(Remind._parse_rdate(vevent.rdate.value))
elif isinstance(trigdates, str):
remind.append(trigdates)
if hasattr(vevent, 'class'):
remind.append('TAG %s' % Remind._abbr_tag(vevent.getChildValue('class')))
if tags:
remind.extend(['TAG %s' % Remind._abbr_tag(tag) for tag in tags])
if hasattr(vevent, 'categories_list'):
for categories in vevent.categories_list:
for category in categories.value:
remind.append('TAG %s' % Remind._abbr_tag(category))
remind.append(Remind._gen_msg(vevent, label, tail, sep))
return ' '.join(remind) + '\n'
def to_reminders(self, ical, label=None, priority=None, tags=None,
tail=None, sep=" ", postdate=None, posttime=None):
"""Return Remind commands for all events of a iCalendar"""
if not hasattr(ical, 'vevent_list'):
return ''
reminders = [self.to_remind(vevent, label, priority, tags, tail, sep,
postdate, posttime)
for vevent in ical.vevent_list]
return ''.join(reminders)
def append(self, ical, filename=None):
"""Append a Remind command generated from the iCalendar to the file"""
return self.append_vobject(readOne(ical), filename)
def append_vobject(self, ical, filename=None):
"""Append a Remind command generated from the iCalendar to the file"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
with self._lock:
outdat = self.to_reminders(ical)
open(filename, 'a').write(outdat)
return Remind._get_uid(outdat)
def remove(self, uid, filename=None):
"""Remove the Remind command with the uid from the file"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(filename).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
del rem[index]
open(filename, 'w').writelines(rem)
break
def replace(self, uid, ical, filename=None):
"""Update the Remind command with the uid in the file with the new iCalendar"""
return self.replace_vobject(uid, readOne(ical), filename)
def replace_vobject(self, uid, ical, filename=None):
"""Update the Remind command with the uid in the file with the new iCalendar"""
if not filename:
filename = self._filename
elif filename not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(filename).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
rem[index] = self.to_reminders(ical)
new_uid = self._get_uid(rem[index])
open(filename, 'w').writelines(rem)
return new_uid
def move_vobject(self, uid, from_file, to_file):
"""Move the Remind command with the uid from from_file to to_file"""
if from_file not in self._reminders or to_file not in self._reminders:
return
uid = uid.split('@')[0]
with self._lock:
rem = open(from_file).readlines()
for (index, line) in enumerate(rem):
if uid == md5(line[:-1].encode('utf-8')).hexdigest():
del rem[index]
open(from_file, 'w').writelines(rem)
open(to_file, 'a').write(line)
break
def get_meta(self):
"""Meta tags of the vObject collection"""
return {'tag': 'VCALENDAR', 'C:supported-calendar-component-set': 'VEVENT'}
def last_modified(self):
"""Last time the Remind files where parsed"""
self._update()
return self._mtime
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.