Dataset schema (each row below lists these fields one per line, in this order):

repo              string, 7 to 55 characters
path              string, 4 to 223 characters
func_name         string, 1 to 134 characters
original_string   string, 75 to 104k characters
language          string, 1 distinct value
code              string, 75 to 104k characters
code_tokens       list, 19 to 28.4k items
docstring         string, 1 to 46.9k characters
docstring_tokens  list, 1 to 1.97k items
sha               string, 40 characters
url               string, 87 to 315 characters
partition         string, 1 distinct value
contentful/contentful-management.py
contentful_management/resource_builder.py
ResourceBuilder.build
def build(self): """ Creates the objects from the JSON response. """ if self.json['sys']['type'] == 'Array': return self._build_array() return self._build_item(self.json)
python
def build(self): """ Creates the objects from the JSON response. """ if self.json['sys']['type'] == 'Array': return self._build_array() return self._build_item(self.json)
[ "def", "build", "(", "self", ")", ":", "if", "self", ".", "json", "[", "'sys'", "]", "[", "'type'", "]", "==", "'Array'", ":", "return", "self", ".", "_build_array", "(", ")", "return", "self", ".", "_build_item", "(", "self", ".", "json", ")" ]
Creates the objects from the JSON response.
[ "Creates", "the", "objects", "from", "the", "JSON", "response", "." ]
707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/resource_builder.py#L47-L54
train
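A minimal sketch of the sys.type dispatch that build() performs; the JSON payloads below are invented placeholders rather than real Contentful Management API responses.

# Hypothetical payloads shaped like the 'sys.type' convention that build() relies on.
array_json = {'sys': {'type': 'Array'}, 'items': [{'sys': {'type': 'Entry', 'id': 'cat'}}]}
item_json = {'sys': {'type': 'Entry', 'id': 'cat'}}

def build_kind(json):
    # Same branch as ResourceBuilder.build: collections are typed 'Array',
    # anything else is handled as a single resource.
    if json['sys']['type'] == 'Array':
        return 'array of {} item(s)'.format(len(json['items']))
    return 'single {}'.format(json['sys']['type'])

print(build_kind(array_json))  # array of 1 item(s)
print(build_kind(item_json))   # single Entry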
contentful/contentful-management.py
contentful_management/content_type_resource_proxy.py
ContentTypeResourceProxy.create
def create(self, resource_id=None, attributes=None): """ Creates a resource with a given ID (optional) and attributes for the current content type. """ return self.proxy.create(resource_id=resource_id, attributes=attributes)
python
def create(self, resource_id=None, attributes=None): """ Creates a resource with a given ID (optional) and attributes for the current content type. """ return self.proxy.create(resource_id=resource_id, attributes=attributes)
[ "def", "create", "(", "self", ",", "resource_id", "=", "None", ",", "attributes", "=", "None", ")", ":", "return", "self", ".", "proxy", ".", "create", "(", "resource_id", "=", "resource_id", ",", "attributes", "=", "attributes", ")" ]
Creates a resource with a given ID (optional) and attributes for the current content type.
[ "Creates", "a", "resource", "with", "a", "given", "ID", "(", "optional", ")", "and", "attributes", "for", "the", "current", "content", "type", "." ]
707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/content_type_resource_proxy.py#L43-L48
train
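A hedged sketch of how a forwarder like create() above is exercised; the inner proxy and the attribute layout are stand-ins for illustration, not the library's real classes.

# Stand-in for the wrapped proxy; the real one issues an API call to Contentful.
class _InnerProxy:
    def create(self, resource_id=None, attributes=None):
        return {'sys': {'id': resource_id}, 'fields': (attributes or {}).get('fields', {})}

class _ContentTypeResourceProxy:
    def __init__(self):
        self.proxy = _InnerProxy()

    def create(self, resource_id=None, attributes=None):
        # Same pass-through as the method in the row above.
        return self.proxy.create(resource_id=resource_id, attributes=attributes)

print(_ContentTypeResourceProxy().create('nyancat', {'fields': {'name': {'en-US': 'Nyan Cat'}}}))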
contentful/contentful-management.py
contentful_management/space_resource_proxy.py
SpaceResourceProxy.find
def find(self, resource_id, query=None): """ Finds a single resource by ID related to the current space. """ return self.proxy.find(resource_id, query=query)
python
def find(self, resource_id, query=None): """ Finds a single resource by ID related to the current space. """ return self.proxy.find(resource_id, query=query)
[ "def", "find", "(", "self", ",", "resource_id", ",", "query", "=", "None", ")", ":", "return", "self", ".", "proxy", ".", "find", "(", "resource_id", ",", "query", "=", "query", ")" ]
Finds a single resource by ID related to the current space.
[ "Finds", "a", "single", "resource", "by", "ID", "related", "to", "the", "current", "space", "." ]
707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/space_resource_proxy.py#L36-L41
train
moonlitesolutions/SolrClient
SolrClient/solrresp.py
SolrResponse.get_ngroups
def get_ngroups(self, field=None): ''' Returns ngroups count if it was specified in the query, otherwise ValueError. If grouping on more than one field, provide the field argument to specify which count you are looking for. ''' field = field if field else self._determine_group_field(field) if 'ngroups' in self.data['grouped'][field]: return self.data['grouped'][field]['ngroups'] raise ValueError("ngroups not found in response. specify group.ngroups in the query.")
python
def get_ngroups(self, field=None): ''' Returns ngroups count if it was specified in the query, otherwise ValueError. If grouping on more than one field, provide the field argument to specify which count you are looking for. ''' field = field if field else self._determine_group_field(field) if 'ngroups' in self.data['grouped'][field]: return self.data['grouped'][field]['ngroups'] raise ValueError("ngroups not found in response. specify group.ngroups in the query.")
[ "def", "get_ngroups", "(", "self", ",", "field", "=", "None", ")", ":", "field", "=", "field", "if", "field", "else", "self", ".", "_determine_group_field", "(", "field", ")", "if", "'ngroups'", "in", "self", ".", "data", "[", "'grouped'", "]", "[", "field", "]", ":", "return", "self", ".", "data", "[", "'grouped'", "]", "[", "field", "]", "[", "'ngroups'", "]", "raise", "ValueError", "(", "\"ngroups not found in response. specify group.ngroups in the query.\"", ")" ]
Returns ngroups count if it was specified in the query, otherwise ValueError. If grouping on more than one field, provide the field argument to specify which count you are looking for.
[ "Returns", "ngroups", "count", "if", "it", "was", "specified", "in", "the", "query", "otherwise", "ValueError", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L77-L86
train
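A sketch of the grouped-response shape that get_ngroups() walks; the dictionary below is a hand-written stand-in for what Solr returns when group=true and group.ngroups=true are requested.

# Hand-written stand-in for a grouped Solr response with group.ngroups enabled.
grouped_response = {
    'grouped': {
        'manufacturer': {'matches': 42, 'ngroups': 7, 'groups': []},
    }
}

def ngroups(data, field):
    # Same lookup get_ngroups() performs; ValueError if group.ngroups was not requested.
    if 'ngroups' in data['grouped'][field]:
        return data['grouped'][field]['ngroups']
    raise ValueError('ngroups not found in response. specify group.ngroups in the query.')

print(ngroups(grouped_response, 'manufacturer'))  # 7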
moonlitesolutions/SolrClient
SolrClient/solrresp.py
SolrResponse.get_groups_count
def get_groups_count(self, field=None): ''' Returns 'matches' from group response. If grouping on more than one field, provide the field argument to specify which count you are looking for. ''' field = field if field else self._determine_group_field(field) if 'matches' in self.data['grouped'][field]: return self.data['grouped'][field]['matches'] raise ValueError("group matches not found in response")
python
def get_groups_count(self, field=None): ''' Returns 'matches' from group response. If grouping on more than one field, provide the field argument to specify which count you are looking for. ''' field = field if field else self._determine_group_field(field) if 'matches' in self.data['grouped'][field]: return self.data['grouped'][field]['matches'] raise ValueError("group matches not found in response")
[ "def", "get_groups_count", "(", "self", ",", "field", "=", "None", ")", ":", "field", "=", "field", "if", "field", "else", "self", ".", "_determine_group_field", "(", "field", ")", "if", "'matches'", "in", "self", ".", "data", "[", "'grouped'", "]", "[", "field", "]", ":", "return", "self", ".", "data", "[", "'grouped'", "]", "[", "field", "]", "[", "'matches'", "]", "raise", "ValueError", "(", "\"group matches not found in response\"", ")" ]
Returns 'matches' from group response. If grouping on more than one field, provide the field argument to specify which count you are looking for.
[ "Returns", "matches", "from", "group", "response", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L89-L98
train
moonlitesolutions/SolrClient
SolrClient/solrresp.py
SolrResponse.get_flat_groups
def get_flat_groups(self, field=None): ''' Flattens the group response and just returns a list of documents. ''' field = field if field else self._determine_group_field(field) temp_groups = self.data['grouped'][field]['groups'] return [y for x in temp_groups for y in x['doclist']['docs']]
python
def get_flat_groups(self, field=None): ''' Flattens the group response and just returns a list of documents. ''' field = field if field else self._determine_group_field(field) temp_groups = self.data['grouped'][field]['groups'] return [y for x in temp_groups for y in x['doclist']['docs']]
[ "def", "get_flat_groups", "(", "self", ",", "field", "=", "None", ")", ":", "field", "=", "field", "if", "field", "else", "self", ".", "_determine_group_field", "(", "field", ")", "temp_groups", "=", "self", ".", "data", "[", "'grouped'", "]", "[", "field", "]", "[", "'groups'", "]", "return", "[", "y", "for", "x", "in", "temp_groups", "for", "y", "in", "x", "[", "'doclist'", "]", "[", "'docs'", "]", "]" ]
Flattens the group response and just returns a list of documents.
[ "Flattens", "the", "group", "response", "and", "just", "returns", "a", "list", "of", "documents", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L101-L107
train
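A worked example of the flattening that get_flat_groups() does, over an invented grouped response; it simply concatenates every group's doclist.

# Invented grouped response with two groups of documents.
grouped = {
    'grouped': {
        'color': {
            'groups': [
                {'groupValue': 'red', 'doclist': {'docs': [{'id': 1}, {'id': 2}]}},
                {'groupValue': 'blue', 'doclist': {'docs': [{'id': 3}]}},
            ]
        }
    }
}

# Same comprehension as get_flat_groups(): one flat list of docs, group order preserved.
flat = [doc for group in grouped['grouped']['color']['groups'] for doc in group['doclist']['docs']]
print(flat)  # [{'id': 1}, {'id': 2}, {'id': 3}]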
moonlitesolutions/SolrClient
SolrClient/solrresp.py
SolrResponse.get_facets
def get_facets(self): ''' Returns a dictionary of facets:: >>> res = solr.query('SolrClient_unittest',{ 'q':'product_name:Lorem', 'facet':True, 'facet.field':'facet_test', })... ... ... ... >>> res.get_results_count() 4 >>> res.get_facets() {'facet_test': {'ipsum': 0, 'sit': 0, 'dolor': 2, 'amet,': 1, 'Lorem': 1}} ''' if not hasattr(self,'facets'): self.facets = {} data = self.data if 'facet_counts' in data.keys() and type(data['facet_counts']) == dict: if 'facet_fields' in data['facet_counts'].keys() and type(data['facet_counts']['facet_fields']) == dict: for facetfield in data['facet_counts']['facet_fields']: if type(data['facet_counts']['facet_fields'][facetfield] == list): l = data['facet_counts']['facet_fields'][facetfield] self.facets[facetfield] = OrderedDict(zip(l[::2],l[1::2])) return self.facets else: raise SolrResponseError("No Facet Information in the Response") else: return self.facets
python
def get_facets(self): ''' Returns a dictionary of facets:: >>> res = solr.query('SolrClient_unittest',{ 'q':'product_name:Lorem', 'facet':True, 'facet.field':'facet_test', })... ... ... ... >>> res.get_results_count() 4 >>> res.get_facets() {'facet_test': {'ipsum': 0, 'sit': 0, 'dolor': 2, 'amet,': 1, 'Lorem': 1}} ''' if not hasattr(self,'facets'): self.facets = {} data = self.data if 'facet_counts' in data.keys() and type(data['facet_counts']) == dict: if 'facet_fields' in data['facet_counts'].keys() and type(data['facet_counts']['facet_fields']) == dict: for facetfield in data['facet_counts']['facet_fields']: if type(data['facet_counts']['facet_fields'][facetfield] == list): l = data['facet_counts']['facet_fields'][facetfield] self.facets[facetfield] = OrderedDict(zip(l[::2],l[1::2])) return self.facets else: raise SolrResponseError("No Facet Information in the Response") else: return self.facets
[ "def", "get_facets", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'facets'", ")", ":", "self", ".", "facets", "=", "{", "}", "data", "=", "self", ".", "data", "if", "'facet_counts'", "in", "data", ".", "keys", "(", ")", "and", "type", "(", "data", "[", "'facet_counts'", "]", ")", "==", "dict", ":", "if", "'facet_fields'", "in", "data", "[", "'facet_counts'", "]", ".", "keys", "(", ")", "and", "type", "(", "data", "[", "'facet_counts'", "]", "[", "'facet_fields'", "]", ")", "==", "dict", ":", "for", "facetfield", "in", "data", "[", "'facet_counts'", "]", "[", "'facet_fields'", "]", ":", "if", "type", "(", "data", "[", "'facet_counts'", "]", "[", "'facet_fields'", "]", "[", "facetfield", "]", "==", "list", ")", ":", "l", "=", "data", "[", "'facet_counts'", "]", "[", "'facet_fields'", "]", "[", "facetfield", "]", "self", ".", "facets", "[", "facetfield", "]", "=", "OrderedDict", "(", "zip", "(", "l", "[", ":", ":", "2", "]", ",", "l", "[", "1", ":", ":", "2", "]", ")", ")", "return", "self", ".", "facets", "else", ":", "raise", "SolrResponseError", "(", "\"No Facet Information in the Response\"", ")", "else", ":", "return", "self", ".", "facets" ]
Returns a dictionary of facets:: >>> res = solr.query('SolrClient_unittest',{ 'q':'product_name:Lorem', 'facet':True, 'facet.field':'facet_test', })... ... ... ... >>> res.get_results_count() 4 >>> res.get_facets() {'facet_test': {'ipsum': 0, 'sit': 0, 'dolor': 2, 'amet,': 1, 'Lorem': 1}}
[ "Returns", "a", "dictionary", "of", "facets", "::" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L110-L138
train
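The key step in get_facets() is turning Solr's alternating value/count list into a mapping; a worked example of that slicing with made-up facet counts.

from collections import OrderedDict

# Solr returns field facets as a flat alternating list: value, count, value, count, ...
flat_counts = ['Lorem', 9, 'ipsum', 6, 'dolor', 10]

# Same slicing used in get_facets(): even positions are values, odd positions are counts.
facets = OrderedDict(zip(flat_counts[::2], flat_counts[1::2]))
print(facets)  # OrderedDict([('Lorem', 9), ('ipsum', 6), ('dolor', 10)])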
moonlitesolutions/SolrClient
SolrClient/solrresp.py
SolrResponse.get_facets_ranges
def get_facets_ranges(self): ''' Returns query facet ranges :: >>> res = solr.query('SolrClient_unittest',{ 'q':'*:*', 'facet':True, 'facet.range':'price', 'facet.range.start':0, 'facet.range.end':100, 'facet.range.gap':10 }) >>> res.get_facets_ranges() {'price': {'80': 9, '10': 5, '50': 3, '20': 7, '90': 3, '70': 4, '60': 7, '0': 3, '40': 5, '30': 4}} ''' if not hasattr(self,'facet_ranges'): self.facet_ranges = {} data = self.data if 'facet_counts' in data.keys() and type(data['facet_counts']) == dict: if 'facet_ranges' in data['facet_counts'].keys() and type(data['facet_counts']['facet_ranges']) == dict: for facetfield in data['facet_counts']['facet_ranges']: if type(data['facet_counts']['facet_ranges'][facetfield]['counts']) == list: l = data['facet_counts']['facet_ranges'][facetfield]['counts'] self.facet_ranges[facetfield] = OrderedDict(zip(l[::2],l[1::2])) return self.facet_ranges else: raise SolrResponseError("No Facet Ranges in the Response") else: return self.facet_ranges
python
def get_facets_ranges(self): ''' Returns query facet ranges :: >>> res = solr.query('SolrClient_unittest',{ 'q':'*:*', 'facet':True, 'facet.range':'price', 'facet.range.start':0, 'facet.range.end':100, 'facet.range.gap':10 }) >>> res.get_facets_ranges() {'price': {'80': 9, '10': 5, '50': 3, '20': 7, '90': 3, '70': 4, '60': 7, '0': 3, '40': 5, '30': 4}} ''' if not hasattr(self,'facet_ranges'): self.facet_ranges = {} data = self.data if 'facet_counts' in data.keys() and type(data['facet_counts']) == dict: if 'facet_ranges' in data['facet_counts'].keys() and type(data['facet_counts']['facet_ranges']) == dict: for facetfield in data['facet_counts']['facet_ranges']: if type(data['facet_counts']['facet_ranges'][facetfield]['counts']) == list: l = data['facet_counts']['facet_ranges'][facetfield]['counts'] self.facet_ranges[facetfield] = OrderedDict(zip(l[::2],l[1::2])) return self.facet_ranges else: raise SolrResponseError("No Facet Ranges in the Response") else: return self.facet_ranges
[ "def", "get_facets_ranges", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'facet_ranges'", ")", ":", "self", ".", "facet_ranges", "=", "{", "}", "data", "=", "self", ".", "data", "if", "'facet_counts'", "in", "data", ".", "keys", "(", ")", "and", "type", "(", "data", "[", "'facet_counts'", "]", ")", "==", "dict", ":", "if", "'facet_ranges'", "in", "data", "[", "'facet_counts'", "]", ".", "keys", "(", ")", "and", "type", "(", "data", "[", "'facet_counts'", "]", "[", "'facet_ranges'", "]", ")", "==", "dict", ":", "for", "facetfield", "in", "data", "[", "'facet_counts'", "]", "[", "'facet_ranges'", "]", ":", "if", "type", "(", "data", "[", "'facet_counts'", "]", "[", "'facet_ranges'", "]", "[", "facetfield", "]", "[", "'counts'", "]", ")", "==", "list", ":", "l", "=", "data", "[", "'facet_counts'", "]", "[", "'facet_ranges'", "]", "[", "facetfield", "]", "[", "'counts'", "]", "self", ".", "facet_ranges", "[", "facetfield", "]", "=", "OrderedDict", "(", "zip", "(", "l", "[", ":", ":", "2", "]", ",", "l", "[", "1", ":", ":", "2", "]", ")", ")", "return", "self", ".", "facet_ranges", "else", ":", "raise", "SolrResponseError", "(", "\"No Facet Ranges in the Response\"", ")", "else", ":", "return", "self", ".", "facet_ranges" ]
Returns query facet ranges :: >>> res = solr.query('SolrClient_unittest',{ 'q':'*:*', 'facet':True, 'facet.range':'price', 'facet.range.start':0, 'facet.range.end':100, 'facet.range.gap':10 }) >>> res.get_facets_ranges() {'price': {'80': 9, '10': 5, '50': 3, '20': 7, '90': 3, '70': 4, '60': 7, '0': 3, '40': 5, '30': 4}}
[ "Returns", "query", "facet", "ranges", "::" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L150-L179
train
moonlitesolutions/SolrClient
SolrClient/solrresp.py
SolrResponse.get_facet_pivot
def get_facet_pivot(self): ''' Parses facet pivot response. Example:: >>> res = solr.query('SolrClient_unittest',{ 'q':'*:*', 'fq':'price:[50 TO *]', 'facet':True, 'facet.pivot':'facet_test,price' #Note how there is no space between fields. They are just separated by commas }) >>> res.get_facet_pivot() {'facet_test,price': {'Lorem': {89: 1, 75: 1}, 'ipsum': {53: 1, 70: 1, 55: 1, 89: 1, 74: 1, 93: 1, 79: 1}, 'dolor': {61: 1, 94: 1}, 'sit': {99: 1, 50: 1, 67: 1, 52: 1, 54: 1, 71: 1, 72: 1, 84: 1, 62: 1}, 'amet,': {68: 1}}} This method has built in recursion and can support indefinite number of facets. However, note that the output format is significantly massaged since Solr by default outputs a list of fields in each pivot field. ''' if not hasattr(self,'facet_pivot'): self.facet_pivot = {} if 'facet_counts' in self.data.keys(): pivots = self.data['facet_counts']['facet_pivot'] for fieldset in pivots: self.facet_pivot[fieldset] = {} for sub_field_set in pivots[fieldset]: res = self._rec_subfield(sub_field_set) self.facet_pivot[fieldset].update(res) return self.facet_pivot else: return self.facet_pivot
python
def get_facet_pivot(self): ''' Parses facet pivot response. Example:: >>> res = solr.query('SolrClient_unittest',{ 'q':'*:*', 'fq':'price:[50 TO *]', 'facet':True, 'facet.pivot':'facet_test,price' #Note how there is no space between fields. They are just separated by commas }) >>> res.get_facet_pivot() {'facet_test,price': {'Lorem': {89: 1, 75: 1}, 'ipsum': {53: 1, 70: 1, 55: 1, 89: 1, 74: 1, 93: 1, 79: 1}, 'dolor': {61: 1, 94: 1}, 'sit': {99: 1, 50: 1, 67: 1, 52: 1, 54: 1, 71: 1, 72: 1, 84: 1, 62: 1}, 'amet,': {68: 1}}} This method has built in recursion and can support indefinite number of facets. However, note that the output format is significantly massaged since Solr by default outputs a list of fields in each pivot field. ''' if not hasattr(self,'facet_pivot'): self.facet_pivot = {} if 'facet_counts' in self.data.keys(): pivots = self.data['facet_counts']['facet_pivot'] for fieldset in pivots: self.facet_pivot[fieldset] = {} for sub_field_set in pivots[fieldset]: res = self._rec_subfield(sub_field_set) self.facet_pivot[fieldset].update(res) return self.facet_pivot else: return self.facet_pivot
[ "def", "get_facet_pivot", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'facet_pivot'", ")", ":", "self", ".", "facet_pivot", "=", "{", "}", "if", "'facet_counts'", "in", "self", ".", "data", ".", "keys", "(", ")", ":", "pivots", "=", "self", ".", "data", "[", "'facet_counts'", "]", "[", "'facet_pivot'", "]", "for", "fieldset", "in", "pivots", ":", "self", ".", "facet_pivot", "[", "fieldset", "]", "=", "{", "}", "for", "sub_field_set", "in", "pivots", "[", "fieldset", "]", ":", "res", "=", "self", ".", "_rec_subfield", "(", "sub_field_set", ")", "self", ".", "facet_pivot", "[", "fieldset", "]", ".", "update", "(", "res", ")", "return", "self", ".", "facet_pivot", "else", ":", "return", "self", ".", "facet_pivot" ]
Parses facet pivot response. Example:: >>> res = solr.query('SolrClient_unittest',{ 'q':'*:*', 'fq':'price:[50 TO *]', 'facet':True, 'facet.pivot':'facet_test,price' #Note how there is no space between fields. They are just separated by commas }) >>> res.get_facet_pivot() {'facet_test,price': {'Lorem': {89: 1, 75: 1}, 'ipsum': {53: 1, 70: 1, 55: 1, 89: 1, 74: 1, 93: 1, 79: 1}, 'dolor': {61: 1, 94: 1}, 'sit': {99: 1, 50: 1, 67: 1, 52: 1, 54: 1, 71: 1, 72: 1, 84: 1, 62: 1}, 'amet,': {68: 1}}} This method has built in recursion and can support indefinite number of facets. However, note that the output format is significantly massaged since Solr by default outputs a list of fields in each pivot field.
[ "Parses", "facet", "pivot", "response", ".", "Example", "::", ">>>", "res", "=", "solr", ".", "query", "(", "SolrClient_unittest", "{", "q", ":", "*", ":", "*", "fq", ":", "price", ":", "[", "50", "TO", "*", "]", "facet", ":", "True", "facet", ".", "pivot", ":", "facet_test", "price", "#Note", "how", "there", "is", "no", "space", "between", "fields", ".", "They", "are", "just", "separated", "by", "commas", "}", ")", ">>>", "res", ".", "get_facet_pivot", "()", "{", "facet_test", "price", ":", "{", "Lorem", ":", "{", "89", ":", "1", "75", ":", "1", "}", "ipsum", ":", "{", "53", ":", "1", "70", ":", "1", "55", ":", "1", "89", ":", "1", "74", ":", "1", "93", ":", "1", "79", ":", "1", "}", "dolor", ":", "{", "61", ":", "1", "94", ":", "1", "}", "sit", ":", "{", "99", ":", "1", "50", ":", "1", "67", ":", "1", "52", ":", "1", "54", ":", "1", "71", ":", "1", "72", ":", "1", "84", ":", "1", "62", ":", "1", "}", "amet", ":", "{", "68", ":", "1", "}}}" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L182-L207
train
moonlitesolutions/SolrClient
SolrClient/solrresp.py
SolrResponse.get_field_values_as_list
def get_field_values_as_list(self,field): ''' :param str field: The name of the field for which to pull in values. Will parse the query results (must be ungrouped) and return all values of 'field' as a list. Note that these are not unique values. Example:: >>> r.get_field_values_as_list('product_name_exact') ['Mauris risus risus lacus. sit', 'dolor auctor Vivamus fringilla. vulputate', 'semper nisi lacus nulla sed', 'vel amet diam sed posuere', 'vitae neque ultricies, Phasellus ac', 'consectetur nisi orci, eu diam', 'sapien, nisi accumsan accumsan In', 'ligula. odio ipsum sit vel', 'tempus orci. elit, Ut nisl.', 'neque nisi Integer nisi Lorem'] ''' return [doc[field] for doc in self.docs if field in doc]
python
def get_field_values_as_list(self,field): ''' :param str field: The name of the field for which to pull in values. Will parse the query results (must be ungrouped) and return all values of 'field' as a list. Note that these are not unique values. Example:: >>> r.get_field_values_as_list('product_name_exact') ['Mauris risus risus lacus. sit', 'dolor auctor Vivamus fringilla. vulputate', 'semper nisi lacus nulla sed', 'vel amet diam sed posuere', 'vitae neque ultricies, Phasellus ac', 'consectetur nisi orci, eu diam', 'sapien, nisi accumsan accumsan In', 'ligula. odio ipsum sit vel', 'tempus orci. elit, Ut nisl.', 'neque nisi Integer nisi Lorem'] ''' return [doc[field] for doc in self.docs if field in doc]
[ "def", "get_field_values_as_list", "(", "self", ",", "field", ")", ":", "return", "[", "doc", "[", "field", "]", "for", "doc", "in", "self", ".", "docs", "if", "field", "in", "doc", "]" ]
:param str field: The name of the field for which to pull in values. Will parse the query results (must be ungrouped) and return all values of 'field' as a list. Note that these are not unique values. Example:: >>> r.get_field_values_as_list('product_name_exact') ['Mauris risus risus lacus. sit', 'dolor auctor Vivamus fringilla. vulputate', 'semper nisi lacus nulla sed', 'vel amet diam sed posuere', 'vitae neque ultricies, Phasellus ac', 'consectetur nisi orci, eu diam', 'sapien, nisi accumsan accumsan In', 'ligula. odio ipsum sit vel', 'tempus orci. elit, Ut nisl.', 'neque nisi Integer nisi Lorem']
[ ":", "param", "str", "field", ":", "The", "name", "of", "the", "field", "for", "which", "to", "pull", "in", "values", ".", "Will", "parse", "the", "query", "results", "(", "must", "be", "ungrouped", ")", "and", "return", "all", "values", "of", "field", "as", "a", "list", ".", "Note", "that", "these", "are", "not", "unique", "values", ".", "Example", "::" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L222-L231
train
moonlitesolutions/SolrClient
SolrClient/solrresp.py
SolrResponse.get_first_field_values_as_list
def get_first_field_values_as_list(self, field): ''' :param str field: The name of the field for lookup. Goes through all documents returned looking for specified field. At first encounter will return the field's value. ''' for doc in self.docs: if field in doc.keys(): return doc[field] raise SolrResponseError("No field in result set")
python
def get_first_field_values_as_list(self, field): ''' :param str field: The name of the field for lookup. Goes through all documents returned looking for specified field. At first encounter will return the field's value. ''' for doc in self.docs: if field in doc.keys(): return doc[field] raise SolrResponseError("No field in result set")
[ "def", "get_first_field_values_as_list", "(", "self", ",", "field", ")", ":", "for", "doc", "in", "self", ".", "docs", ":", "if", "field", "in", "doc", ".", "keys", "(", ")", ":", "return", "doc", "[", "field", "]", "raise", "SolrResponseError", "(", "\"No field in result set\"", ")" ]
:param str field: The name of the field for lookup. Goes through all documents returned looking for specified field. At first encounter will return the field's value.
[ ":", "param", "str", "field", ":", "The", "name", "of", "the", "field", "for", "lookup", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L270-L279
train
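A quick illustration of the first-match behaviour described above, over invented docs; note that despite the name it returns the first matching document's value, which is only a list when the field itself is multi-valued.

# Invented result docs; only some of them carry the 'tags' field.
docs = [{'id': 1}, {'id': 2, 'tags': ['a', 'b']}, {'id': 3, 'tags': ['c']}]

def first_field_value(docs, field):
    # Same scan as get_first_field_values_as_list(); the real method raises
    # SolrResponseError instead of KeyError when no document has the field.
    for doc in docs:
        if field in doc:
            return doc[field]
    raise KeyError('No field in result set')

print(first_field_value(docs, 'tags'))  # ['a', 'b']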
moonlitesolutions/SolrClient
SolrClient/solrresp.py
SolrResponse.get_facet_values_as_list
def get_facet_values_as_list(self, field): ''' :param str field: Name of facet field to retrieve values from. Returns facet values as list for a given field. Example:: >>> res = solr.query('SolrClient_unittest',{ 'q':'*:*', 'facet':'true', 'facet.field':'facet_test', }) >>> res.get_facet_values_as_list('facet_test') [9, 6, 14, 10, 11] >>> res.get_facets() {'facet_test': {'Lorem': 9, 'ipsum': 6, 'amet,': 14, 'dolor': 10, 'sit': 11}} ''' facets = self.get_facets() out = [] if field in facets.keys(): for facetfield in facets[field]: out.append(facets[field][facetfield]) return out else: raise SolrResponseError("No field in facet output")
python
def get_facet_values_as_list(self, field): ''' :param str field: Name of facet field to retrieve values from. Returns facet values as list for a given field. Example:: >>> res = solr.query('SolrClient_unittest',{ 'q':'*:*', 'facet':'true', 'facet.field':'facet_test', }) >>> res.get_facet_values_as_list('facet_test') [9, 6, 14, 10, 11] >>> res.get_facets() {'facet_test': {'Lorem': 9, 'ipsum': 6, 'amet,': 14, 'dolor': 10, 'sit': 11}} ''' facets = self.get_facets() out = [] if field in facets.keys(): for facetfield in facets[field]: out.append(facets[field][facetfield]) return out else: raise SolrResponseError("No field in facet output")
[ "def", "get_facet_values_as_list", "(", "self", ",", "field", ")", ":", "facets", "=", "self", ".", "get_facets", "(", ")", "out", "=", "[", "]", "if", "field", "in", "facets", ".", "keys", "(", ")", ":", "for", "facetfield", "in", "facets", "[", "field", "]", ":", "out", ".", "append", "(", "facets", "[", "field", "]", "[", "facetfield", "]", ")", "return", "out", "else", ":", "raise", "SolrResponseError", "(", "\"No field in facet output\"", ")" ]
:param str field: Name of facet field to retrieve values from. Returns facet values as list for a given field. Example:: >>> res = solr.query('SolrClient_unittest',{ 'q':'*:*', 'facet':'true', 'facet.field':'facet_test', }) >>> res.get_facet_values_as_list('facet_test') [9, 6, 14, 10, 11] >>> res.get_facets() {'facet_test': {'Lorem': 9, 'ipsum': 6, 'amet,': 14, 'dolor': 10, 'sit': 11}}
[ ":", "param", "str", "field", ":", "Name", "of", "facet", "field", "to", "retrieve", "values", "from", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L281-L305
train
moonlitesolutions/SolrClient
SolrClient/solrresp.py
SolrResponse.get_facet_keys_as_list
def get_facet_keys_as_list(self,field): ''' :param str field: Name of facet field to retrieve keys from. Similar to get_facet_values_as_list but returns the list of keys as a list instead. Example:: >>> r.get_facet_keys_as_list('facet_test') ['Lorem', 'ipsum', 'amet,', 'dolor', 'sit'] ''' facets = self.get_facets() if facets == -1: return facets if field in facets.keys(): return [x for x in facets[field]]
python
def get_facet_keys_as_list(self,field): ''' :param str field: Name of facet field to retrieve keys from. Similar to get_facet_values_as_list but returns the list of keys as a list instead. Example:: >>> r.get_facet_keys_as_list('facet_test') ['Lorem', 'ipsum', 'amet,', 'dolor', 'sit'] ''' facets = self.get_facets() if facets == -1: return facets if field in facets.keys(): return [x for x in facets[field]]
[ "def", "get_facet_keys_as_list", "(", "self", ",", "field", ")", ":", "facets", "=", "self", ".", "get_facets", "(", ")", "if", "facets", "==", "-", "1", ":", "return", "facets", "if", "field", "in", "facets", ".", "keys", "(", ")", ":", "return", "[", "x", "for", "x", "in", "facets", "[", "field", "]", "]" ]
:param str field: Name of facet field to retrieve keys from. Similar to get_facet_values_as_list but returns the list of keys as a list instead. Example:: >>> r.get_facet_keys_as_list('facet_test') ['Lorem', 'ipsum', 'amet,', 'dolor', 'sit']
[ ":", "param", "str", "field", ":", "Name", "of", "facet", "field", "to", "retrieve", "keys", "from", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L307-L322
train
moonlitesolutions/SolrClient
SolrClient/solrresp.py
SolrResponse.json_facet
def json_facet(self, field=None): ''' EXPERIMENTAL Tried to kick back the json.fact output. ''' facets = self.data['facets'] if field is None: temp_fields = [x for x in facets.keys() if x != 'count'] if len(temp_fields) != 1: raise ValueError("field argument not specified and it looks like there is more than one field in facets. Specify the field to get json.facet from. ") field = temp_fields[0] if field not in self.data['facets']: raise ValueError("Facet Field {} Not found in response, available fields are {}".format( field, self.data['facets'].keys() )) return self.data['facets'][field]
python
def json_facet(self, field=None): ''' EXPERIMENTAL Tried to kick back the json.fact output. ''' facets = self.data['facets'] if field is None: temp_fields = [x for x in facets.keys() if x != 'count'] if len(temp_fields) != 1: raise ValueError("field argument not specified and it looks like there is more than one field in facets. Specify the field to get json.facet from. ") field = temp_fields[0] if field not in self.data['facets']: raise ValueError("Facet Field {} Not found in response, available fields are {}".format( field, self.data['facets'].keys() )) return self.data['facets'][field]
[ "def", "json_facet", "(", "self", ",", "field", "=", "None", ")", ":", "facets", "=", "self", ".", "data", "[", "'facets'", "]", "if", "field", "is", "None", ":", "temp_fields", "=", "[", "x", "for", "x", "in", "facets", ".", "keys", "(", ")", "if", "x", "!=", "'count'", "]", "if", "len", "(", "temp_fields", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"field argument not specified and it looks like there is more than one field in facets. Specify the field to get json.facet from. \"", ")", "field", "=", "temp_fields", "[", "0", "]", "if", "field", "not", "in", "self", ".", "data", "[", "'facets'", "]", ":", "raise", "ValueError", "(", "\"Facet Field {} Not found in response, available fields are {}\"", ".", "format", "(", "field", ",", "self", ".", "data", "[", "'facets'", "]", ".", "keys", "(", ")", ")", ")", "return", "self", ".", "data", "[", "'facets'", "]", "[", "field", "]" ]
EXPERIMENTAL Tried to kick back the json.fact output.
[ "EXPERIMENTAL" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L331-L347
train
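A sketch of the field auto-detection in json_facet(): when exactly one facet besides the top-level 'count' is present, that facet is picked automatically; the facets payload here is made up.

# Made-up json.facet output: 'count' plus a single named facet.
facets = {'count': 50, 'by_author': {'buckets': [{'val': 'alice', 'count': 30}]}}

# Same auto-detection json_facet() uses when no field argument is passed.
candidates = [k for k in facets if k != 'count']
assert len(candidates) == 1, 'more than one facet; pass the field explicitly'
field = candidates[0]
print(field, facets[field])  # by_author {'buckets': [{'val': 'alice', 'count': 30}]}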
moonlitesolutions/SolrClient
SolrClient/solrresp.py
SolrResponse.get_jsonfacet_counts_as_dict
def get_jsonfacet_counts_as_dict(self, field, data=None): ''' EXPERIMENTAL Takes facets and returns then as a dictionary that is easier to work with, for example, if you are getting something this:: {'facets': {'count': 50, 'test': {'buckets': [{'count': 10, 'pr': {'buckets': [{'count': 2, 'unique': 1, 'val': 79}, {'count': 1, 'unique': 1, 'val': 9}]}, 'pr_sum': 639.0, 'val': 'consectetur'}, {'count': 8, 'pr': {'buckets': [{'count': 1, 'unique': 1, 'val': 9}, {'count': 1, 'unique': 1, 'val': 31}, {'count': 1, 'unique': 1, 'val': 33}]}, 'pr_sum': 420.0, 'val': 'auctor'}, {'count': 8, 'pr': {'buckets': [{'count': 2, 'unique': 1, 'val': 94}, {'count': 1, 'unique': 1, 'val': 25}]}, 'pr_sum': 501.0, 'val': 'nulla'}]}}} This should return you something like this:: {'test': {'auctor': {'count': 8, 'pr': {9: {'count': 1, 'unique': 1}, 31: {'count': 1, 'unique': 1}, 33: {'count': 1, 'unique': 1}}, 'pr_sum': 420.0}, 'consectetur': {'count': 10, 'pr': {9: {'count': 1, 'unique': 1}, 79: {'count': 2, 'unique': 1}}, 'pr_sum': 639.0}, 'nulla': {'count': 8, 'pr': {25: {'count': 1, 'unique': 1}, 94: {'count': 2, 'unique': 1}}, 'pr_sum': 501.0}}} ''' data = data if data else self.data['facets'] if field not in data: raise ValueError("Field To start Faceting on not specified.") out = { field: self._json_rec_dict(data[field]['buckets']) } return out
python
def get_jsonfacet_counts_as_dict(self, field, data=None): ''' EXPERIMENTAL Takes facets and returns then as a dictionary that is easier to work with, for example, if you are getting something this:: {'facets': {'count': 50, 'test': {'buckets': [{'count': 10, 'pr': {'buckets': [{'count': 2, 'unique': 1, 'val': 79}, {'count': 1, 'unique': 1, 'val': 9}]}, 'pr_sum': 639.0, 'val': 'consectetur'}, {'count': 8, 'pr': {'buckets': [{'count': 1, 'unique': 1, 'val': 9}, {'count': 1, 'unique': 1, 'val': 31}, {'count': 1, 'unique': 1, 'val': 33}]}, 'pr_sum': 420.0, 'val': 'auctor'}, {'count': 8, 'pr': {'buckets': [{'count': 2, 'unique': 1, 'val': 94}, {'count': 1, 'unique': 1, 'val': 25}]}, 'pr_sum': 501.0, 'val': 'nulla'}]}}} This should return you something like this:: {'test': {'auctor': {'count': 8, 'pr': {9: {'count': 1, 'unique': 1}, 31: {'count': 1, 'unique': 1}, 33: {'count': 1, 'unique': 1}}, 'pr_sum': 420.0}, 'consectetur': {'count': 10, 'pr': {9: {'count': 1, 'unique': 1}, 79: {'count': 2, 'unique': 1}}, 'pr_sum': 639.0}, 'nulla': {'count': 8, 'pr': {25: {'count': 1, 'unique': 1}, 94: {'count': 2, 'unique': 1}}, 'pr_sum': 501.0}}} ''' data = data if data else self.data['facets'] if field not in data: raise ValueError("Field To start Faceting on not specified.") out = { field: self._json_rec_dict(data[field]['buckets']) } return out
[ "def", "get_jsonfacet_counts_as_dict", "(", "self", ",", "field", ",", "data", "=", "None", ")", ":", "data", "=", "data", "if", "data", "else", "self", ".", "data", "[", "'facets'", "]", "if", "field", "not", "in", "data", ":", "raise", "ValueError", "(", "\"Field To start Faceting on not specified.\"", ")", "out", "=", "{", "field", ":", "self", ".", "_json_rec_dict", "(", "data", "[", "field", "]", "[", "'buckets'", "]", ")", "}", "return", "out" ]
EXPERIMENTAL Takes facets and returns then as a dictionary that is easier to work with, for example, if you are getting something this:: {'facets': {'count': 50, 'test': {'buckets': [{'count': 10, 'pr': {'buckets': [{'count': 2, 'unique': 1, 'val': 79}, {'count': 1, 'unique': 1, 'val': 9}]}, 'pr_sum': 639.0, 'val': 'consectetur'}, {'count': 8, 'pr': {'buckets': [{'count': 1, 'unique': 1, 'val': 9}, {'count': 1, 'unique': 1, 'val': 31}, {'count': 1, 'unique': 1, 'val': 33}]}, 'pr_sum': 420.0, 'val': 'auctor'}, {'count': 8, 'pr': {'buckets': [{'count': 2, 'unique': 1, 'val': 94}, {'count': 1, 'unique': 1, 'val': 25}]}, 'pr_sum': 501.0, 'val': 'nulla'}]}}} This should return you something like this:: {'test': {'auctor': {'count': 8, 'pr': {9: {'count': 1, 'unique': 1}, 31: {'count': 1, 'unique': 1}, 33: {'count': 1, 'unique': 1}}, 'pr_sum': 420.0}, 'consectetur': {'count': 10, 'pr': {9: {'count': 1, 'unique': 1}, 79: {'count': 2, 'unique': 1}}, 'pr_sum': 639.0}, 'nulla': {'count': 8, 'pr': {25: {'count': 1, 'unique': 1}, 94: {'count': 2, 'unique': 1}}, 'pr_sum': 501.0}}}
[ "EXPERIMENTAL", "Takes", "facets", "and", "returns", "then", "as", "a", "dictionary", "that", "is", "easier", "to", "work", "with", "for", "example", "if", "you", "are", "getting", "something", "this", "::" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L349-L394
train
moonlitesolutions/SolrClient
SolrClient/indexq.py
IndexQ._gen_file_name
def _gen_file_name(self): ''' Generates a random file name based on self._output_filename_pattern for the output to do file. ''' date = datetime.datetime.now() dt = "{}-{}-{}-{}-{}-{}-{}".format(str(date.year),str(date.month),str(date.day),str(date.hour),str(date.minute),str(date.second),str(random.randint(0,10000))) return self._output_filename_pattern.format(dt)
python
def _gen_file_name(self): ''' Generates a random file name based on self._output_filename_pattern for the output to do file. ''' date = datetime.datetime.now() dt = "{}-{}-{}-{}-{}-{}-{}".format(str(date.year),str(date.month),str(date.day),str(date.hour),str(date.minute),str(date.second),str(random.randint(0,10000))) return self._output_filename_pattern.format(dt)
[ "def", "_gen_file_name", "(", "self", ")", ":", "date", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "dt", "=", "\"{}-{}-{}-{}-{}-{}-{}\"", ".", "format", "(", "str", "(", "date", ".", "year", ")", ",", "str", "(", "date", ".", "month", ")", ",", "str", "(", "date", ".", "day", ")", ",", "str", "(", "date", ".", "hour", ")", ",", "str", "(", "date", ".", "minute", ")", ",", "str", "(", "date", ".", "second", ")", ",", "str", "(", "random", ".", "randint", "(", "0", ",", "10000", ")", ")", ")", "return", "self", ".", "_output_filename_pattern", ".", "format", "(", "dt", ")" ]
Generates a random file name based on self._output_filename_pattern for the output to do file.
[ "Generates", "a", "random", "file", "name", "based", "on", "self", ".", "_output_filename_pattern", "for", "the", "output", "to", "do", "file", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/indexq.py#L74-L80
train
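A sketch of the timestamp-plus-random-suffix naming used above; the filename pattern here is a placeholder, since the real one comes from the IndexQ configuration.

import datetime
import random

# Placeholder for self._output_filename_pattern; the real pattern is set on the queue.
pattern = 'myqueue_{}.json'

date = datetime.datetime.now()
dt = '{}-{}-{}-{}-{}-{}-{}'.format(date.year, date.month, date.day,
                                   date.hour, date.minute, date.second,
                                   random.randint(0, 10000))
print(pattern.format(dt))  # e.g. myqueue_2024-1-2-13-5-9-4821.json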
moonlitesolutions/SolrClient
SolrClient/indexq.py
IndexQ.add
def add(self, item=None, finalize=False, callback=None): ''' Takes a string, dictionary or list of items for adding to queue. To help troubleshoot it will output the updated buffer size, however when the content gets written it will output the file path of the new file. Generally this can be safely discarded. :param <dict,list> item: Item to add to the queue. If dict will be converted directly to a list and then to json. List must be a list of dictionaries. If a string is submitted, it will be written out as-is immediately and not buffered. :param bool finalize: If items are buffered internally, it will flush them to disk and return the file name. :param callback: A callback function that will be called when the item gets written to disk. It will be passed one position argument, the file path of the file written. Note that errors from the callback method will not be re-raised here. ''' if item: if type(item) is list: check = list(set([type(d) for d in item])) if len(check) > 1 or dict not in check: raise ValueError("More than one data type detected in item (list). Make sure they are all dicts of data going to Solr") elif type(item) is dict: item = [item] elif type(item) is str: return self._write_file(item) else: raise ValueError("Not the right data submitted. Make sure you are sending a dict or list of dicts") with self._rlock: res = self._preprocess(item, finalize, callback) return res
python
def add(self, item=None, finalize=False, callback=None): ''' Takes a string, dictionary or list of items for adding to queue. To help troubleshoot it will output the updated buffer size, however when the content gets written it will output the file path of the new file. Generally this can be safely discarded. :param <dict,list> item: Item to add to the queue. If dict will be converted directly to a list and then to json. List must be a list of dictionaries. If a string is submitted, it will be written out as-is immediately and not buffered. :param bool finalize: If items are buffered internally, it will flush them to disk and return the file name. :param callback: A callback function that will be called when the item gets written to disk. It will be passed one position argument, the file path of the file written. Note that errors from the callback method will not be re-raised here. ''' if item: if type(item) is list: check = list(set([type(d) for d in item])) if len(check) > 1 or dict not in check: raise ValueError("More than one data type detected in item (list). Make sure they are all dicts of data going to Solr") elif type(item) is dict: item = [item] elif type(item) is str: return self._write_file(item) else: raise ValueError("Not the right data submitted. Make sure you are sending a dict or list of dicts") with self._rlock: res = self._preprocess(item, finalize, callback) return res
[ "def", "add", "(", "self", ",", "item", "=", "None", ",", "finalize", "=", "False", ",", "callback", "=", "None", ")", ":", "if", "item", ":", "if", "type", "(", "item", ")", "is", "list", ":", "check", "=", "list", "(", "set", "(", "[", "type", "(", "d", ")", "for", "d", "in", "item", "]", ")", ")", "if", "len", "(", "check", ")", ">", "1", "or", "dict", "not", "in", "check", ":", "raise", "ValueError", "(", "\"More than one data type detected in item (list). Make sure they are all dicts of data going to Solr\"", ")", "elif", "type", "(", "item", ")", "is", "dict", ":", "item", "=", "[", "item", "]", "elif", "type", "(", "item", ")", "is", "str", ":", "return", "self", ".", "_write_file", "(", "item", ")", "else", ":", "raise", "ValueError", "(", "\"Not the right data submitted. Make sure you are sending a dict or list of dicts\"", ")", "with", "self", ".", "_rlock", ":", "res", "=", "self", ".", "_preprocess", "(", "item", ",", "finalize", ",", "callback", ")", "return", "res" ]
Takes a string, dictionary or list of items for adding to queue. To help troubleshoot it will output the updated buffer size, however when the content gets written it will output the file path of the new file. Generally this can be safely discarded. :param <dict,list> item: Item to add to the queue. If dict will be converted directly to a list and then to json. List must be a list of dictionaries. If a string is submitted, it will be written out as-is immediately and not buffered. :param bool finalize: If items are buffered internally, it will flush them to disk and return the file name. :param callback: A callback function that will be called when the item gets written to disk. It will be passed one position argument, the file path of the file written. Note that errors from the callback method will not be re-raised here.
[ "Takes", "a", "string", "dictionary", "or", "list", "of", "items", "for", "adding", "to", "queue", ".", "To", "help", "troubleshoot", "it", "will", "output", "the", "updated", "buffer", "size", "however", "when", "the", "content", "gets", "written", "it", "will", "output", "the", "file", "path", "of", "the", "new", "file", ".", "Generally", "this", "can", "be", "safely", "discarded", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/indexq.py#L83-L104
train
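A hedged usage sketch for add(): dicts are buffered in memory and finalize=True flushes them to a todo file; the constructor arguments (base path, queue name, buffer size in MB) are assumptions, so check the IndexQ signature for your version before relying on them.

from SolrClient import IndexQ  # assumes SolrClient is installed

# Assumed constructor: base path, queue name, in-memory buffer threshold in MB.
index = IndexQ('/tmp/indexq', 'demo_queue', size=1)

# Dicts and lists of dicts are buffered; a plain str would be written out immediately.
index.add({'id': '1', 'title': 'first'})
index.add([{'id': '2'}, {'id': '3'}])

# finalize=True flushes the buffer and returns the path of the todo file it wrote.
todo_file = index.add(finalize=True)
print(todo_file)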
moonlitesolutions/SolrClient
SolrClient/indexq.py
IndexQ._lock
def _lock(self): ''' Locks, or returns False if already locked ''' if not self._is_locked(): with open(self._lck,'w') as fh: if self._devel: self.logger.debug("Locking") fh.write(str(os.getpid())) return True else: return False
python
def _lock(self): ''' Locks, or returns False if already locked ''' if not self._is_locked(): with open(self._lck,'w') as fh: if self._devel: self.logger.debug("Locking") fh.write(str(os.getpid())) return True else: return False
[ "def", "_lock", "(", "self", ")", ":", "if", "not", "self", ".", "_is_locked", "(", ")", ":", "with", "open", "(", "self", ".", "_lck", ",", "'w'", ")", "as", "fh", ":", "if", "self", ".", "_devel", ":", "self", ".", "logger", ".", "debug", "(", "\"Locking\"", ")", "fh", ".", "write", "(", "str", "(", "os", ".", "getpid", "(", ")", ")", ")", "return", "True", "else", ":", "return", "False" ]
Locks, or returns False if already locked
[ "Locks", "or", "returns", "False", "if", "already", "locked" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/indexq.py#L167-L177
train
moonlitesolutions/SolrClient
SolrClient/indexq.py
IndexQ._is_locked
def _is_locked(self): ''' Checks to see if we are already pulling items from the queue ''' if os.path.isfile(self._lck): try: import psutil except ImportError: return True #Lock file exists and no psutil #If psutil is imported with open(self._lck) as f: pid = f.read() return True if psutil.pid_exists(int(pid)) else False else: return False
python
def _is_locked(self): ''' Checks to see if we are already pulling items from the queue ''' if os.path.isfile(self._lck): try: import psutil except ImportError: return True #Lock file exists and no psutil #If psutil is imported with open(self._lck) as f: pid = f.read() return True if psutil.pid_exists(int(pid)) else False else: return False
[ "def", "_is_locked", "(", "self", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "self", ".", "_lck", ")", ":", "try", ":", "import", "psutil", "except", "ImportError", ":", "return", "True", "#Lock file exists and no psutil", "#If psutil is imported", "with", "open", "(", "self", ".", "_lck", ")", "as", "f", ":", "pid", "=", "f", ".", "read", "(", ")", "return", "True", "if", "psutil", ".", "pid_exists", "(", "int", "(", "pid", ")", ")", "else", "False", "else", ":", "return", "False" ]
Checks to see if we are already pulling items from the queue
[ "Checks", "to", "see", "if", "we", "are", "already", "pulling", "items", "from", "the", "queue" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/indexq.py#L180-L194
train
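The lock used by IndexQ is just a file holding the owning PID; a compact self-contained sketch of that idea, with the same psutil fallback as _is_locked(): without psutil an existing lock file is assumed to be live, with psutil stale locks from dead processes are ignored.

import os
import tempfile

LOCK = os.path.join(tempfile.gettempdir(), 'demo_indexq.lock')

def is_locked():
    # Mirrors _is_locked(): no file means unlocked; otherwise check whether the
    # recorded PID still belongs to a running process (only possible with psutil).
    if not os.path.isfile(LOCK):
        return False
    try:
        import psutil
    except ImportError:
        return True
    with open(LOCK) as fh:
        pid = int(fh.read())
    return psutil.pid_exists(pid)

def lock():
    # Mirrors _lock(): refuse if a live lock exists, otherwise record our PID.
    if is_locked():
        return False
    with open(LOCK, 'w') as fh:
        fh.write(str(os.getpid()))
    return True

print(lock())       # True on the first call
print(is_locked())  # True while this process holds the lock
os.remove(LOCK)     # mirrors _unlock()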
moonlitesolutions/SolrClient
SolrClient/indexq.py
IndexQ._unlock
def _unlock(self): ''' Unlocks the index ''' if self._devel: self.logger.debug("Unlocking Index") if self._is_locked(): os.remove(self._lck) return True else: return True
python
def _unlock(self): ''' Unlocks the index ''' if self._devel: self.logger.debug("Unlocking Index") if self._is_locked(): os.remove(self._lck) return True else: return True
[ "def", "_unlock", "(", "self", ")", ":", "if", "self", ".", "_devel", ":", "self", ".", "logger", ".", "debug", "(", "\"Unlocking Index\"", ")", "if", "self", ".", "_is_locked", "(", ")", ":", "os", ".", "remove", "(", "self", ".", "_lck", ")", "return", "True", "else", ":", "return", "True" ]
Unlocks the index
[ "Unlocks", "the", "index" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/indexq.py#L197-L206
train
moonlitesolutions/SolrClient
SolrClient/indexq.py
IndexQ.get_all_as_list
def get_all_as_list(self, dir='_todo_dir'): ''' Returns a list of the the full path to all items currently in the todo directory. The items will be listed in ascending order based on filesystem time. This will re-scan the directory on each execution. Do not use this to process items, this method should only be used for troubleshooting or something axillary. To process items use get_todo_items() iterator. ''' dir = getattr(self,dir) list = [x for x in os.listdir(dir) if x.endswith('.json') or x.endswith('.json.gz')] full = [os.path.join(dir,x) for x in list] full.sort(key=lambda x: os.path.getmtime(x)) return full
python
def get_all_as_list(self, dir='_todo_dir'): ''' Returns a list of the the full path to all items currently in the todo directory. The items will be listed in ascending order based on filesystem time. This will re-scan the directory on each execution. Do not use this to process items, this method should only be used for troubleshooting or something axillary. To process items use get_todo_items() iterator. ''' dir = getattr(self,dir) list = [x for x in os.listdir(dir) if x.endswith('.json') or x.endswith('.json.gz')] full = [os.path.join(dir,x) for x in list] full.sort(key=lambda x: os.path.getmtime(x)) return full
[ "def", "get_all_as_list", "(", "self", ",", "dir", "=", "'_todo_dir'", ")", ":", "dir", "=", "getattr", "(", "self", ",", "dir", ")", "list", "=", "[", "x", "for", "x", "in", "os", ".", "listdir", "(", "dir", ")", "if", "x", ".", "endswith", "(", "'.json'", ")", "or", "x", ".", "endswith", "(", "'.json.gz'", ")", "]", "full", "=", "[", "os", ".", "path", ".", "join", "(", "dir", ",", "x", ")", "for", "x", "in", "list", "]", "full", ".", "sort", "(", "key", "=", "lambda", "x", ":", "os", ".", "path", ".", "getmtime", "(", "x", ")", ")", "return", "full" ]
Returns a list of the the full path to all items currently in the todo directory. The items will be listed in ascending order based on filesystem time. This will re-scan the directory on each execution. Do not use this to process items, this method should only be used for troubleshooting or something axillary. To process items use get_todo_items() iterator.
[ "Returns", "a", "list", "of", "the", "the", "full", "path", "to", "all", "items", "currently", "in", "the", "todo", "directory", ".", "The", "items", "will", "be", "listed", "in", "ascending", "order", "based", "on", "filesystem", "time", ".", "This", "will", "re", "-", "scan", "the", "directory", "on", "each", "execution", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/indexq.py#L209-L220
train
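A self-contained sketch of the mtime-ordered listing that get_all_as_list() builds, pointed at a throwaway directory.

import os
import tempfile
import time

todo_dir = tempfile.mkdtemp()
for name in ('older.json', 'newer.json.gz', 'ignored.txt'):
    with open(os.path.join(todo_dir, name), 'w') as fh:
        fh.write('{}')
    time.sleep(0.05)  # spread the modification times so the sort is visible

# Same filtering and mtime sort as get_all_as_list(): only .json / .json.gz, oldest first.
files = [x for x in os.listdir(todo_dir) if x.endswith('.json') or x.endswith('.json.gz')]
ordered = sorted((os.path.join(todo_dir, x) for x in files), key=os.path.getmtime)
print([os.path.basename(p) for p in ordered])  # ['older.json', 'newer.json.gz']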
moonlitesolutions/SolrClient
SolrClient/indexq.py
IndexQ.get_todo_items
def get_todo_items(self, **kwargs): ''' Returns an iterator that will provide each item in the todo queue. Note that to complete each item you have to run complete method with the output of this iterator. That will move the item to the done directory and prevent it from being retrieved in the future. ''' def inner(self): for item in self.get_all_as_list(): yield item self._unlock() if not self._is_locked(): if self._lock(): return inner(self) raise RuntimeError("RuntimeError: Index Already Locked")
python
def get_todo_items(self, **kwargs): ''' Returns an iterator that will provide each item in the todo queue. Note that to complete each item you have to run complete method with the output of this iterator. That will move the item to the done directory and prevent it from being retrieved in the future. ''' def inner(self): for item in self.get_all_as_list(): yield item self._unlock() if not self._is_locked(): if self._lock(): return inner(self) raise RuntimeError("RuntimeError: Index Already Locked")
[ "def", "get_todo_items", "(", "self", ",", "*", "*", "kwargs", ")", ":", "def", "inner", "(", "self", ")", ":", "for", "item", "in", "self", ".", "get_all_as_list", "(", ")", ":", "yield", "item", "self", ".", "_unlock", "(", ")", "if", "not", "self", ".", "_is_locked", "(", ")", ":", "if", "self", ".", "_lock", "(", ")", ":", "return", "inner", "(", "self", ")", "raise", "RuntimeError", "(", "\"RuntimeError: Index Already Locked\"", ")" ]
Returns an iterator that will provide each item in the todo queue. Note that to complete each item you have to run complete method with the output of this iterator. That will move the item to the done directory and prevent it from being retrieved in the future.
[ "Returns", "an", "iterator", "that", "will", "provide", "each", "item", "in", "the", "todo", "queue", ".", "Note", "that", "to", "complete", "each", "item", "you", "have", "to", "run", "complete", "method", "with", "the", "output", "of", "this", "iterator", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/indexq.py#L223-L237
train
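A hedged end-to-end sketch of draining the queue: get_todo_items() locks the index and yields todo files oldest-first, and complete() moves each processed file into the done directory; the constructor arguments are the same assumption as in the add() sketch above.

from SolrClient import IndexQ  # assumes SolrClient is installed

index = IndexQ('/tmp/indexq', 'demo_queue')  # assumed: base path, queue name

for todo_file in index.get_todo_items():
    # ...send the file's contents to Solr here...
    print('processing', todo_file)
    index.complete(todo_file)  # moves it to the done directory (optionally gzipped)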
moonlitesolutions/SolrClient
SolrClient/indexq.py
IndexQ.complete
def complete(self, filepath): ''' Marks the item as complete by moving it to the done directory and optionally gzipping it. ''' if not os.path.exists(filepath): raise FileNotFoundError("Can't Complete {}, it doesn't exist".format(filepath)) if self._devel: self.logger.debug("Completing - {} ".format(filepath)) if self.rotate_complete: try: complete_dir = str(self.rotate_complete()) except Exception as e: self.logger.error("rotate_complete function failed with the following exception.") self.logger.exception(e) raise newdir = os.path.join(self._done_dir, complete_dir) newpath = os.path.join(newdir, os.path.split(filepath)[-1] ) if not os.path.isdir(newdir): self.logger.debug("Making new directory: {}".format(newdir)) os.makedirs(newdir) else: newpath = os.path.join(self._done_dir, os.path.split(filepath)[-1] ) try: if self._compress_complete: if not filepath.endswith('.gz'): # Compressing complete, but existing file not compressed # Compress and move it and kick out newpath += '.gz' self._compress_and_move(filepath, newpath) return newpath # else the file is already compressed and can just be moved #if not compressing completed file, just move it shutil.move(filepath, newpath) self.logger.info(" Completed - {}".format(filepath)) except Exception as e: self.logger.error("Couldn't Complete {}".format(filepath)) self.logger.exception(e) raise return newpath
python
def complete(self, filepath): ''' Marks the item as complete by moving it to the done directory and optionally gzipping it. ''' if not os.path.exists(filepath): raise FileNotFoundError("Can't Complete {}, it doesn't exist".format(filepath)) if self._devel: self.logger.debug("Completing - {} ".format(filepath)) if self.rotate_complete: try: complete_dir = str(self.rotate_complete()) except Exception as e: self.logger.error("rotate_complete function failed with the following exception.") self.logger.exception(e) raise newdir = os.path.join(self._done_dir, complete_dir) newpath = os.path.join(newdir, os.path.split(filepath)[-1] ) if not os.path.isdir(newdir): self.logger.debug("Making new directory: {}".format(newdir)) os.makedirs(newdir) else: newpath = os.path.join(self._done_dir, os.path.split(filepath)[-1] ) try: if self._compress_complete: if not filepath.endswith('.gz'): # Compressing complete, but existing file not compressed # Compress and move it and kick out newpath += '.gz' self._compress_and_move(filepath, newpath) return newpath # else the file is already compressed and can just be moved #if not compressing completed file, just move it shutil.move(filepath, newpath) self.logger.info(" Completed - {}".format(filepath)) except Exception as e: self.logger.error("Couldn't Complete {}".format(filepath)) self.logger.exception(e) raise return newpath
[ "def", "complete", "(", "self", ",", "filepath", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "raise", "FileNotFoundError", "(", "\"Can't Complete {}, it doesn't exist\"", ".", "format", "(", "filepath", ")", ")", "if", "self", ".", "_devel", ":", "self", ".", "logger", ".", "debug", "(", "\"Completing - {} \"", ".", "format", "(", "filepath", ")", ")", "if", "self", ".", "rotate_complete", ":", "try", ":", "complete_dir", "=", "str", "(", "self", ".", "rotate_complete", "(", ")", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "\"rotate_complete function failed with the following exception.\"", ")", "self", ".", "logger", ".", "exception", "(", "e", ")", "raise", "newdir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_done_dir", ",", "complete_dir", ")", "newpath", "=", "os", ".", "path", ".", "join", "(", "newdir", ",", "os", ".", "path", ".", "split", "(", "filepath", ")", "[", "-", "1", "]", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "newdir", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Making new directory: {}\"", ".", "format", "(", "newdir", ")", ")", "os", ".", "makedirs", "(", "newdir", ")", "else", ":", "newpath", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_done_dir", ",", "os", ".", "path", ".", "split", "(", "filepath", ")", "[", "-", "1", "]", ")", "try", ":", "if", "self", ".", "_compress_complete", ":", "if", "not", "filepath", ".", "endswith", "(", "'.gz'", ")", ":", "# Compressing complete, but existing file not compressed", "# Compress and move it and kick out", "newpath", "+=", "'.gz'", "self", ".", "_compress_and_move", "(", "filepath", ",", "newpath", ")", "return", "newpath", "# else the file is already compressed and can just be moved", "#if not compressing completed file, just move it", "shutil", ".", "move", "(", "filepath", ",", "newpath", ")", "self", ".", "logger", ".", "info", "(", "\" Completed - {}\"", ".", "format", "(", "filepath", ")", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "\"Couldn't Complete {}\"", ".", "format", "(", "filepath", ")", ")", "self", ".", "logger", ".", "exception", "(", "e", ")", "raise", "return", "newpath" ]
Marks the item as complete by moving it to the done directory and optionally gzipping it.
[ "Marks", "the", "item", "as", "complete", "by", "moving", "it", "to", "the", "done", "directory", "and", "optionally", "gzipping", "it", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/indexq.py#L240-L279
train
moonlitesolutions/SolrClient
SolrClient/indexq.py
IndexQ.index
def index(self, solr, collection, threads=1, send_method='stream_file', **kwargs): ''' Will index the queue into a specified solr instance and collection. Specify multiple threads to make this faster, however keep in mind that if you specify multiple threads the items may not be in order. Example:: solr = SolrClient('http://localhost:8983/solr/') for doc in self.docs: index.add(doc, finalize=True) index.index(solr,'SolrClient_unittest') :param object solr: SolrClient object. :param string collection: The name of the collection to index document into. :param int threads: Number of simultaneous threads to spin up for indexing. :param string send_method: SolrClient method to execute for indexing. Default is stream_file ''' try: method = getattr(solr, send_method) except AttributeError: raise AttributeError("Couldn't find the send_method. Specify either stream_file or local_index") self.logger.info("Indexing {} into {} using {}".format(self._queue_name, collection, send_method)) if threads > 1: if hasattr(collection, '__call__'): self.logger.debug("Overwriting send_method to index_json") method = getattr(solr, 'index_json') method = partial(self._wrap_dynamic, method, collection) else: method = partial(self._wrap, method, collection) with ThreadPool(threads) as p: p.map(method, self.get_todo_items()) else: for todo_file in self.get_todo_items(): try: result = method(collection, todo_file) if result: self.complete(todo_file) except SolrError: self.logger.error("Error Indexing Item: {}".format(todo_file)) self._unlock() raise
python
def index(self, solr, collection, threads=1, send_method='stream_file', **kwargs): ''' Will index the queue into a specified solr instance and collection. Specify multiple threads to make this faster, however keep in mind that if you specify multiple threads the items may not be in order. Example:: solr = SolrClient('http://localhost:8983/solr/') for doc in self.docs: index.add(doc, finalize=True) index.index(solr,'SolrClient_unittest') :param object solr: SolrClient object. :param string collection: The name of the collection to index document into. :param int threads: Number of simultaneous threads to spin up for indexing. :param string send_method: SolrClient method to execute for indexing. Default is stream_file ''' try: method = getattr(solr, send_method) except AttributeError: raise AttributeError("Couldn't find the send_method. Specify either stream_file or local_index") self.logger.info("Indexing {} into {} using {}".format(self._queue_name, collection, send_method)) if threads > 1: if hasattr(collection, '__call__'): self.logger.debug("Overwriting send_method to index_json") method = getattr(solr, 'index_json') method = partial(self._wrap_dynamic, method, collection) else: method = partial(self._wrap, method, collection) with ThreadPool(threads) as p: p.map(method, self.get_todo_items()) else: for todo_file in self.get_todo_items(): try: result = method(collection, todo_file) if result: self.complete(todo_file) except SolrError: self.logger.error("Error Indexing Item: {}".format(todo_file)) self._unlock() raise
[ "def", "index", "(", "self", ",", "solr", ",", "collection", ",", "threads", "=", "1", ",", "send_method", "=", "'stream_file'", ",", "*", "*", "kwargs", ")", ":", "try", ":", "method", "=", "getattr", "(", "solr", ",", "send_method", ")", "except", "AttributeError", ":", "raise", "AttributeError", "(", "\"Couldn't find the send_method. Specify either stream_file or local_index\"", ")", "self", ".", "logger", ".", "info", "(", "\"Indexing {} into {} using {}\"", ".", "format", "(", "self", ".", "_queue_name", ",", "collection", ",", "send_method", ")", ")", "if", "threads", ">", "1", ":", "if", "hasattr", "(", "collection", ",", "'__call__'", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Overwriting send_method to index_json\"", ")", "method", "=", "getattr", "(", "solr", ",", "'index_json'", ")", "method", "=", "partial", "(", "self", ".", "_wrap_dynamic", ",", "method", ",", "collection", ")", "else", ":", "method", "=", "partial", "(", "self", ".", "_wrap", ",", "method", ",", "collection", ")", "with", "ThreadPool", "(", "threads", ")", "as", "p", ":", "p", ".", "map", "(", "method", ",", "self", ".", "get_todo_items", "(", ")", ")", "else", ":", "for", "todo_file", "in", "self", ".", "get_todo_items", "(", ")", ":", "try", ":", "result", "=", "method", "(", "collection", ",", "todo_file", ")", "if", "result", ":", "self", ".", "complete", "(", "todo_file", ")", "except", "SolrError", ":", "self", ".", "logger", ".", "error", "(", "\"Error Indexing Item: {}\"", ".", "format", "(", "todo_file", ")", ")", "self", ".", "_unlock", "(", ")", "raise" ]
Will index the queue into a specified solr instance and collection. Specify multiple threads to make this faster; however, keep in mind that if you specify multiple threads the items may not be in order. Example:: solr = SolrClient('http://localhost:8983/solr/') for doc in self.docs: index.add(doc, finalize=True) index.index(solr,'SolrClient_unittest') :param object solr: SolrClient object. :param string collection: The name of the collection to index documents into. :param int threads: Number of simultaneous threads to spin up for indexing. :param string send_method: SolrClient method to execute for indexing. Default is stream_file
[ "Will", "index", "the", "queue", "into", "a", "specified", "solr", "instance", "and", "collection", ".", "Specify", "multiple", "threads", "to", "make", "this", "faster", "however", "keep", "in", "mind", "that", "if", "you", "specify", "multiple", "threads", "the", "items", "may", "not", "be", "in", "order", ".", "Example", "::", "solr", "=", "SolrClient", "(", "http", ":", "//", "localhost", ":", "8983", "/", "solr", "/", ")", "for", "doc", "in", "self", ".", "docs", ":", "index", ".", "add", "(", "doc", "finalize", "=", "True", ")", "index", ".", "index", "(", "solr", "SolrClient_unittest", ")" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/indexq.py#L295-L336
train
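A hedged sketch of draining the queue with multiple threads, extending the docstring example above; the import path and the existing `index` IndexQ instance are assumptions, and the collection name is a placeholder.

from SolrClient import SolrClient   # import path assumed from the repo layout

solr = SolrClient('http://localhost:8983/solr/')
# `index` is assumed to be an IndexQ that already has finalized todo files.
# With threads > 1 the docstring warns that items may not complete in order.
index.index(solr, 'SolrClient_unittest', threads=4, send_method='stream_file')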
moonlitesolutions/SolrClient
SolrClient/indexq.py
IndexQ.get_all_json_from_indexq
def get_all_json_from_indexq(self): ''' Gets all data from the todo files in indexq and returns one huge list of all data. ''' files = self.get_all_as_list() out = [] for efile in files: out.extend(self._open_file(efile)) return out
python
def get_all_json_from_indexq(self): ''' Gets all data from the todo files in indexq and returns one huge list of all data. ''' files = self.get_all_as_list() out = [] for efile in files: out.extend(self._open_file(efile)) return out
[ "def", "get_all_json_from_indexq", "(", "self", ")", ":", "files", "=", "self", ".", "get_all_as_list", "(", ")", "out", "=", "[", "]", "for", "efile", "in", "files", ":", "out", ".", "extend", "(", "self", ".", "_open_file", "(", "efile", ")", ")", "return", "out" ]
Gets all data from the todo files in indexq and returns one huge list of all data.
[ "Gets", "all", "data", "from", "the", "todo", "files", "in", "indexq", "and", "returns", "one", "huge", "list", "of", "all", "data", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/indexq.py#L389-L397
train
moonlitesolutions/SolrClient
SolrClient/indexq.py
IndexQ.get_multi_q
def get_multi_q(self, sentinel='STOP'): ''' This helps indexq operate in multiprocessing environment without each process having to have it's own IndexQ. It also is a handy way to deal with thread / process safety. This method will create and return a JoinableQueue object. Additionally, it will kick off a back end process that will monitor the queue, de-queue items and add them to this indexq. The returned JoinableQueue object can be safely passed to multiple worker processes to populate it with data. To indicate that you are done writing the data to the queue, pass in the sentinel value ('STOP' by default). Make sure you call join_indexer() after you are done to close out the queue and join the worker. ''' self.in_q = JoinableQueue() self.indexer_process = Process(target=self._indexer_process, args=(self.in_q, sentinel)) self.indexer_process.daemon = False self.indexer_process.start() return self.in_q
python
def get_multi_q(self, sentinel='STOP'): ''' This helps indexq operate in multiprocessing environment without each process having to have it's own IndexQ. It also is a handy way to deal with thread / process safety. This method will create and return a JoinableQueue object. Additionally, it will kick off a back end process that will monitor the queue, de-queue items and add them to this indexq. The returned JoinableQueue object can be safely passed to multiple worker processes to populate it with data. To indicate that you are done writing the data to the queue, pass in the sentinel value ('STOP' by default). Make sure you call join_indexer() after you are done to close out the queue and join the worker. ''' self.in_q = JoinableQueue() self.indexer_process = Process(target=self._indexer_process, args=(self.in_q, sentinel)) self.indexer_process.daemon = False self.indexer_process.start() return self.in_q
[ "def", "get_multi_q", "(", "self", ",", "sentinel", "=", "'STOP'", ")", ":", "self", ".", "in_q", "=", "JoinableQueue", "(", ")", "self", ".", "indexer_process", "=", "Process", "(", "target", "=", "self", ".", "_indexer_process", ",", "args", "=", "(", "self", ".", "in_q", ",", "sentinel", ")", ")", "self", ".", "indexer_process", ".", "daemon", "=", "False", "self", ".", "indexer_process", ".", "start", "(", ")", "return", "self", ".", "in_q" ]
This helps indexq operate in a multiprocessing environment without each process having to have its own IndexQ. It also is a handy way to deal with thread / process safety. This method will create and return a JoinableQueue object. Additionally, it will kick off a back end process that will monitor the queue, de-queue items and add them to this indexq. The returned JoinableQueue object can be safely passed to multiple worker processes to populate it with data. To indicate that you are done writing the data to the queue, pass in the sentinel value ('STOP' by default). Make sure you call join_indexer() after you are done to close out the queue and join the worker.
[ "This", "helps", "indexq", "operate", "in", "a", "multiprocessing", "environment", "without", "each", "process", "having", "to", "have", "its", "own", "IndexQ", ".", "It", "also", "is", "a", "handy", "way", "to", "deal", "with", "thread", "/", "process", "safety", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/indexq.py#L408-L424
train
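A rough sketch of the worker-process pattern the docstring describes; it assumes `index` is an existing IndexQ, that one 'STOP' sentinel is enough to shut down the background indexer, and that join_indexer() behaves as the docstring says. `doc_chunks` is a placeholder for pre-split lists of documents.

from multiprocessing import Process

def producer(q, docs):
    # Workers only need the JoinableQueue; they never touch the IndexQ directly.
    for doc in docs:
        q.put(doc)

q = index.get_multi_q()                         # starts the background indexer process
workers = [Process(target=producer, args=(q, chunk)) for chunk in doc_chunks]
for w in workers:
    w.start()
for w in workers:
    w.join()

q.put('STOP')                                   # sentinel: no more data is coming
index.join_indexer()                            # close the queue and join the indexer process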
moonlitesolutions/SolrClient
SolrClient/transport/transportbase.py
TransportBase._retry
def _retry(function): """ Internal mechanism to try to send data to multiple Solr Hosts if the query fails on the first one. """ def inner(self, **kwargs): last_exception = None #for host in self.router.get_hosts(**kwargs): for host in self.host: try: return function(self, host, **kwargs) except SolrError as e: self.logger.exception(e) raise except ConnectionError as e: self.logger.exception("Tried connecting to Solr, but couldn't because of the following exception.") if '401' in e.__str__(): raise last_exception = e # raise the last exception after contacting all hosts instead of returning None if last_exception is not None: raise last_exception return inner
python
def _retry(function): """ Internal mechanism to try to send data to multiple Solr Hosts if the query fails on the first one. """ def inner(self, **kwargs): last_exception = None #for host in self.router.get_hosts(**kwargs): for host in self.host: try: return function(self, host, **kwargs) except SolrError as e: self.logger.exception(e) raise except ConnectionError as e: self.logger.exception("Tried connecting to Solr, but couldn't because of the following exception.") if '401' in e.__str__(): raise last_exception = e # raise the last exception after contacting all hosts instead of returning None if last_exception is not None: raise last_exception return inner
[ "def", "_retry", "(", "function", ")", ":", "def", "inner", "(", "self", ",", "*", "*", "kwargs", ")", ":", "last_exception", "=", "None", "#for host in self.router.get_hosts(**kwargs):", "for", "host", "in", "self", ".", "host", ":", "try", ":", "return", "function", "(", "self", ",", "host", ",", "*", "*", "kwargs", ")", "except", "SolrError", "as", "e", ":", "self", ".", "logger", ".", "exception", "(", "e", ")", "raise", "except", "ConnectionError", "as", "e", ":", "self", ".", "logger", ".", "exception", "(", "\"Tried connecting to Solr, but couldn't because of the following exception.\"", ")", "if", "'401'", "in", "e", ".", "__str__", "(", ")", ":", "raise", "last_exception", "=", "e", "# raise the last exception after contacting all hosts instead of returning None", "if", "last_exception", "is", "not", "None", ":", "raise", "last_exception", "return", "inner" ]
Internal mechanism to try to send data to multiple Solr Hosts if the query fails on the first one.
[ "Internal", "mechanism", "to", "try", "to", "send", "data", "to", "multiple", "Solr", "Hosts", "if", "the", "query", "fails", "on", "the", "first", "one", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/transport/transportbase.py#L28-L51
train
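An illustrative (not actual) sketch of how the _retry decorator is meant to be used: it wraps a method that takes `host` as its first argument after self, and the wrapper walks self.host until one host succeeds. The subclass, method name, and return value below are invented for the example, not taken from the library.

class EchoTransport(TransportBase):             # assumes TransportBase can be subclassed as-is
    @TransportBase._retry
    def _send(self, host, **kwargs):
        # `host` is supplied by the decorator, not by the caller.
        print("would contact", host, "endpoint:", kwargs.get('endpoint'))
        return {}, {'url': host}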
moonlitesolutions/SolrClient
SolrClient/solrclient.py
SolrClient.commit
def commit(self, collection, openSearcher=False, softCommit=False, waitSearcher=True, commit=True, **kwargs): """ :param str collection: The name of the collection for the request :param bool openSearcher: If new searcher is to be opened :param bool softCommit: SoftCommit :param bool waitServer: Blocks until the new searcher is opened :param bool commit: Commit Sends a commit to a Solr collection. """ comm = { 'openSearcher': str(openSearcher).lower(), 'softCommit': str(softCommit).lower(), 'waitSearcher': str(waitSearcher).lower(), 'commit': str(commit).lower() } self.logger.debug("Sending Commit to Collection {}".format(collection)) try: resp, con_inf = self.transport.send_request(method='GET', endpoint='update', collection=collection, params=comm, **kwargs) except Exception as e: raise self.logger.debug("Commit Successful, QTime is {}".format(resp['responseHeader']['QTime']))
python
def commit(self, collection, openSearcher=False, softCommit=False, waitSearcher=True, commit=True, **kwargs): """ :param str collection: The name of the collection for the request :param bool openSearcher: If new searcher is to be opened :param bool softCommit: SoftCommit :param bool waitServer: Blocks until the new searcher is opened :param bool commit: Commit Sends a commit to a Solr collection. """ comm = { 'openSearcher': str(openSearcher).lower(), 'softCommit': str(softCommit).lower(), 'waitSearcher': str(waitSearcher).lower(), 'commit': str(commit).lower() } self.logger.debug("Sending Commit to Collection {}".format(collection)) try: resp, con_inf = self.transport.send_request(method='GET', endpoint='update', collection=collection, params=comm, **kwargs) except Exception as e: raise self.logger.debug("Commit Successful, QTime is {}".format(resp['responseHeader']['QTime']))
[ "def", "commit", "(", "self", ",", "collection", ",", "openSearcher", "=", "False", ",", "softCommit", "=", "False", ",", "waitSearcher", "=", "True", ",", "commit", "=", "True", ",", "*", "*", "kwargs", ")", ":", "comm", "=", "{", "'openSearcher'", ":", "str", "(", "openSearcher", ")", ".", "lower", "(", ")", ",", "'softCommit'", ":", "str", "(", "softCommit", ")", ".", "lower", "(", ")", ",", "'waitSearcher'", ":", "str", "(", "waitSearcher", ")", ".", "lower", "(", ")", ",", "'commit'", ":", "str", "(", "commit", ")", ".", "lower", "(", ")", "}", "self", ".", "logger", ".", "debug", "(", "\"Sending Commit to Collection {}\"", ".", "format", "(", "collection", ")", ")", "try", ":", "resp", ",", "con_inf", "=", "self", ".", "transport", ".", "send_request", "(", "method", "=", "'GET'", ",", "endpoint", "=", "'update'", ",", "collection", "=", "collection", ",", "params", "=", "comm", ",", "*", "*", "kwargs", ")", "except", "Exception", "as", "e", ":", "raise", "self", ".", "logger", ".", "debug", "(", "\"Commit Successful, QTime is {}\"", ".", "format", "(", "resp", "[", "'responseHeader'", "]", "[", "'QTime'", "]", ")", ")" ]
:param str collection: The name of the collection for the request :param bool openSearcher: If a new searcher is to be opened :param bool softCommit: SoftCommit :param bool waitSearcher: Blocks until the new searcher is opened :param bool commit: Commit Sends a commit to a Solr collection.
[ ":", "param", "str", "collection", ":", "The", "name", "of", "the", "collection", "for", "the", "request", ":", "param", "bool", "openSearcher", ":", "If", "a", "new", "searcher", "is", "to", "be", "opened", ":", "param", "bool", "softCommit", ":", "SoftCommit", ":", "param", "bool", "waitSearcher", ":", "Blocks", "until", "the", "new", "searcher", "is", "opened", ":", "param", "bool", "commit", ":", "Commit" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrclient.py#L39-L64
train
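A short usage sketch for commit(), based only on the signature above; `solr` is assumed to be an existing SolrClient and the collection name is a placeholder.

# Hard commit that opens a new searcher, making recently indexed documents visible.
solr.commit('SolrClient_unittest', openSearcher=True)

# Soft commit between indexing batches.
solr.commit('SolrClient_unittest', softCommit=True)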
moonlitesolutions/SolrClient
SolrClient/solrclient.py
SolrClient.query_raw
def query_raw(self, collection, query, request_handler='select', **kwargs): """ :param str collection: The name of the collection for the request :param str request_handler: Request handler, default is 'select' :param dict query: Python dictionary of Solr query parameters. Sends a query to Solr, returns a dict. `query` should be a dictionary of solr request handler arguments. Example:: res = solr.query_raw('SolrClient_unittest',{ 'q':'*:*', 'facet':True, 'facet.field':'facet_test', }) """ headers = {'content-type': 'application/x-www-form-urlencoded'} data = query resp, con_inf = self.transport.send_request(method='POST', endpoint=request_handler, collection=collection, data=data, headers=headers, **kwargs) return resp
python
def query_raw(self, collection, query, request_handler='select', **kwargs): """ :param str collection: The name of the collection for the request :param str request_handler: Request handler, default is 'select' :param dict query: Python dictionary of Solr query parameters. Sends a query to Solr, returns a dict. `query` should be a dictionary of solr request handler arguments. Example:: res = solr.query_raw('SolrClient_unittest',{ 'q':'*:*', 'facet':True, 'facet.field':'facet_test', }) """ headers = {'content-type': 'application/x-www-form-urlencoded'} data = query resp, con_inf = self.transport.send_request(method='POST', endpoint=request_handler, collection=collection, data=data, headers=headers, **kwargs) return resp
[ "def", "query_raw", "(", "self", ",", "collection", ",", "query", ",", "request_handler", "=", "'select'", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "{", "'content-type'", ":", "'application/x-www-form-urlencoded'", "}", "data", "=", "query", "resp", ",", "con_inf", "=", "self", ".", "transport", ".", "send_request", "(", "method", "=", "'POST'", ",", "endpoint", "=", "request_handler", ",", "collection", "=", "collection", ",", "data", "=", "data", ",", "headers", "=", "headers", ",", "*", "*", "kwargs", ")", "return", "resp" ]
:param str collection: The name of the collection for the request :param str request_handler: Request handler, default is 'select' :param dict query: Python dictionary of Solr query parameters. Sends a query to Solr, returns a dict. `query` should be a dictionary of solr request handler arguments. Example:: res = solr.query_raw('SolrClient_unittest',{ 'q':'*:*', 'facet':True, 'facet.field':'facet_test', })
[ ":", "param", "str", "collection", ":", "The", "name", "of", "the", "collection", "for", "the", "request", ":", "param", "str", "request_handler", ":", "Request", "handler", "default", "is", "select", ":", "param", "dict", "query", ":", "Python", "dictionary", "of", "Solr", "query", "parameters", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrclient.py#L66-L90
train
moonlitesolutions/SolrClient
SolrClient/solrclient.py
SolrClient.query
def query(self, collection, query, request_handler='select', **kwargs): """ :param str collection: The name of the collection for the request :param str request_handler: Request handler, default is 'select' :param dict query: Python dictonary of Solr query parameters. Sends a query to Solr, returns a SolrResults Object. `query` should be a dictionary of solr request handler arguments. Example:: res = solr.query('SolrClient_unittest',{ 'q':'*:*', 'facet':True, 'facet.field':'facet_test', }) """ for field in ['facet.pivot']: if field in query.keys(): if type(query[field]) is str: query[field] = query[field].replace(' ', '') elif type(query[field]) is list: query[field] = [s.replace(' ', '') for s in query[field]] method = 'POST' headers = {'content-type': 'application/x-www-form-urlencoded'} params = query data = {} resp, con_inf = self.transport.send_request(method=method, endpoint=request_handler, collection=collection, params=params, data=data, headers=headers, **kwargs) if resp: resp = SolrResponse(resp) resp.url = con_inf['url'] return resp
python
def query(self, collection, query, request_handler='select', **kwargs): """ :param str collection: The name of the collection for the request :param str request_handler: Request handler, default is 'select' :param dict query: Python dictonary of Solr query parameters. Sends a query to Solr, returns a SolrResults Object. `query` should be a dictionary of solr request handler arguments. Example:: res = solr.query('SolrClient_unittest',{ 'q':'*:*', 'facet':True, 'facet.field':'facet_test', }) """ for field in ['facet.pivot']: if field in query.keys(): if type(query[field]) is str: query[field] = query[field].replace(' ', '') elif type(query[field]) is list: query[field] = [s.replace(' ', '') for s in query[field]] method = 'POST' headers = {'content-type': 'application/x-www-form-urlencoded'} params = query data = {} resp, con_inf = self.transport.send_request(method=method, endpoint=request_handler, collection=collection, params=params, data=data, headers=headers, **kwargs) if resp: resp = SolrResponse(resp) resp.url = con_inf['url'] return resp
[ "def", "query", "(", "self", ",", "collection", ",", "query", ",", "request_handler", "=", "'select'", ",", "*", "*", "kwargs", ")", ":", "for", "field", "in", "[", "'facet.pivot'", "]", ":", "if", "field", "in", "query", ".", "keys", "(", ")", ":", "if", "type", "(", "query", "[", "field", "]", ")", "is", "str", ":", "query", "[", "field", "]", "=", "query", "[", "field", "]", ".", "replace", "(", "' '", ",", "''", ")", "elif", "type", "(", "query", "[", "field", "]", ")", "is", "list", ":", "query", "[", "field", "]", "=", "[", "s", ".", "replace", "(", "' '", ",", "''", ")", "for", "s", "in", "query", "[", "field", "]", "]", "method", "=", "'POST'", "headers", "=", "{", "'content-type'", ":", "'application/x-www-form-urlencoded'", "}", "params", "=", "query", "data", "=", "{", "}", "resp", ",", "con_inf", "=", "self", ".", "transport", ".", "send_request", "(", "method", "=", "method", ",", "endpoint", "=", "request_handler", ",", "collection", "=", "collection", ",", "params", "=", "params", ",", "data", "=", "data", ",", "headers", "=", "headers", ",", "*", "*", "kwargs", ")", "if", "resp", ":", "resp", "=", "SolrResponse", "(", "resp", ")", "resp", ".", "url", "=", "con_inf", "[", "'url'", "]", "return", "resp" ]
:param str collection: The name of the collection for the request :param str request_handler: Request handler, default is 'select' :param dict query: Python dictionary of Solr query parameters. Sends a query to Solr, returns a SolrResponse object. `query` should be a dictionary of solr request handler arguments. Example:: res = solr.query('SolrClient_unittest',{ 'q':'*:*', 'facet':True, 'facet.field':'facet_test', })
[ ":", "param", "str", "collection", ":", "The", "name", "of", "the", "collection", "for", "the", "request", ":", "param", "str", "request_handler", ":", "Request", "handler", "default", "is", "select", ":", "param", "dict", "query", ":", "Python", "dictionary", "of", "Solr", "query", "parameters", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrclient.py#L92-L129
train
moonlitesolutions/SolrClient
SolrClient/solrclient.py
SolrClient.index
def index(self, collection, docs, params=None, min_rf=None, **kwargs): """ :param str collection: The name of the collection for the request. :param docs list docs: List of dicts. ex: [{"title": "testing solr indexing", "id": "test1"}] :param min_rf int min_rf: Required number of replicas to write to' Sends supplied list of dicts to solr for indexing. :: >>> docs = [{'id':'changeme','field1':'value1'}, {'id':'changeme1','field2':'value2'}] >>> solr.index('SolrClient_unittest', docs) """ data = json.dumps(docs) return self.index_json(collection, data, params, min_rf=min_rf, **kwargs)
python
def index(self, collection, docs, params=None, min_rf=None, **kwargs): """ :param str collection: The name of the collection for the request. :param docs list docs: List of dicts. ex: [{"title": "testing solr indexing", "id": "test1"}] :param min_rf int min_rf: Required number of replicas to write to' Sends supplied list of dicts to solr for indexing. :: >>> docs = [{'id':'changeme','field1':'value1'}, {'id':'changeme1','field2':'value2'}] >>> solr.index('SolrClient_unittest', docs) """ data = json.dumps(docs) return self.index_json(collection, data, params, min_rf=min_rf, **kwargs)
[ "def", "index", "(", "self", ",", "collection", ",", "docs", ",", "params", "=", "None", ",", "min_rf", "=", "None", ",", "*", "*", "kwargs", ")", ":", "data", "=", "json", ".", "dumps", "(", "docs", ")", "return", "self", ".", "index_json", "(", "collection", ",", "data", ",", "params", ",", "min_rf", "=", "min_rf", ",", "*", "*", "kwargs", ")" ]
:param str collection: The name of the collection for the request. :param docs list docs: List of dicts. ex: [{"title": "testing solr indexing", "id": "test1"}] :param min_rf int min_rf: Required number of replicas to write to' Sends supplied list of dicts to solr for indexing. :: >>> docs = [{'id':'changeme','field1':'value1'}, {'id':'changeme1','field2':'value2'}] >>> solr.index('SolrClient_unittest', docs)
[ ":", "param", "str", "collection", ":", "The", "name", "of", "the", "collection", "for", "the", "request", ".", ":", "param", "docs", "list", "docs", ":", "List", "of", "dicts", ".", "ex", ":", "[", "{", "title", ":", "testing", "solr", "indexing", "id", ":", "test1", "}", "]", ":", "param", "min_rf", "int", "min_rf", ":", "Required", "number", "of", "replicas", "to", "write", "to" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrclient.py#L131-L144
train
moonlitesolutions/SolrClient
SolrClient/solrclient.py
SolrClient.index_json
def index_json(self, collection, data, params=None, min_rf=None, **kwargs): """ :param str collection: The name of the collection for the request. :param data str data: Valid Solr JSON as a string. ex: '[{"title": "testing solr indexing", "id": "test1"}]' :param min_rf int min_rf: Required number of replicas to write to' Sends supplied json to solr for indexing, supplied JSON must be a list of dictionaries. :: >>> docs = [{'id':'changeme','field1':'value1'}, {'id':'changeme1','field2':'value2'}] >>> solr.index_json('SolrClient_unittest',json.dumps(docs)) """ if params is None: params = {} resp, con_inf = self.transport.send_request(method='POST', endpoint='update', collection=collection, data=data, params=params, min_rf=min_rf, **kwargs) if min_rf is not None: rf = resp['responseHeader']['rf'] if rf < min_rf: raise MinRfError("couldn't satisfy rf:%s min_rf:%s" % (rf, min_rf), rf=rf, min_rf=min_rf) if resp['responseHeader']['status'] == 0: return True return False
python
def index_json(self, collection, data, params=None, min_rf=None, **kwargs): """ :param str collection: The name of the collection for the request. :param data str data: Valid Solr JSON as a string. ex: '[{"title": "testing solr indexing", "id": "test1"}]' :param min_rf int min_rf: Required number of replicas to write to' Sends supplied json to solr for indexing, supplied JSON must be a list of dictionaries. :: >>> docs = [{'id':'changeme','field1':'value1'}, {'id':'changeme1','field2':'value2'}] >>> solr.index_json('SolrClient_unittest',json.dumps(docs)) """ if params is None: params = {} resp, con_inf = self.transport.send_request(method='POST', endpoint='update', collection=collection, data=data, params=params, min_rf=min_rf, **kwargs) if min_rf is not None: rf = resp['responseHeader']['rf'] if rf < min_rf: raise MinRfError("couldn't satisfy rf:%s min_rf:%s" % (rf, min_rf), rf=rf, min_rf=min_rf) if resp['responseHeader']['status'] == 0: return True return False
[ "def", "index_json", "(", "self", ",", "collection", ",", "data", ",", "params", "=", "None", ",", "min_rf", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "resp", ",", "con_inf", "=", "self", ".", "transport", ".", "send_request", "(", "method", "=", "'POST'", ",", "endpoint", "=", "'update'", ",", "collection", "=", "collection", ",", "data", "=", "data", ",", "params", "=", "params", ",", "min_rf", "=", "min_rf", ",", "*", "*", "kwargs", ")", "if", "min_rf", "is", "not", "None", ":", "rf", "=", "resp", "[", "'responseHeader'", "]", "[", "'rf'", "]", "if", "rf", "<", "min_rf", ":", "raise", "MinRfError", "(", "\"couldn't satisfy rf:%s min_rf:%s\"", "%", "(", "rf", ",", "min_rf", ")", ",", "rf", "=", "rf", ",", "min_rf", "=", "min_rf", ")", "if", "resp", "[", "'responseHeader'", "]", "[", "'status'", "]", "==", "0", ":", "return", "True", "return", "False" ]
:param str collection: The name of the collection for the request. :param data str data: Valid Solr JSON as a string. ex: '[{"title": "testing solr indexing", "id": "test1"}]' :param min_rf int min_rf: Required number of replicas to write to' Sends supplied json to solr for indexing, supplied JSON must be a list of dictionaries. :: >>> docs = [{'id':'changeme','field1':'value1'}, {'id':'changeme1','field2':'value2'}] >>> solr.index_json('SolrClient_unittest',json.dumps(docs))
[ ":", "param", "str", "collection", ":", "The", "name", "of", "the", "collection", "for", "the", "request", ".", ":", "param", "data", "str", "data", ":", "Valid", "Solr", "JSON", "as", "a", "string", ".", "ex", ":", "[", "{", "title", ":", "testing", "solr", "indexing", "id", ":", "test1", "}", "]", ":", "param", "min_rf", "int", "min_rf", ":", "Required", "number", "of", "replicas", "to", "write", "to" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrclient.py#L146-L175
train
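A hedged sketch of the min_rf behavior shown in the code above: when fewer replicas acknowledge the write than requested, a MinRfError is raised. The import path for MinRfError and the availability of a `solr` client are assumptions.

import json
from SolrClient.exceptions import MinRfError    # import path assumed

docs = [{'id': 'doc1', 'title': 'replicated write'}]
try:
    # Ask Solr to confirm the update reached at least two replicas.
    solr.index_json('SolrClient_unittest', json.dumps(docs), min_rf=2)
except MinRfError as e:
    # The exception message embeds the achieved and required replication factors.
    print("replication factor not met:", e)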
moonlitesolutions/SolrClient
SolrClient/solrclient.py
SolrClient.get
def get(self, collection, doc_id, **kwargs): """ :param str collection: The name of the collection for the request :param str doc_id: ID of the document to be retrieved. Retrieve document from Solr based on the ID. :: >>> solr.get('SolrClient_unittest','changeme') """ resp, con_inf = self.transport.send_request(method='GET', endpoint='get', collection=collection, params={'id': doc_id}, **kwargs) if 'doc' in resp and resp['doc']: return resp['doc'] raise NotFoundError
python
def get(self, collection, doc_id, **kwargs): """ :param str collection: The name of the collection for the request :param str doc_id: ID of the document to be retrieved. Retrieve document from Solr based on the ID. :: >>> solr.get('SolrClient_unittest','changeme') """ resp, con_inf = self.transport.send_request(method='GET', endpoint='get', collection=collection, params={'id': doc_id}, **kwargs) if 'doc' in resp and resp['doc']: return resp['doc'] raise NotFoundError
[ "def", "get", "(", "self", ",", "collection", ",", "doc_id", ",", "*", "*", "kwargs", ")", ":", "resp", ",", "con_inf", "=", "self", ".", "transport", ".", "send_request", "(", "method", "=", "'GET'", ",", "endpoint", "=", "'get'", ",", "collection", "=", "collection", ",", "params", "=", "{", "'id'", ":", "doc_id", "}", ",", "*", "*", "kwargs", ")", "if", "'doc'", "in", "resp", "and", "resp", "[", "'doc'", "]", ":", "return", "resp", "[", "'doc'", "]", "raise", "NotFoundError" ]
:param str collection: The name of the collection for the request :param str doc_id: ID of the document to be retrieved. Retrieve document from Solr based on the ID. :: >>> solr.get('SolrClient_unittest','changeme')
[ ":", "param", "str", "collection", ":", "The", "name", "of", "the", "collection", "for", "the", "request", ":", "param", "str", "doc_id", ":", "ID", "of", "the", "document", "to", "be", "retrieved", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrclient.py#L177-L194
train
moonlitesolutions/SolrClient
SolrClient/solrclient.py
SolrClient.mget
def mget(self, collection, doc_ids, **kwargs): """ :param str collection: The name of the collection for the request :param tuple doc_ids: ID of the document to be retrieved. Retrieve documents from Solr based on the ID. :: >>> solr.get('SolrClient_unittest','changeme') """ resp, con_inf = self.transport.send_request(method='GET', endpoint='get', collection=collection, params={'ids': doc_ids}, **kwargs) if 'docs' in resp['response']: return resp['response']['docs'] raise NotFoundError
python
def mget(self, collection, doc_ids, **kwargs): """ :param str collection: The name of the collection for the request :param tuple doc_ids: ID of the document to be retrieved. Retrieve documents from Solr based on the ID. :: >>> solr.get('SolrClient_unittest','changeme') """ resp, con_inf = self.transport.send_request(method='GET', endpoint='get', collection=collection, params={'ids': doc_ids}, **kwargs) if 'docs' in resp['response']: return resp['response']['docs'] raise NotFoundError
[ "def", "mget", "(", "self", ",", "collection", ",", "doc_ids", ",", "*", "*", "kwargs", ")", ":", "resp", ",", "con_inf", "=", "self", ".", "transport", ".", "send_request", "(", "method", "=", "'GET'", ",", "endpoint", "=", "'get'", ",", "collection", "=", "collection", ",", "params", "=", "{", "'ids'", ":", "doc_ids", "}", ",", "*", "*", "kwargs", ")", "if", "'docs'", "in", "resp", "[", "'response'", "]", ":", "return", "resp", "[", "'response'", "]", "[", "'docs'", "]", "raise", "NotFoundError" ]
:param str collection: The name of the collection for the request :param tuple doc_ids: IDs of the documents to be retrieved. Retrieve documents from Solr based on their IDs. :: >>> solr.get('SolrClient_unittest','changeme')
[ ":", "param", "str", "collection", ":", "The", "name", "of", "the", "collection", "for", "the", "request", ":", "param", "tuple", "doc_ids", ":", "IDs", "of", "the", "documents", "to", "be", "retrieved", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrclient.py#L196-L213
train
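The mget() docstring above reuses the single-document get() example; the sketch below guesses at a multi-ID call based on the `doc_ids` tuple parameter in the signature, so the argument shape is an assumption and the IDs are placeholders.

# Real-time get for several documents at once.
docs = solr.mget('SolrClient_unittest', ('changeme', 'changeme1'))
for doc in docs:
    print(doc['id'])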
moonlitesolutions/SolrClient
SolrClient/solrclient.py
SolrClient.delete_doc_by_id
def delete_doc_by_id(self, collection, doc_id, **kwargs): """ :param str collection: The name of the collection for the request :param str id: ID of the document to be deleted. Can specify '*' to delete everything. Deletes items from Solr based on the ID. :: >>> solr.delete_doc_by_id('SolrClient_unittest','changeme') """ if ' ' in doc_id: doc_id = '"{}"'.format(doc_id) temp = {"delete": {"query": 'id:{}'.format(doc_id)}} resp, con_inf = self.transport.send_request(method='POST', endpoint='update', collection=collection, data=json.dumps(temp), **kwargs) return resp
python
def delete_doc_by_id(self, collection, doc_id, **kwargs): """ :param str collection: The name of the collection for the request :param str id: ID of the document to be deleted. Can specify '*' to delete everything. Deletes items from Solr based on the ID. :: >>> solr.delete_doc_by_id('SolrClient_unittest','changeme') """ if ' ' in doc_id: doc_id = '"{}"'.format(doc_id) temp = {"delete": {"query": 'id:{}'.format(doc_id)}} resp, con_inf = self.transport.send_request(method='POST', endpoint='update', collection=collection, data=json.dumps(temp), **kwargs) return resp
[ "def", "delete_doc_by_id", "(", "self", ",", "collection", ",", "doc_id", ",", "*", "*", "kwargs", ")", ":", "if", "' '", "in", "doc_id", ":", "doc_id", "=", "'\"{}\"'", ".", "format", "(", "doc_id", ")", "temp", "=", "{", "\"delete\"", ":", "{", "\"query\"", ":", "'id:{}'", ".", "format", "(", "doc_id", ")", "}", "}", "resp", ",", "con_inf", "=", "self", ".", "transport", ".", "send_request", "(", "method", "=", "'POST'", ",", "endpoint", "=", "'update'", ",", "collection", "=", "collection", ",", "data", "=", "json", ".", "dumps", "(", "temp", ")", ",", "*", "*", "kwargs", ")", "return", "resp" ]
:param str collection: The name of the collection for the request :param str doc_id: ID of the document to be deleted. Can specify '*' to delete everything. Deletes items from Solr based on the ID. :: >>> solr.delete_doc_by_id('SolrClient_unittest','changeme')
[ ":", "param", "str", "collection", ":", "The", "name", "of", "the", "collection", "for", "the", "request", ":", "param", "str", "doc_id", ":", "ID", "of", "the", "document", "to", "be", "deleted", ".", "Can", "specify", "*", "to", "delete", "everything", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrclient.py#L215-L233
train
moonlitesolutions/SolrClient
SolrClient/solrclient.py
SolrClient.delete_doc_by_query
def delete_doc_by_query(self, collection, query, **kwargs): """ :param str collection: The name of the collection for the request :param str query: Query selecting documents to be deleted. Deletes items from Solr based on a given query. :: >>> solr.delete_doc_by_query('SolrClient_unittest','*:*') """ temp = {"delete": {"query": query}} resp, con_inf = self.transport.send_request(method='POST', endpoint='update', collection=collection, data=json.dumps(temp), **kwargs) return resp
python
def delete_doc_by_query(self, collection, query, **kwargs): """ :param str collection: The name of the collection for the request :param str query: Query selecting documents to be deleted. Deletes items from Solr based on a given query. :: >>> solr.delete_doc_by_query('SolrClient_unittest','*:*') """ temp = {"delete": {"query": query}} resp, con_inf = self.transport.send_request(method='POST', endpoint='update', collection=collection, data=json.dumps(temp), **kwargs) return resp
[ "def", "delete_doc_by_query", "(", "self", ",", "collection", ",", "query", ",", "*", "*", "kwargs", ")", ":", "temp", "=", "{", "\"delete\"", ":", "{", "\"query\"", ":", "query", "}", "}", "resp", ",", "con_inf", "=", "self", ".", "transport", ".", "send_request", "(", "method", "=", "'POST'", ",", "endpoint", "=", "'update'", ",", "collection", "=", "collection", ",", "data", "=", "json", ".", "dumps", "(", "temp", ")", ",", "*", "*", "kwargs", ")", "return", "resp" ]
:param str collection: The name of the collection for the request :param str query: Query selecting documents to be deleted. Deletes items from Solr based on a given query. :: >>> solr.delete_doc_by_query('SolrClient_unittest','*:*')
[ ":", "param", "str", "collection", ":", "The", "name", "of", "the", "collection", "for", "the", "request", ":", "param", "str", "query", ":", "Query", "selecting", "documents", "to", "be", "deleted", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrclient.py#L235-L251
train
moonlitesolutions/SolrClient
SolrClient/solrclient.py
SolrClient.local_index
def local_index(self, collection, filename, **kwargs): """ :param str collection: The name of the collection for the request :param str filename: String file path of the file to index. Will index specified file into Solr. The `file` must be local to the server, this is faster than other indexing options. If the files are already on the servers I suggest you use this. For example:: >>> solr.local_index('SolrClient_unittest', '/local/to/server/temp_file.json') """ filename = os.path.abspath(filename) self.logger.info("Indexing {} into Solr Collection {}".format(filename, collection)) data = {'stream.file': filename, 'stream.contentType': 'text/json'} resp, con_inf = self.transport.send_request(method='GET', endpoint='update/json', collection=collection, params=data, **kwargs) if resp['responseHeader']['status'] == 0: return True else: return False
python
def local_index(self, collection, filename, **kwargs): """ :param str collection: The name of the collection for the request :param str filename: String file path of the file to index. Will index specified file into Solr. The `file` must be local to the server, this is faster than other indexing options. If the files are already on the servers I suggest you use this. For example:: >>> solr.local_index('SolrClient_unittest', '/local/to/server/temp_file.json') """ filename = os.path.abspath(filename) self.logger.info("Indexing {} into Solr Collection {}".format(filename, collection)) data = {'stream.file': filename, 'stream.contentType': 'text/json'} resp, con_inf = self.transport.send_request(method='GET', endpoint='update/json', collection=collection, params=data, **kwargs) if resp['responseHeader']['status'] == 0: return True else: return False
[ "def", "local_index", "(", "self", ",", "collection", ",", "filename", ",", "*", "*", "kwargs", ")", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "self", ".", "logger", ".", "info", "(", "\"Indexing {} into Solr Collection {}\"", ".", "format", "(", "filename", ",", "collection", ")", ")", "data", "=", "{", "'stream.file'", ":", "filename", ",", "'stream.contentType'", ":", "'text/json'", "}", "resp", ",", "con_inf", "=", "self", ".", "transport", ".", "send_request", "(", "method", "=", "'GET'", ",", "endpoint", "=", "'update/json'", ",", "collection", "=", "collection", ",", "params", "=", "data", ",", "*", "*", "kwargs", ")", "if", "resp", "[", "'responseHeader'", "]", "[", "'status'", "]", "==", "0", ":", "return", "True", "else", ":", "return", "False" ]
:param str collection: The name of the collection for the request :param str filename: String file path of the file to index. Will index the specified file into Solr. The `file` must be local to the server; this is faster than other indexing options. If the files are already on the servers, I suggest you use this. For example:: >>> solr.local_index('SolrClient_unittest', '/local/to/server/temp_file.json')
[ ":", "param", "str", "collection", ":", "The", "name", "of", "the", "collection", "for", "the", "request", ":", "param", "str", "filename", ":", "String", "file", "path", "of", "the", "file", "to", "index", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrclient.py#L276-L298
train
moonlitesolutions/SolrClient
SolrClient/solrclient.py
SolrClient.paging_query
def paging_query(self, collection, query, rows=1000, start=0, max_start=200000): """ :param str collection: The name of the collection for the request. :param dict query: Dictionary of solr args. :param int rows: Number of rows to return in each batch. Default is 1000. :param int start: What position to start with. Default is 0. :param int max_start: Once the start will reach this number, the function will stop. Default is 200000. Will page through the result set in increments of `row` WITHOUT using cursorMark until it has all items \ or until `max_start` is reached. Use max_start to protect your Solr instance if you are not sure how many items you \ will be getting. The default is 200,000, which is still a bit high. Returns an iterator of SolrResponse objects. For Example:: >>> for res in solr.paging_query('SolrClient_unittest',{'q':'*:*'}): print(res) """ query = dict(query) while True: query['start'] = start query['rows'] = rows res = self.query(collection, query) if res.get_results_count(): yield res start += rows if res.get_results_count() < rows or start > max_start: break
python
def paging_query(self, collection, query, rows=1000, start=0, max_start=200000): """ :param str collection: The name of the collection for the request. :param dict query: Dictionary of solr args. :param int rows: Number of rows to return in each batch. Default is 1000. :param int start: What position to start with. Default is 0. :param int max_start: Once the start will reach this number, the function will stop. Default is 200000. Will page through the result set in increments of `row` WITHOUT using cursorMark until it has all items \ or until `max_start` is reached. Use max_start to protect your Solr instance if you are not sure how many items you \ will be getting. The default is 200,000, which is still a bit high. Returns an iterator of SolrResponse objects. For Example:: >>> for res in solr.paging_query('SolrClient_unittest',{'q':'*:*'}): print(res) """ query = dict(query) while True: query['start'] = start query['rows'] = rows res = self.query(collection, query) if res.get_results_count(): yield res start += rows if res.get_results_count() < rows or start > max_start: break
[ "def", "paging_query", "(", "self", ",", "collection", ",", "query", ",", "rows", "=", "1000", ",", "start", "=", "0", ",", "max_start", "=", "200000", ")", ":", "query", "=", "dict", "(", "query", ")", "while", "True", ":", "query", "[", "'start'", "]", "=", "start", "query", "[", "'rows'", "]", "=", "rows", "res", "=", "self", ".", "query", "(", "collection", ",", "query", ")", "if", "res", ".", "get_results_count", "(", ")", ":", "yield", "res", "start", "+=", "rows", "if", "res", ".", "get_results_count", "(", ")", "<", "rows", "or", "start", ">", "max_start", ":", "break" ]
:param str collection: The name of the collection for the request. :param dict query: Dictionary of solr args. :param int rows: Number of rows to return in each batch. Default is 1000. :param int start: What position to start with. Default is 0. :param int max_start: Once the start reaches this number, the function will stop. Default is 200000. Will page through the result set in increments of `rows` WITHOUT using cursorMark until it has all items \ or until `max_start` is reached. Use max_start to protect your Solr instance if you are not sure how many items you \ will be getting. The default is 200,000, which is still a bit high. Returns an iterator of SolrResponse objects. For Example:: >>> for res in solr.paging_query('SolrClient_unittest',{'q':'*:*'}): print(res)
[ ":", "param", "str", "collection", ":", "The", "name", "of", "the", "collection", "for", "the", "request", ".", ":", "param", "dict", "query", ":", "Dictionary", "of", "solr", "args", ".", ":", "param", "int", "rows", ":", "Number", "of", "rows", "to", "return", "in", "each", "batch", ".", "Default", "is", "1000", ".", ":", "param", "int", "start", ":", "What", "position", "to", "start", "with", ".", "Default", "is", "0", ".", ":", "param", "int", "max_start", ":", "Once", "the", "start", "reaches", "this", "number", "the", "function", "will", "stop", ".", "Default", "is", "200000", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrclient.py#L301-L328
train
moonlitesolutions/SolrClient
SolrClient/solrclient.py
SolrClient.cursor_query
def cursor_query(self, collection, query): """ :param str collection: The name of the collection for the request. :param dict query: Dictionary of solr args. Will page through the result set in increments using cursorMark until it has all items. Sort is required for cursorMark \ queries, if you don't specify it, the default is 'id desc'. Returns an iterator of SolrResponse objects. For Example:: >>> for res in solr.cursor_query('SolrClient_unittest',{'q':'*:*'}): print(res) """ cursor = '*' if 'sort' not in query: query['sort'] = 'id desc' while True: query['cursorMark'] = cursor # Get data with starting cursorMark results = self.query(collection, query) if results.get_results_count(): cursor = results.get_cursor() yield results else: self.logger.debug("Got zero Results with cursor: {}".format(cursor)) break
python
def cursor_query(self, collection, query): """ :param str collection: The name of the collection for the request. :param dict query: Dictionary of solr args. Will page through the result set in increments using cursorMark until it has all items. Sort is required for cursorMark \ queries, if you don't specify it, the default is 'id desc'. Returns an iterator of SolrResponse objects. For Example:: >>> for res in solr.cursor_query('SolrClient_unittest',{'q':'*:*'}): print(res) """ cursor = '*' if 'sort' not in query: query['sort'] = 'id desc' while True: query['cursorMark'] = cursor # Get data with starting cursorMark results = self.query(collection, query) if results.get_results_count(): cursor = results.get_cursor() yield results else: self.logger.debug("Got zero Results with cursor: {}".format(cursor)) break
[ "def", "cursor_query", "(", "self", ",", "collection", ",", "query", ")", ":", "cursor", "=", "'*'", "if", "'sort'", "not", "in", "query", ":", "query", "[", "'sort'", "]", "=", "'id desc'", "while", "True", ":", "query", "[", "'cursorMark'", "]", "=", "cursor", "# Get data with starting cursorMark", "results", "=", "self", ".", "query", "(", "collection", ",", "query", ")", "if", "results", ".", "get_results_count", "(", ")", ":", "cursor", "=", "results", ".", "get_cursor", "(", ")", "yield", "results", "else", ":", "self", ".", "logger", ".", "debug", "(", "\"Got zero Results with cursor: {}\"", ".", "format", "(", "cursor", ")", ")", "break" ]
:param str collection: The name of the collection for the request. :param dict query: Dictionary of solr args. Will page through the result set in increments using cursorMark until it has all items. Sort is required for cursorMark \ queries, if you don't specify it, the default is 'id desc'. Returns an iterator of SolrResponse objects. For Example:: >>> for res in solr.cursor_query('SolrClient_unittest',{'q':'*:*'}): print(res)
[ ":", "param", "str", "collection", ":", "The", "name", "of", "the", "collection", "for", "the", "request", ".", ":", "param", "dict", "query", ":", "Dictionary", "of", "solr", "args", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrclient.py#L330-L355
train
moonlitesolutions/SolrClient
SolrClient/routers/aware.py
AwareRouter.get_shard_map
def get_shard_map(self, force_refresh=False): """ You can change this function to get the shard-map from somewhere/somehow place else in conjuction with save_shard_map(). """ now = datetime.utcnow() if force_refresh is True or \ self.shard_map is None or \ (now - self.last_refresh).total_seconds() > self.refresh_ttl: self.last_refresh = now self.refresh_shard_map() return self.shard_map
python
def get_shard_map(self, force_refresh=False): """ You can change this function to get the shard-map from somewhere/somehow place else in conjuction with save_shard_map(). """ now = datetime.utcnow() if force_refresh is True or \ self.shard_map is None or \ (now - self.last_refresh).total_seconds() > self.refresh_ttl: self.last_refresh = now self.refresh_shard_map() return self.shard_map
[ "def", "get_shard_map", "(", "self", ",", "force_refresh", "=", "False", ")", ":", "now", "=", "datetime", ".", "utcnow", "(", ")", "if", "force_refresh", "is", "True", "or", "self", ".", "shard_map", "is", "None", "or", "(", "now", "-", "self", ".", "last_refresh", ")", ".", "total_seconds", "(", ")", ">", "self", ".", "refresh_ttl", ":", "self", ".", "last_refresh", "=", "now", "self", ".", "refresh_shard_map", "(", ")", "return", "self", ".", "shard_map" ]
You can change this function to get the shard-map from somewhere else, in conjunction with save_shard_map().
[ "You", "can", "change", "this", "function", "to", "get", "the", "shard", "-", "map", "from", "somewhere", "else", "in", "conjunction", "with", "save_shard_map", "()", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/routers/aware.py#L122-L134
train
moonlitesolutions/SolrClient
SolrClient/zk.py
ZK.check_zk
def check_zk(self): ''' Will attempt to telnet to each zookeeper that is used by SolrClient and issue 'mntr' command. Response is parsed to check to see if the zookeeper node is a leader or a follower and returned as a dict. If the telnet collection fails or the proper response is not parsed, the zk node will be listed as 'down' in the dict. Desired values are either follower or leader. ''' import telnetlib temp = self.zk_hosts.split('/') zks = temp[0].split(',') status = {} for zk in zks: self.logger.debug("Checking {}".format(zk)) host, port = zk.split(':') try: t = telnetlib.Telnet(host, port=int(port)) t.write('mntr'.encode('ascii')) r = t.read_all() for out in r.decode('utf-8').split('\n'): if out: param, val = out.split('\t') if param == 'zk_server_state': status[zk] = val except Exception as e: self.logger.error("Unable to reach ZK: {}".format(zk)) self.logger.exception(e) status[zk] = 'down' #assert len(zks) == len(status) return status
python
def check_zk(self): ''' Will attempt to telnet to each zookeeper that is used by SolrClient and issue 'mntr' command. Response is parsed to check to see if the zookeeper node is a leader or a follower and returned as a dict. If the telnet collection fails or the proper response is not parsed, the zk node will be listed as 'down' in the dict. Desired values are either follower or leader. ''' import telnetlib temp = self.zk_hosts.split('/') zks = temp[0].split(',') status = {} for zk in zks: self.logger.debug("Checking {}".format(zk)) host, port = zk.split(':') try: t = telnetlib.Telnet(host, port=int(port)) t.write('mntr'.encode('ascii')) r = t.read_all() for out in r.decode('utf-8').split('\n'): if out: param, val = out.split('\t') if param == 'zk_server_state': status[zk] = val except Exception as e: self.logger.error("Unable to reach ZK: {}".format(zk)) self.logger.exception(e) status[zk] = 'down' #assert len(zks) == len(status) return status
[ "def", "check_zk", "(", "self", ")", ":", "import", "telnetlib", "temp", "=", "self", ".", "zk_hosts", ".", "split", "(", "'/'", ")", "zks", "=", "temp", "[", "0", "]", ".", "split", "(", "','", ")", "status", "=", "{", "}", "for", "zk", "in", "zks", ":", "self", ".", "logger", ".", "debug", "(", "\"Checking {}\"", ".", "format", "(", "zk", ")", ")", "host", ",", "port", "=", "zk", ".", "split", "(", "':'", ")", "try", ":", "t", "=", "telnetlib", ".", "Telnet", "(", "host", ",", "port", "=", "int", "(", "port", ")", ")", "t", ".", "write", "(", "'mntr'", ".", "encode", "(", "'ascii'", ")", ")", "r", "=", "t", ".", "read_all", "(", ")", "for", "out", "in", "r", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", "'\\n'", ")", ":", "if", "out", ":", "param", ",", "val", "=", "out", ".", "split", "(", "'\\t'", ")", "if", "param", "==", "'zk_server_state'", ":", "status", "[", "zk", "]", "=", "val", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "\"Unable to reach ZK: {}\"", ".", "format", "(", "zk", ")", ")", "self", ".", "logger", ".", "exception", "(", "e", ")", "status", "[", "zk", "]", "=", "'down'", "#assert len(zks) == len(status)", "return", "status" ]
Will attempt to telnet to each zookeeper that is used by SolrClient and issue 'mntr' command. Response is parsed to check to see if the zookeeper node is a leader or a follower and returned as a dict. If the telnet connection fails or the proper response is not parsed, the zk node will be listed as 'down' in the dict. Desired values are either follower or leader.
[ "Will", "attempt", "to", "telnet", "to", "each", "zookeeper", "that", "is", "used", "by", "SolrClient", "and", "issue", "mntr", "command", ".", "Response", "is", "parsed", "to", "check", "to", "see", "if", "the", "zookeeper", "node", "is", "a", "leader", "or", "a", "follower", "and", "returned", "as", "a", "dict", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/zk.py#L43-L72
train
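A minimal usage sketch for the check_zk record above (not part of the dataset; it assumes a ZK helper instance named zk has already been obtained from your SolrClient setup, however your build exposes it):

# zk is an already-constructed SolrClient ZK helper; the accessor is not shown here.
status = zk.check_zk()
for node, state in status.items():
    # per the docstring above, state is 'leader', 'follower', or 'down'
    print(node, state)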
moonlitesolutions/SolrClient
SolrClient/zk.py
ZK.copy_config
def copy_config(self, original, new): ''' Copies collection configs into a new folder. Can be used to create new collections based on existing configs. Basically, copies all nodes under /configs/original to /configs/new. :param original str: ZK name of original config :param new str: New name of the ZK config. ''' if not self.kz.exists('/configs/{}'.format(original)): raise ZookeeperError("Collection doesn't exist in Zookeeper. Current Collections are: {}".format(self.kz.get_children('/configs'))) base = '/configs/{}'.format(original) nbase = '/configs/{}'.format(new) self._copy_dir(base, nbase)
python
def copy_config(self, original, new): ''' Copies collection configs into a new folder. Can be used to create new collections based on existing configs. Basically, copies all nodes under /configs/original to /configs/new. :param original str: ZK name of original config :param new str: New name of the ZK config. ''' if not self.kz.exists('/configs/{}'.format(original)): raise ZookeeperError("Collection doesn't exist in Zookeeper. Current Collections are: {}".format(self.kz.get_children('/configs'))) base = '/configs/{}'.format(original) nbase = '/configs/{}'.format(new) self._copy_dir(base, nbase)
[ "def", "copy_config", "(", "self", ",", "original", ",", "new", ")", ":", "if", "not", "self", ".", "kz", ".", "exists", "(", "'/configs/{}'", ".", "format", "(", "original", ")", ")", ":", "raise", "ZookeeperError", "(", "\"Collection doesn't exist in Zookeeper. Current Collections are: {}\"", ".", "format", "(", "self", ".", "kz", ".", "get_children", "(", "'/configs'", ")", ")", ")", "base", "=", "'/configs/{}'", ".", "format", "(", "original", ")", "nbase", "=", "'/configs/{}'", ".", "format", "(", "new", ")", "self", ".", "_copy_dir", "(", "base", ",", "nbase", ")" ]
Copies collection configs into a new folder. Can be used to create new collections based on existing configs. Basically, copies all nodes under /configs/original to /configs/new. :param original str: ZK name of original config :param new str: New name of the ZK config.
[ "Copies", "collection", "configs", "into", "a", "new", "folder", ".", "Can", "be", "used", "to", "create", "new", "collections", "based", "on", "existing", "configs", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/zk.py#L98-L111
train
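A hedged sketch of copy_config from the record above; the config names are placeholders and zk is the same assumed ZK helper as in the previous sketch:

# Copies every node under /configs/base_config to /configs/my_new_config.
# Raises ZookeeperError (listing the existing configs) if the source config is missing.
zk.copy_config('base_config', 'my_new_config')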
moonlitesolutions/SolrClient
SolrClient/zk.py
ZK.download_collection_configs
def download_collection_configs(self, collection, fs_path): ''' Downloads ZK Directory to the FileSystem. :param collection str: Name of the collection (zk config name) :param fs_path str: Destination filesystem path. ''' if not self.kz.exists('/configs/{}'.format(collection)): raise ZookeeperError("Collection doesn't exist in Zookeeper. Current Collections are: {} ".format(self.kz.get_children('/configs'))) self._download_dir('/configs/{}'.format(collection), fs_path + os.sep + collection)
python
def download_collection_configs(self, collection, fs_path): ''' Downloads ZK Directory to the FileSystem. :param collection str: Name of the collection (zk config name) :param fs_path str: Destination filesystem path. ''' if not self.kz.exists('/configs/{}'.format(collection)): raise ZookeeperError("Collection doesn't exist in Zookeeper. Current Collections are: {} ".format(self.kz.get_children('/configs'))) self._download_dir('/configs/{}'.format(collection), fs_path + os.sep + collection)
[ "def", "download_collection_configs", "(", "self", ",", "collection", ",", "fs_path", ")", ":", "if", "not", "self", ".", "kz", ".", "exists", "(", "'/configs/{}'", ".", "format", "(", "collection", ")", ")", ":", "raise", "ZookeeperError", "(", "\"Collection doesn't exist in Zookeeper. Current Collections are: {} \"", ".", "format", "(", "self", ".", "kz", ".", "get_children", "(", "'/configs'", ")", ")", ")", "self", ".", "_download_dir", "(", "'/configs/{}'", ".", "format", "(", "collection", ")", ",", "fs_path", "+", "os", ".", "sep", "+", "collection", ")" ]
Downloads ZK Directory to the FileSystem. :param collection str: Name of the collection (zk config name) :param fs_path str: Destination filesystem path.
[ "Downloads", "ZK", "Directory", "to", "the", "FileSystem", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/zk.py#L114-L124
train
moonlitesolutions/SolrClient
SolrClient/zk.py
ZK.upload_collection_configs
def upload_collection_configs(self, collection, fs_path): ''' Uploads collection configurations from a specified directory to zookeeper. ''' coll_path = fs_path if not os.path.isdir(coll_path): raise ValueError("{} Doesn't Exist".format(coll_path)) self._upload_dir(coll_path, '/configs/{}'.format(collection))
python
def upload_collection_configs(self, collection, fs_path): ''' Uploads collection configurations from a specified directory to zookeeper. ''' coll_path = fs_path if not os.path.isdir(coll_path): raise ValueError("{} Doesn't Exist".format(coll_path)) self._upload_dir(coll_path, '/configs/{}'.format(collection))
[ "def", "upload_collection_configs", "(", "self", ",", "collection", ",", "fs_path", ")", ":", "coll_path", "=", "fs_path", "if", "not", "os", ".", "path", ".", "isdir", "(", "coll_path", ")", ":", "raise", "ValueError", "(", "\"{} Doesn't Exist\"", ".", "format", "(", "coll_path", ")", ")", "self", ".", "_upload_dir", "(", "coll_path", ",", "'/configs/{}'", ".", "format", "(", "collection", ")", ")" ]
Uploads collection configurations from a specified directory to zookeeper.
[ "Uploads", "collection", "configurations", "from", "a", "specified", "directory", "to", "zookeeper", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/zk.py#L144-L152
train
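The two config-transfer records above pair naturally; here is a hedged round-trip sketch (paths and config names are placeholders, zk is the assumed ZK helper):

import os

# Writes the config to /tmp/solr_configs/my_config on the local filesystem.
zk.download_collection_configs('my_config', '/tmp/solr_configs')
# Pushes the files in that directory back up under /configs/my_config in ZooKeeper.
zk.upload_collection_configs('my_config', os.path.join('/tmp/solr_configs', 'my_config'))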
moonlitesolutions/SolrClient
SolrClient/schema.py
Schema.create_field
def create_field(self, collection, field_dict): ''' Creates a new field in managed schema, will raise ValueError if the field already exists. field_dict should look like this:: { "name":"sell-by", "type":"tdate", "stored":True } Reference: https://cwiki.apache.org/confluence/display/solr/Defining+Fields ''' if self.does_field_exist(collection,field_dict['name']): raise ValueError("Field {} Already Exists in Solr Collection {}".format(field_dict['name'],collection)) temp = {"add-field":dict(field_dict)} res, con_info =self.solr.transport.send_request(method='POST',endpoint=self.schema_endpoint,collection=collection, data=json.dumps(temp)) return res
python
def create_field(self, collection, field_dict): ''' Creates a new field in managed schema, will raise ValueError if the field already exists. field_dict should look like this:: { "name":"sell-by", "type":"tdate", "stored":True } Reference: https://cwiki.apache.org/confluence/display/solr/Defining+Fields ''' if self.does_field_exist(collection,field_dict['name']): raise ValueError("Field {} Already Exists in Solr Collection {}".format(field_dict['name'],collection)) temp = {"add-field":dict(field_dict)} res, con_info =self.solr.transport.send_request(method='POST',endpoint=self.schema_endpoint,collection=collection, data=json.dumps(temp)) return res
[ "def", "create_field", "(", "self", ",", "collection", ",", "field_dict", ")", ":", "if", "self", ".", "does_field_exist", "(", "collection", ",", "field_dict", "[", "'name'", "]", ")", ":", "raise", "ValueError", "(", "\"Field {} Already Exists in Solr Collection {}\"", ".", "format", "(", "field_dict", "[", "'name'", "]", ",", "collection", ")", ")", "temp", "=", "{", "\"add-field\"", ":", "dict", "(", "field_dict", ")", "}", "res", ",", "con_info", "=", "self", ".", "solr", ".", "transport", ".", "send_request", "(", "method", "=", "'POST'", ",", "endpoint", "=", "self", ".", "schema_endpoint", ",", "collection", "=", "collection", ",", "data", "=", "json", ".", "dumps", "(", "temp", ")", ")", "return", "res" ]
Creates a new field in managed schema, will raise ValueError if the field already exists. field_dict should look like this:: { "name":"sell-by", "type":"tdate", "stored":True } Reference: https://cwiki.apache.org/confluence/display/solr/Defining+Fields
[ "Creates", "a", "new", "field", "in", "managed", "schema", "will", "raise", "ValueError", "if", "the", "field", "already", "exists", ".", "field_dict", "should", "look", "like", "this", "::" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/schema.py#L35-L52
train
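A short sketch of create_field from the record above, reusing the field_dict example from its docstring; the Solr URL, the collection name, and the solr.schema accessor are assumptions, not part of the record:

from SolrClient import SolrClient

solr = SolrClient('http://localhost:8983/solr')                 # assumed Solr URL
field = {"name": "sell-by", "type": "tdate", "stored": True}    # field_dict shape from the docstring
# Raises ValueError if the field already exists in the collection's managed schema.
res = solr.schema.create_field('my_collection', field)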
moonlitesolutions/SolrClient
SolrClient/schema.py
Schema.delete_field
def delete_field(self,collection,field_name): ''' Deletes a field from the Solr Collection. Will raise ValueError if the field doesn't exist. :param string collection: Name of the collection for the action :param string field_name: String name of the field. ''' if not self.does_field_exist(collection,field_name): raise ValueError("Field {} Doesn't Exists in Solr Collection {}".format(field_name,collection)) else: temp = {"delete-field" : { "name":field_name }} res, con_info = self.solr.transport.send_request(method='POST',endpoint=self.schema_endpoint,collection=collection, data=json.dumps(temp)) return res
python
def delete_field(self,collection,field_name): ''' Deletes a field from the Solr Collection. Will raise ValueError if the field doesn't exist. :param string collection: Name of the collection for the action :param string field_name: String name of the field. ''' if not self.does_field_exist(collection,field_name): raise ValueError("Field {} Doesn't Exists in Solr Collection {}".format(field_name,collection)) else: temp = {"delete-field" : { "name":field_name }} res, con_info = self.solr.transport.send_request(method='POST',endpoint=self.schema_endpoint,collection=collection, data=json.dumps(temp)) return res
[ "def", "delete_field", "(", "self", ",", "collection", ",", "field_name", ")", ":", "if", "not", "self", ".", "does_field_exist", "(", "collection", ",", "field_name", ")", ":", "raise", "ValueError", "(", "\"Field {} Doesn't Exists in Solr Collection {}\"", ".", "format", "(", "field_name", ",", "collection", ")", ")", "else", ":", "temp", "=", "{", "\"delete-field\"", ":", "{", "\"name\"", ":", "field_name", "}", "}", "res", ",", "con_info", "=", "self", ".", "solr", ".", "transport", ".", "send_request", "(", "method", "=", "'POST'", ",", "endpoint", "=", "self", ".", "schema_endpoint", ",", "collection", "=", "collection", ",", "data", "=", "json", ".", "dumps", "(", "temp", ")", ")", "return", "res" ]
Deletes a field from the Solr Collection. Will raise ValueError if the field doesn't exist. :param string collection: Name of the collection for the action :param string field_name: String name of the field.
[ "Deletes", "a", "field", "from", "the", "Solr", "Collection", ".", "Will", "raise", "ValueError", "if", "the", "field", "doesn", "t", "exist", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/schema.py#L70-L82
train
moonlitesolutions/SolrClient
SolrClient/schema.py
Schema.does_field_exist
def does_field_exist(self,collection,field_name): ''' Checks if the field exists will return a boolean True (exists) or False(doesn't exist). :param string collection: Name of the collection for the action :param string field_name: String name of the field. ''' schema = self.get_schema_fields(collection) logging.info(schema) return True if field_name in [field['name'] for field in schema['fields']] else False
python
def does_field_exist(self,collection,field_name): ''' Checks if the field exists will return a boolean True (exists) or False(doesn't exist). :param string collection: Name of the collection for the action :param string field_name: String name of the field. ''' schema = self.get_schema_fields(collection) logging.info(schema) return True if field_name in [field['name'] for field in schema['fields']] else False
[ "def", "does_field_exist", "(", "self", ",", "collection", ",", "field_name", ")", ":", "schema", "=", "self", ".", "get_schema_fields", "(", "collection", ")", "logging", ".", "info", "(", "schema", ")", "return", "True", "if", "field_name", "in", "[", "field", "[", "'name'", "]", "for", "field", "in", "schema", "[", "'fields'", "]", "]", "else", "False" ]
Checks if the field exists; will return a boolean True (exists) or False (doesn't exist). :param string collection: Name of the collection for the action :param string field_name: String name of the field.
[ "Checks", "if", "the", "field", "exists", "will", "return", "a", "boolean", "True", "(", "exists", ")", "or", "False", "(", "doesn", "t", "exist", ")", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/schema.py#L85-L94
train
moonlitesolutions/SolrClient
SolrClient/schema.py
Schema.create_copy_field
def create_copy_field(self,collection,copy_dict): ''' Creates a copy field. copy_dict should look like :: {'source':'source_field_name','dest':'destination_field_name'} :param string collection: Name of the collection for the action :param dict copy_field: Dictionary of field info Reference: https://cwiki.apache.org/confluence/display/solr/Schema+API#SchemaAPI-AddaNewCopyFieldRule ''' temp = {"add-copy-field":dict(copy_dict)} res, con_info = self.solr.transport.send_request(method='POST',endpoint=self.schema_endpoint,collection=collection, data=json.dumps(temp)) return res
python
def create_copy_field(self,collection,copy_dict): ''' Creates a copy field. copy_dict should look like :: {'source':'source_field_name','dest':'destination_field_name'} :param string collection: Name of the collection for the action :param dict copy_field: Dictionary of field info Reference: https://cwiki.apache.org/confluence/display/solr/Schema+API#SchemaAPI-AddaNewCopyFieldRule ''' temp = {"add-copy-field":dict(copy_dict)} res, con_info = self.solr.transport.send_request(method='POST',endpoint=self.schema_endpoint,collection=collection, data=json.dumps(temp)) return res
[ "def", "create_copy_field", "(", "self", ",", "collection", ",", "copy_dict", ")", ":", "temp", "=", "{", "\"add-copy-field\"", ":", "dict", "(", "copy_dict", ")", "}", "res", ",", "con_info", "=", "self", ".", "solr", ".", "transport", ".", "send_request", "(", "method", "=", "'POST'", ",", "endpoint", "=", "self", ".", "schema_endpoint", ",", "collection", "=", "collection", ",", "data", "=", "json", ".", "dumps", "(", "temp", ")", ")", "return", "res" ]
Creates a copy field. copy_dict should look like :: {'source':'source_field_name','dest':'destination_field_name'} :param string collection: Name of the collection for the action :param dict copy_dict: Dictionary of field info Reference: https://cwiki.apache.org/confluence/display/solr/Schema+API#SchemaAPI-AddaNewCopyFieldRule
[ "Creates", "a", "copy", "field", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/schema.py#L96-L111
train
moonlitesolutions/SolrClient
SolrClient/schema.py
Schema.delete_copy_field
def delete_copy_field(self, collection, copy_dict): ''' Deletes a copy field. copy_dict should look like :: {'source':'source_field_name','dest':'destination_field_name'} :param string collection: Name of the collection for the action :param dict copy_field: Dictionary of field info ''' #Fix this later to check for field before sending a delete if self.devel: self.logger.debug("Deleting {}".format(str(copy_dict))) copyfields = self.get_schema_copyfields(collection) if copy_dict not in copyfields: self.logger.info("Fieldset not in Solr Copy Fields: {}".format(str(copy_dict))) temp = {"delete-copy-field": dict(copy_dict)} res, con_info = self.solr.transport.send_request(method='POST',endpoint=self.schema_endpoint,collection=collection, data=json.dumps(temp)) return res
python
def delete_copy_field(self, collection, copy_dict): ''' Deletes a copy field. copy_dict should look like :: {'source':'source_field_name','dest':'destination_field_name'} :param string collection: Name of the collection for the action :param dict copy_field: Dictionary of field info ''' #Fix this later to check for field before sending a delete if self.devel: self.logger.debug("Deleting {}".format(str(copy_dict))) copyfields = self.get_schema_copyfields(collection) if copy_dict not in copyfields: self.logger.info("Fieldset not in Solr Copy Fields: {}".format(str(copy_dict))) temp = {"delete-copy-field": dict(copy_dict)} res, con_info = self.solr.transport.send_request(method='POST',endpoint=self.schema_endpoint,collection=collection, data=json.dumps(temp)) return res
[ "def", "delete_copy_field", "(", "self", ",", "collection", ",", "copy_dict", ")", ":", "#Fix this later to check for field before sending a delete", "if", "self", ".", "devel", ":", "self", ".", "logger", ".", "debug", "(", "\"Deleting {}\"", ".", "format", "(", "str", "(", "copy_dict", ")", ")", ")", "copyfields", "=", "self", ".", "get_schema_copyfields", "(", "collection", ")", "if", "copy_dict", "not", "in", "copyfields", ":", "self", ".", "logger", ".", "info", "(", "\"Fieldset not in Solr Copy Fields: {}\"", ".", "format", "(", "str", "(", "copy_dict", ")", ")", ")", "temp", "=", "{", "\"delete-copy-field\"", ":", "dict", "(", "copy_dict", ")", "}", "res", ",", "con_info", "=", "self", ".", "solr", ".", "transport", ".", "send_request", "(", "method", "=", "'POST'", ",", "endpoint", "=", "self", ".", "schema_endpoint", ",", "collection", "=", "collection", ",", "data", "=", "json", ".", "dumps", "(", "temp", ")", ")", "return", "res" ]
Deletes a copy field. copy_dict should look like :: {'source':'source_field_name','dest':'destination_field_name'} :param string collection: Name of the collection for the action :param dict copy_dict: Dictionary of field info
[ "Deletes", "a", "copy", "field", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/schema.py#L113-L133
train
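The two copy-field records above are symmetric; a hedged sketch using the copy_dict shape from their docstrings (field and collection names are placeholders, solr.schema is the assumed accessor from the earlier sketch):

copy_rule = {'source': 'title', 'dest': 'text'}            # hypothetical source/dest fields
solr.schema.create_copy_field('my_collection', copy_rule)
# delete_copy_field only logs a note if the rule is not among the existing copy fields.
solr.schema.delete_copy_field('my_collection', copy_rule)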
moonlitesolutions/SolrClient
SolrClient/routers/base.py
BaseRouter.shuffle_hosts
def shuffle_hosts(self): """ Shuffle hosts so we don't always query the first one. Example: using in a webapp with X processes in Y servers, the hosts contacted will be more random. The user can also call this function to reshuffle every 'x' seconds or before every request. :return: """ if len(self.hosts) > 1: random.shuffle(self.hosts) return self.hosts
python
def shuffle_hosts(self): """ Shuffle hosts so we don't always query the first one. Example: using in a webapp with X processes in Y servers, the hosts contacted will be more random. The user can also call this function to reshuffle every 'x' seconds or before every request. :return: """ if len(self.hosts) > 1: random.shuffle(self.hosts) return self.hosts
[ "def", "shuffle_hosts", "(", "self", ")", ":", "if", "len", "(", "self", ".", "hosts", ")", ">", "1", ":", "random", ".", "shuffle", "(", "self", ".", "hosts", ")", "return", "self", ".", "hosts" ]
Shuffle hosts so we don't always query the first one. Example: when used in a webapp with X processes across Y servers, the hosts contacted will be more random. The user can also call this function to reshuffle every 'x' seconds or before every request. :return:
[ "Shuffle", "hosts", "so", "we", "don", "t", "always", "query", "the", "first", "one", ".", "Example", ":", "using", "in", "a", "webapp", "with", "X", "processes", "in", "Y", "servers", "the", "hosts", "contacted", "will", "be", "more", "random", ".", "The", "user", "can", "also", "call", "this", "function", "to", "reshuffle", "every", "x", "seconds", "or", "before", "every", "request", ".", ":", "return", ":" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/routers/base.py#L26-L35
train
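The shuffle_hosts record above is a small pattern worth seeing in isolation; this standalone sketch only illustrates the in-place shuffle it performs and does not call the router itself (host URLs are hypothetical):

import random

hosts = ['http://solr1:8983/solr', 'http://solr2:8983/solr', 'http://solr3:8983/solr']
if len(hosts) > 1:          # same guard as the router: a single host is never shuffled
    random.shuffle(hosts)
print(hosts[0])             # the host the next request would hit first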
drobota/robotframework-xvfb
XvfbRobot/__init__.py
XvfbRobot.start_virtual_display
def start_virtual_display(self, width=1440, height=900, colordepth=24, **kwargs): """Starts virtual display which will be destroyed after test execution will be end *Arguments:* - width: a width to be set in pixels - height: a height to be set in pixels - color_depth: a color depth to be used - kwargs: extra parameters *Example:* | Start Virtual Display | | Start Virtual Display | 1920 | 1080 | | Start Virtual Display | ${1920} | ${1080} | ${16} | """ if self._display is None: logger.info("Using virtual display: '{0}x{1}x{2}'".format( width, height, colordepth)) self._display = Xvfb(int(width), int(height), int(colordepth), **kwargs) self._display.start() atexit.register(self._display.stop)
python
def start_virtual_display(self, width=1440, height=900, colordepth=24, **kwargs): """Starts virtual display which will be destroyed after test execution will be end *Arguments:* - width: a width to be set in pixels - height: a height to be set in pixels - color_depth: a color depth to be used - kwargs: extra parameters *Example:* | Start Virtual Display | | Start Virtual Display | 1920 | 1080 | | Start Virtual Display | ${1920} | ${1080} | ${16} | """ if self._display is None: logger.info("Using virtual display: '{0}x{1}x{2}'".format( width, height, colordepth)) self._display = Xvfb(int(width), int(height), int(colordepth), **kwargs) self._display.start() atexit.register(self._display.stop)
[ "def", "start_virtual_display", "(", "self", ",", "width", "=", "1440", ",", "height", "=", "900", ",", "colordepth", "=", "24", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_display", "is", "None", ":", "logger", ".", "info", "(", "\"Using virtual display: '{0}x{1}x{2}'\"", ".", "format", "(", "width", ",", "height", ",", "colordepth", ")", ")", "self", ".", "_display", "=", "Xvfb", "(", "int", "(", "width", ")", ",", "int", "(", "height", ")", ",", "int", "(", "colordepth", ")", ",", "*", "*", "kwargs", ")", "self", ".", "_display", ".", "start", "(", ")", "atexit", ".", "register", "(", "self", ".", "_display", ".", "stop", ")" ]
Starts virtual display which will be destroyed after test execution ends *Arguments:* - width: a width to be set in pixels - height: a height to be set in pixels - colordepth: a color depth to be used - kwargs: extra parameters *Example:* | Start Virtual Display | | Start Virtual Display | 1920 | 1080 | | Start Virtual Display | ${1920} | ${1080} | ${16} |
[ "Starts", "virtual", "display", "which", "will", "be", "destroyed", "after", "test", "execution", "will", "be", "end" ]
66cd6174b637ab71aada71067ce4eaaa3280eb17
https://github.com/drobota/robotframework-xvfb/blob/66cd6174b637ab71aada71067ce4eaaa3280eb17/XvfbRobot/__init__.py#L34-L58
train
moonlitesolutions/SolrClient
SolrClient/collections.py
Collections.api
def api(self, action, args=None): """ Sends a request to Solr Collections API. Documentation is here: https://cwiki.apache.org/confluence/display/solr/Collections+API :param string action: Name of the collection for the action :param dict args: Dictionary of specific parameters for action """ if args is None: args = {} args['action'] = action.upper() try: res, con_info = self.solr.transport.send_request(endpoint='admin/collections', params=args) except Exception as e: self.logger.error("Error querying SolrCloud Collections API. ") self.logger.exception(e) raise e if 'responseHeader' in res and res['responseHeader']['status'] == 0: return res, con_info else: raise SolrError("Error Issuing Collections API Call for: {} +".format(con_info, res))
python
def api(self, action, args=None): """ Sends a request to Solr Collections API. Documentation is here: https://cwiki.apache.org/confluence/display/solr/Collections+API :param string action: Name of the collection for the action :param dict args: Dictionary of specific parameters for action """ if args is None: args = {} args['action'] = action.upper() try: res, con_info = self.solr.transport.send_request(endpoint='admin/collections', params=args) except Exception as e: self.logger.error("Error querying SolrCloud Collections API. ") self.logger.exception(e) raise e if 'responseHeader' in res and res['responseHeader']['status'] == 0: return res, con_info else: raise SolrError("Error Issuing Collections API Call for: {} +".format(con_info, res))
[ "def", "api", "(", "self", ",", "action", ",", "args", "=", "None", ")", ":", "if", "args", "is", "None", ":", "args", "=", "{", "}", "args", "[", "'action'", "]", "=", "action", ".", "upper", "(", ")", "try", ":", "res", ",", "con_info", "=", "self", ".", "solr", ".", "transport", ".", "send_request", "(", "endpoint", "=", "'admin/collections'", ",", "params", "=", "args", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "\"Error querying SolrCloud Collections API. \"", ")", "self", ".", "logger", ".", "exception", "(", "e", ")", "raise", "e", "if", "'responseHeader'", "in", "res", "and", "res", "[", "'responseHeader'", "]", "[", "'status'", "]", "==", "0", ":", "return", "res", ",", "con_info", "else", ":", "raise", "SolrError", "(", "\"Error Issuing Collections API Call for: {} +\"", ".", "format", "(", "con_info", ",", "res", ")", ")" ]
Sends a request to Solr Collections API. Documentation is here: https://cwiki.apache.org/confluence/display/solr/Collections+API :param string action: Name of the Collections API action to perform :param dict args: Dictionary of specific parameters for action
[ "Sends", "a", "request", "to", "Solr", "Collections", "API", ".", "Documentation", "is", "here", ":", "https", ":", "//", "cwiki", ".", "apache", ".", "org", "/", "confluence", "/", "display", "/", "solr", "/", "Collections", "+", "API" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/collections.py#L20-L42
train
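A hedged sketch of the api record above; the collections accessor on the client and the actions shown are assumptions to check against your SolrClient build:

from SolrClient import SolrClient

solr = SolrClient('http://localhost:8983/solr')        # assumed Solr URL
# Any documented Collections API action can be passed through; args carry its parameters.
res, con_info = solr.collections.api('CLUSTERSTATUS')
res, con_info = solr.collections.api('DELETE', {'name': 'old_collection'})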
moonlitesolutions/SolrClient
SolrClient/collections.py
Collections.clusterstatus
def clusterstatus(self): """ Returns a slightly slimmed down version of the clusterstatus api command. It also gets count of documents in each shard on each replica and returns it as doc_count key for each replica. """ res = self.cluster_status_raw() cluster = res['cluster']['collections'] out = {} try: for collection in cluster: out[collection] = {} for shard in cluster[collection]['shards']: out[collection][shard] = {} for replica in cluster[collection]['shards'][shard]['replicas']: out[collection][shard][replica] = cluster[collection]['shards'][shard]['replicas'][replica] if out[collection][shard][replica]['state'] != 'active': out[collection][shard][replica]['doc_count'] = False else: out[collection][shard][replica]['doc_count'] = self._get_collection_counts( out[collection][shard][replica]) except Exception as e: self.logger.error("Couldn't parse response from clusterstatus API call") self.logger.exception(e) return out
python
def clusterstatus(self): """ Returns a slightly slimmed down version of the clusterstatus api command. It also gets count of documents in each shard on each replica and returns it as doc_count key for each replica. """ res = self.cluster_status_raw() cluster = res['cluster']['collections'] out = {} try: for collection in cluster: out[collection] = {} for shard in cluster[collection]['shards']: out[collection][shard] = {} for replica in cluster[collection]['shards'][shard]['replicas']: out[collection][shard][replica] = cluster[collection]['shards'][shard]['replicas'][replica] if out[collection][shard][replica]['state'] != 'active': out[collection][shard][replica]['doc_count'] = False else: out[collection][shard][replica]['doc_count'] = self._get_collection_counts( out[collection][shard][replica]) except Exception as e: self.logger.error("Couldn't parse response from clusterstatus API call") self.logger.exception(e) return out
[ "def", "clusterstatus", "(", "self", ")", ":", "res", "=", "self", ".", "cluster_status_raw", "(", ")", "cluster", "=", "res", "[", "'cluster'", "]", "[", "'collections'", "]", "out", "=", "{", "}", "try", ":", "for", "collection", "in", "cluster", ":", "out", "[", "collection", "]", "=", "{", "}", "for", "shard", "in", "cluster", "[", "collection", "]", "[", "'shards'", "]", ":", "out", "[", "collection", "]", "[", "shard", "]", "=", "{", "}", "for", "replica", "in", "cluster", "[", "collection", "]", "[", "'shards'", "]", "[", "shard", "]", "[", "'replicas'", "]", ":", "out", "[", "collection", "]", "[", "shard", "]", "[", "replica", "]", "=", "cluster", "[", "collection", "]", "[", "'shards'", "]", "[", "shard", "]", "[", "'replicas'", "]", "[", "replica", "]", "if", "out", "[", "collection", "]", "[", "shard", "]", "[", "replica", "]", "[", "'state'", "]", "!=", "'active'", ":", "out", "[", "collection", "]", "[", "shard", "]", "[", "replica", "]", "[", "'doc_count'", "]", "=", "False", "else", ":", "out", "[", "collection", "]", "[", "shard", "]", "[", "replica", "]", "[", "'doc_count'", "]", "=", "self", ".", "_get_collection_counts", "(", "out", "[", "collection", "]", "[", "shard", "]", "[", "replica", "]", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "\"Couldn't parse response from clusterstatus API call\"", ")", "self", ".", "logger", ".", "exception", "(", "e", ")", "return", "out" ]
Returns a slightly slimmed down version of the clusterstatus api command. It also gets the count of documents in each shard on each replica and returns it as the doc_count key for each replica.
[ "Returns", "a", "slightly", "slimmed", "down", "version", "of", "the", "clusterstatus", "api", "command", ".", "It", "also", "gets", "count", "of", "documents", "in", "each", "shard", "on", "each", "replica", "and", "returns", "it", "as", "doc_count", "key", "for", "each", "replica", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/collections.py#L44-L71
train
moonlitesolutions/SolrClient
SolrClient/collections.py
Collections.create
def create(self, name, numShards, params=None): """ Create a new collection. """ if params is None: params = {} params.update( name=name, numShards=numShards ) return self.api('CREATE', params)
python
def create(self, name, numShards, params=None): """ Create a new collection. """ if params is None: params = {} params.update( name=name, numShards=numShards ) return self.api('CREATE', params)
[ "def", "create", "(", "self", ",", "name", ",", "numShards", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "params", ".", "update", "(", "name", "=", "name", ",", "numShards", "=", "numShards", ")", "return", "self", ".", "api", "(", "'CREATE'", ",", "params", ")" ]
Create a new collection.
[ "Create", "a", "new", "collection", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/collections.py#L97-L107
train
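A short sketch of the create record above; the collection name, shard count, and extra Collections API parameters are placeholders, and solr.collections is the assumed accessor:

# numShards is required; any other CREATE parameters go in the optional params dict.
res, con_info = solr.collections.create(
    'new_collection', 2,
    {'replicationFactor': 2, 'collection.configName': 'my_config'})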
moonlitesolutions/SolrClient
SolrClient/collections.py
Collections._get_collection_counts
def _get_collection_counts(self, core_data): """ Queries each core to get individual counts for each core for each shard. """ if core_data['base_url'] not in self.solr_clients: from SolrClient import SolrClient self.solr_clients['base_url'] = SolrClient(core_data['base_url'], log=self.logger) try: return self.solr_clients['base_url'].query(core_data['core'], {'q': '*:*', 'rows': 0, 'distrib': 'false', }).get_num_found() except Exception as e: self.logger.error("Couldn't get Counts for {}/{}".format(core_data['base_url'], core_data['core'])) self.logger.exception(e) return False
python
def _get_collection_counts(self, core_data): """ Queries each core to get individual counts for each core for each shard. """ if core_data['base_url'] not in self.solr_clients: from SolrClient import SolrClient self.solr_clients['base_url'] = SolrClient(core_data['base_url'], log=self.logger) try: return self.solr_clients['base_url'].query(core_data['core'], {'q': '*:*', 'rows': 0, 'distrib': 'false', }).get_num_found() except Exception as e: self.logger.error("Couldn't get Counts for {}/{}".format(core_data['base_url'], core_data['core'])) self.logger.exception(e) return False
[ "def", "_get_collection_counts", "(", "self", ",", "core_data", ")", ":", "if", "core_data", "[", "'base_url'", "]", "not", "in", "self", ".", "solr_clients", ":", "from", "SolrClient", "import", "SolrClient", "self", ".", "solr_clients", "[", "'base_url'", "]", "=", "SolrClient", "(", "core_data", "[", "'base_url'", "]", ",", "log", "=", "self", ".", "logger", ")", "try", ":", "return", "self", ".", "solr_clients", "[", "'base_url'", "]", ".", "query", "(", "core_data", "[", "'core'", "]", ",", "{", "'q'", ":", "'*:*'", ",", "'rows'", ":", "0", ",", "'distrib'", ":", "'false'", ",", "}", ")", ".", "get_num_found", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "\"Couldn't get Counts for {}/{}\"", ".", "format", "(", "core_data", "[", "'base_url'", "]", ",", "core_data", "[", "'core'", "]", ")", ")", "self", ".", "logger", ".", "exception", "(", "e", ")", "return", "False" ]
Queries each core to get individual counts for each core for each shard.
[ "Queries", "each", "core", "to", "get", "individual", "counts", "for", "each", "core", "for", "each", "shard", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/collections.py#L109-L125
train
moonlitesolutions/SolrClient
SolrClient/collections.py
Collections.check_status
def check_status(self, ignore=(), status=None): """ Checks status of each collection and shard to make sure that: a) Cluster state is active b) Number of docs matches across replicas for a given shard. Returns a dict of results for custom alerting. """ self.SHARD_CHECKS = [ {'check_msg': 'Bad Core Count Check', 'f': self._check_shard_count}, {'check_msg': 'Bad Shard Cluster Status', 'f': self._check_shard_status} ] if status is None: status = self.clusterstatus() out = {} for collection in status: out[collection] = {} out[collection]['coll_status'] = True # Means it's fine out[collection]['coll_messages'] = [] for shard in status[collection]: self.logger.debug("Checking {}/{}".format(collection, shard)) s_dict = status[collection][shard] for check in self.SHARD_CHECKS: if check['check_msg'] in ignore: continue res = check['f'](s_dict) if not res: out[collection]['coll_status'] = False if check['check_msg'] not in out[collection]['coll_messages']: out[collection]['coll_messages'].append(check['check_msg']) self.logger.debug(s_dict) return out
python
def check_status(self, ignore=(), status=None): """ Checks status of each collection and shard to make sure that: a) Cluster state is active b) Number of docs matches across replicas for a given shard. Returns a dict of results for custom alerting. """ self.SHARD_CHECKS = [ {'check_msg': 'Bad Core Count Check', 'f': self._check_shard_count}, {'check_msg': 'Bad Shard Cluster Status', 'f': self._check_shard_status} ] if status is None: status = self.clusterstatus() out = {} for collection in status: out[collection] = {} out[collection]['coll_status'] = True # Means it's fine out[collection]['coll_messages'] = [] for shard in status[collection]: self.logger.debug("Checking {}/{}".format(collection, shard)) s_dict = status[collection][shard] for check in self.SHARD_CHECKS: if check['check_msg'] in ignore: continue res = check['f'](s_dict) if not res: out[collection]['coll_status'] = False if check['check_msg'] not in out[collection]['coll_messages']: out[collection]['coll_messages'].append(check['check_msg']) self.logger.debug(s_dict) return out
[ "def", "check_status", "(", "self", ",", "ignore", "=", "(", ")", ",", "status", "=", "None", ")", ":", "self", ".", "SHARD_CHECKS", "=", "[", "{", "'check_msg'", ":", "'Bad Core Count Check'", ",", "'f'", ":", "self", ".", "_check_shard_count", "}", ",", "{", "'check_msg'", ":", "'Bad Shard Cluster Status'", ",", "'f'", ":", "self", ".", "_check_shard_status", "}", "]", "if", "status", "is", "None", ":", "status", "=", "self", ".", "clusterstatus", "(", ")", "out", "=", "{", "}", "for", "collection", "in", "status", ":", "out", "[", "collection", "]", "=", "{", "}", "out", "[", "collection", "]", "[", "'coll_status'", "]", "=", "True", "# Means it's fine", "out", "[", "collection", "]", "[", "'coll_messages'", "]", "=", "[", "]", "for", "shard", "in", "status", "[", "collection", "]", ":", "self", ".", "logger", ".", "debug", "(", "\"Checking {}/{}\"", ".", "format", "(", "collection", ",", "shard", ")", ")", "s_dict", "=", "status", "[", "collection", "]", "[", "shard", "]", "for", "check", "in", "self", ".", "SHARD_CHECKS", ":", "if", "check", "[", "'check_msg'", "]", "in", "ignore", ":", "continue", "res", "=", "check", "[", "'f'", "]", "(", "s_dict", ")", "if", "not", "res", ":", "out", "[", "collection", "]", "[", "'coll_status'", "]", "=", "False", "if", "check", "[", "'check_msg'", "]", "not", "in", "out", "[", "collection", "]", "[", "'coll_messages'", "]", ":", "out", "[", "collection", "]", "[", "'coll_messages'", "]", ".", "append", "(", "check", "[", "'check_msg'", "]", ")", "self", ".", "logger", ".", "debug", "(", "s_dict", ")", "return", "out" ]
Checks status of each collection and shard to make sure that: a) Cluster state is active b) Number of docs matches across replicas for a given shard. Returns a dict of results for custom alerting.
[ "Checks", "status", "of", "each", "collection", "and", "shard", "to", "make", "sure", "that", ":", "a", ")", "Cluster", "state", "is", "active", "b", ")", "Number", "of", "docs", "matches", "across", "replicas", "for", "a", "given", "shard", ".", "Returns", "a", "dict", "of", "results", "for", "custom", "alerting", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/collections.py#L142-L172
train
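A hedged sketch of check_status from the record above, reading the coll_status and coll_messages keys its code documents (solr.collections is the assumed accessor):

report = solr.collections.check_status()
for coll, info in report.items():
    if not info['coll_status']:                       # False means one of the shard checks failed
        print(coll, 'failed checks:', info['coll_messages'])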
moonlitesolutions/SolrClient
SolrClient/helpers/reindexer.py
Reindexer.reindex
def reindex(self, fq= [], **kwargs): ''' Starts Reindexing Process. All parameter arguments will be passed down to the getter function. :param string fq: FilterQuery to pass to source Solr to retrieve items. This can be used to limit the results. ''' for items in self._getter(fq=fq, **kwargs): self._putter(items) if type(self._dest) is SolrClient and self._dest_coll: self.log.info("Finished Indexing, sending a commit") self._dest.commit(self._dest_coll, openSearcher=True)
python
def reindex(self, fq= [], **kwargs): ''' Starts Reindexing Process. All parameter arguments will be passed down to the getter function. :param string fq: FilterQuery to pass to source Solr to retrieve items. This can be used to limit the results. ''' for items in self._getter(fq=fq, **kwargs): self._putter(items) if type(self._dest) is SolrClient and self._dest_coll: self.log.info("Finished Indexing, sending a commit") self._dest.commit(self._dest_coll, openSearcher=True)
[ "def", "reindex", "(", "self", ",", "fq", "=", "[", "]", ",", "*", "*", "kwargs", ")", ":", "for", "items", "in", "self", ".", "_getter", "(", "fq", "=", "fq", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_putter", "(", "items", ")", "if", "type", "(", "self", ".", "_dest", ")", "is", "SolrClient", "and", "self", ".", "_dest_coll", ":", "self", ".", "log", ".", "info", "(", "\"Finished Indexing, sending a commit\"", ")", "self", ".", "_dest", ".", "commit", "(", "self", ".", "_dest_coll", ",", "openSearcher", "=", "True", ")" ]
Starts Reindexing Process. All parameter arguments will be passed down to the getter function. :param string fq: FilterQuery to pass to source Solr to retrieve items. This can be used to limit the results.
[ "Starts", "Reindexing", "Process", ".", "All", "parameter", "arguments", "will", "be", "passed", "down", "to", "the", "getter", "function", ".", ":", "param", "string", "fq", ":", "FilterQuery", "to", "pass", "to", "source", "Solr", "to", "retrieve", "items", ".", "This", "can", "be", "used", "to", "limit", "the", "results", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/helpers/reindexer.py#L87-L96
train
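A sketch of driving the reindex record above end to end; the constructor arguments shown are assumptions inferred from the helper's attributes (_source, _dest, _source_coll, _dest_coll), so check the actual signature before relying on them:

from SolrClient import SolrClient
from SolrClient.helpers import Reindexer      # import path assumed from the path field above

source = SolrClient('http://old-solr:8983/solr')      # hypothetical hosts
dest = SolrClient('http://new-solr:8983/solr')
r = Reindexer(source, dest, source_coll='old_collection', dest_coll='new_collection')
r.reindex()                            # full copy; commits the destination when finished
r.reindex(fq=['type:article'])         # or restrict the source documents with filter queries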
moonlitesolutions/SolrClient
SolrClient/helpers/reindexer.py
Reindexer._from_solr
def _from_solr(self, fq=[], report_frequency = 25): ''' Method for retrieving batch data from Solr. ''' cursor = '*' stime = datetime.now() query_count = 0 while True: #Get data with starting cursorMark query = self._get_query(cursor) #Add FQ to the query. This is used by resume to filter on date fields and when specifying document subset. #Not included in _get_query for more flexibiilty. if fq: if 'fq' in query: [query['fq'].append(x) for x in fq] else: query['fq'] = fq results = self._source.query(self._source_coll, query) query_count += 1 if query_count % report_frequency == 0: self.log.info("Processed {} Items in {} Seconds. Apprximately {} items/minute".format( self._items_processed, int((datetime.now()-stime).seconds), str(int(self._items_processed / ((datetime.now()-stime).seconds/60))) )) if results.get_results_count(): #If we got items back, get the new cursor and yield the docs self._items_processed += results.get_results_count() cursor = results.get_cursor() #Remove ignore fields docs = self._trim_fields(results.docs) yield docs if results.get_results_count() < self._rows: #Less results than asked, probably done break else: #No Results, probably done :) self.log.debug("Got zero Results with cursor: {}".format(cursor)) break
python
def _from_solr(self, fq=[], report_frequency = 25): ''' Method for retrieving batch data from Solr. ''' cursor = '*' stime = datetime.now() query_count = 0 while True: #Get data with starting cursorMark query = self._get_query(cursor) #Add FQ to the query. This is used by resume to filter on date fields and when specifying document subset. #Not included in _get_query for more flexibiilty. if fq: if 'fq' in query: [query['fq'].append(x) for x in fq] else: query['fq'] = fq results = self._source.query(self._source_coll, query) query_count += 1 if query_count % report_frequency == 0: self.log.info("Processed {} Items in {} Seconds. Apprximately {} items/minute".format( self._items_processed, int((datetime.now()-stime).seconds), str(int(self._items_processed / ((datetime.now()-stime).seconds/60))) )) if results.get_results_count(): #If we got items back, get the new cursor and yield the docs self._items_processed += results.get_results_count() cursor = results.get_cursor() #Remove ignore fields docs = self._trim_fields(results.docs) yield docs if results.get_results_count() < self._rows: #Less results than asked, probably done break else: #No Results, probably done :) self.log.debug("Got zero Results with cursor: {}".format(cursor)) break
[ "def", "_from_solr", "(", "self", ",", "fq", "=", "[", "]", ",", "report_frequency", "=", "25", ")", ":", "cursor", "=", "'*'", "stime", "=", "datetime", ".", "now", "(", ")", "query_count", "=", "0", "while", "True", ":", "#Get data with starting cursorMark\r", "query", "=", "self", ".", "_get_query", "(", "cursor", ")", "#Add FQ to the query. This is used by resume to filter on date fields and when specifying document subset.\r", "#Not included in _get_query for more flexibiilty.\r", "if", "fq", ":", "if", "'fq'", "in", "query", ":", "[", "query", "[", "'fq'", "]", ".", "append", "(", "x", ")", "for", "x", "in", "fq", "]", "else", ":", "query", "[", "'fq'", "]", "=", "fq", "results", "=", "self", ".", "_source", ".", "query", "(", "self", ".", "_source_coll", ",", "query", ")", "query_count", "+=", "1", "if", "query_count", "%", "report_frequency", "==", "0", ":", "self", ".", "log", ".", "info", "(", "\"Processed {} Items in {} Seconds. Apprximately {} items/minute\"", ".", "format", "(", "self", ".", "_items_processed", ",", "int", "(", "(", "datetime", ".", "now", "(", ")", "-", "stime", ")", ".", "seconds", ")", ",", "str", "(", "int", "(", "self", ".", "_items_processed", "/", "(", "(", "datetime", ".", "now", "(", ")", "-", "stime", ")", ".", "seconds", "/", "60", ")", ")", ")", ")", ")", "if", "results", ".", "get_results_count", "(", ")", ":", "#If we got items back, get the new cursor and yield the docs\r", "self", ".", "_items_processed", "+=", "results", ".", "get_results_count", "(", ")", "cursor", "=", "results", ".", "get_cursor", "(", ")", "#Remove ignore fields\r", "docs", "=", "self", ".", "_trim_fields", "(", "results", ".", "docs", ")", "yield", "docs", "if", "results", ".", "get_results_count", "(", ")", "<", "self", ".", "_rows", ":", "#Less results than asked, probably done\r", "break", "else", ":", "#No Results, probably done :)\r", "self", ".", "log", ".", "debug", "(", "\"Got zero Results with cursor: {}\"", ".", "format", "(", "cursor", ")", ")", "break" ]
Method for retrieving batch data from Solr.
[ "Method", "for", "retrieving", "batch", "data", "from", "Solr", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/helpers/reindexer.py#L99-L139
train
moonlitesolutions/SolrClient
SolrClient/helpers/reindexer.py
Reindexer._trim_fields
def _trim_fields(self, docs): ''' Removes ignore fields from the data that we got from Solr. ''' for doc in docs: for field in self._ignore_fields: if field in doc: del(doc[field]) return docs
python
def _trim_fields(self, docs): ''' Removes ignore fields from the data that we got from Solr. ''' for doc in docs: for field in self._ignore_fields: if field in doc: del(doc[field]) return docs
[ "def", "_trim_fields", "(", "self", ",", "docs", ")", ":", "for", "doc", "in", "docs", ":", "for", "field", "in", "self", ".", "_ignore_fields", ":", "if", "field", "in", "doc", ":", "del", "(", "doc", "[", "field", "]", ")", "return", "docs" ]
Removes ignore fields from the data that we got from Solr.
[ "Removes", "ignore", "fields", "from", "the", "data", "that", "we", "got", "from", "Solr", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/helpers/reindexer.py#L142-L150
train
moonlitesolutions/SolrClient
SolrClient/helpers/reindexer.py
Reindexer._get_query
def _get_query(self, cursor): ''' Query tempalte for source Solr, sorts by id by default. ''' query = {'q':'*:*', 'sort':'id desc', 'rows':self._rows, 'cursorMark':cursor} if self._date_field: query['sort'] = "{} asc, id desc".format(self._date_field) if self._per_shard: query['distrib'] = 'false' return query
python
def _get_query(self, cursor): ''' Query tempalte for source Solr, sorts by id by default. ''' query = {'q':'*:*', 'sort':'id desc', 'rows':self._rows, 'cursorMark':cursor} if self._date_field: query['sort'] = "{} asc, id desc".format(self._date_field) if self._per_shard: query['distrib'] = 'false' return query
[ "def", "_get_query", "(", "self", ",", "cursor", ")", ":", "query", "=", "{", "'q'", ":", "'*:*'", ",", "'sort'", ":", "'id desc'", ",", "'rows'", ":", "self", ".", "_rows", ",", "'cursorMark'", ":", "cursor", "}", "if", "self", ".", "_date_field", ":", "query", "[", "'sort'", "]", "=", "\"{} asc, id desc\"", ".", "format", "(", "self", ".", "_date_field", ")", "if", "self", ".", "_per_shard", ":", "query", "[", "'distrib'", "]", "=", "'false'", "return", "query" ]
Query template for source Solr; sorts by id by default.
[ "Query", "tempalte", "for", "source", "Solr", "sorts", "by", "id", "by", "default", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/helpers/reindexer.py#L153-L165
train
moonlitesolutions/SolrClient
SolrClient/helpers/reindexer.py
Reindexer._to_solr
def _to_solr(self, data): ''' Sends data to a Solr instance. ''' return self._dest.index_json(self._dest_coll, json.dumps(data,sort_keys=True))
python
def _to_solr(self, data): ''' Sends data to a Solr instance. ''' return self._dest.index_json(self._dest_coll, json.dumps(data,sort_keys=True))
[ "def", "_to_solr", "(", "self", ",", "data", ")", ":", "return", "self", ".", "_dest", ".", "index_json", "(", "self", ".", "_dest_coll", ",", "json", ".", "dumps", "(", "data", ",", "sort_keys", "=", "True", ")", ")" ]
Sends data to a Solr instance.
[ "Sends", "data", "to", "a", "Solr", "instance", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/helpers/reindexer.py#L175-L179
train
moonlitesolutions/SolrClient
SolrClient/helpers/reindexer.py
Reindexer._get_date_range_query
def _get_date_range_query(self, start_date, end_date, timespan= 'DAY', date_field= None): ''' Gets counts of items per specified date range. :param collection: Solr Collection to use. :param timespan: Solr Date Math compliant value for faceting ex HOUR, MONTH, DAY ''' if date_field is None: date_field = self._date_field query ={'q':'*:*', 'rows':0, 'facet':'true', 'facet.range': date_field, 'facet.range.gap': '+1{}'.format(timespan), 'facet.range.end': '{}'.format(end_date), 'facet.range.start': '{}'.format(start_date), 'facet.range.include': 'all' } if self._per_shard: query['distrib'] = 'false' return query
python
def _get_date_range_query(self, start_date, end_date, timespan= 'DAY', date_field= None): ''' Gets counts of items per specified date range. :param collection: Solr Collection to use. :param timespan: Solr Date Math compliant value for faceting ex HOUR, MONTH, DAY ''' if date_field is None: date_field = self._date_field query ={'q':'*:*', 'rows':0, 'facet':'true', 'facet.range': date_field, 'facet.range.gap': '+1{}'.format(timespan), 'facet.range.end': '{}'.format(end_date), 'facet.range.start': '{}'.format(start_date), 'facet.range.include': 'all' } if self._per_shard: query['distrib'] = 'false' return query
[ "def", "_get_date_range_query", "(", "self", ",", "start_date", ",", "end_date", ",", "timespan", "=", "'DAY'", ",", "date_field", "=", "None", ")", ":", "if", "date_field", "is", "None", ":", "date_field", "=", "self", ".", "_date_field", "query", "=", "{", "'q'", ":", "'*:*'", ",", "'rows'", ":", "0", ",", "'facet'", ":", "'true'", ",", "'facet.range'", ":", "date_field", ",", "'facet.range.gap'", ":", "'+1{}'", ".", "format", "(", "timespan", ")", ",", "'facet.range.end'", ":", "'{}'", ".", "format", "(", "end_date", ")", ",", "'facet.range.start'", ":", "'{}'", ".", "format", "(", "start_date", ")", ",", "'facet.range.include'", ":", "'all'", "}", "if", "self", ".", "_per_shard", ":", "query", "[", "'distrib'", "]", "=", "'false'", "return", "query" ]
Builds the range facet query used to get counts of items per specified date range. :param timespan: Solr Date Math compliant value for faceting, e.g. HOUR, MONTH, DAY
[ "Gets", "counts", "of", "items", "per", "specified", "date", "range", ".", ":", "param", "collection", ":", "Solr", "Collection", "to", "use", ".", ":", "param", "timespan", ":", "Solr", "Date", "Math", "compliant", "value", "for", "faceting", "ex", "HOUR", "MONTH", "DAY" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/helpers/reindexer.py#L182-L201
train
moonlitesolutions/SolrClient
SolrClient/helpers/reindexer.py
Reindexer._get_edge_date
def _get_edge_date(self, date_field, sort): ''' This method is used to get start and end dates for the collection. ''' return self._source.query(self._source_coll, { 'q':'*:*', 'rows':1, 'fq':'+{}:*'.format(date_field), 'sort':'{} {}'.format(date_field, sort)}).docs[0][date_field]
python
def _get_edge_date(self, date_field, sort): ''' This method is used to get start and end dates for the collection. ''' return self._source.query(self._source_coll, { 'q':'*:*', 'rows':1, 'fq':'+{}:*'.format(date_field), 'sort':'{} {}'.format(date_field, sort)}).docs[0][date_field]
[ "def", "_get_edge_date", "(", "self", ",", "date_field", ",", "sort", ")", ":", "return", "self", ".", "_source", ".", "query", "(", "self", ".", "_source_coll", ",", "{", "'q'", ":", "'*:*'", ",", "'rows'", ":", "1", ",", "'fq'", ":", "'+{}:*'", ".", "format", "(", "date_field", ")", ",", "'sort'", ":", "'{} {}'", ".", "format", "(", "date_field", ",", "sort", ")", "}", ")", ".", "docs", "[", "0", "]", "[", "date_field", "]" ]
This method is used to get start and end dates for the collection.
[ "This", "method", "is", "used", "to", "get", "start", "and", "end", "dates", "for", "the", "collection", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/helpers/reindexer.py#L204-L212
train
moonlitesolutions/SolrClient
SolrClient/helpers/reindexer.py
Reindexer._get_date_facet_counts
def _get_date_facet_counts(self, timespan, date_field, start_date=None, end_date=None): ''' Returns Range Facet counts based on ''' if 'DAY' not in timespan: raise ValueError("At this time, only DAY date range increment is supported. Aborting..... ") #Need to do this a bit better later. Don't like the string and date concatenations. if not start_date: start_date = self._get_edge_date(date_field, 'asc') start_date = datetime.strptime(start_date,'%Y-%m-%dT%H:%M:%S.%fZ').date().isoformat()+'T00:00:00.000Z' else: start_date = start_date+'T00:00:00.000Z' if not end_date: end_date = self._get_edge_date(date_field, 'desc') end_date = datetime.strptime(end_date,'%Y-%m-%dT%H:%M:%S.%fZ').date() end_date += timedelta(days=1) end_date = end_date.isoformat()+'T00:00:00.000Z' else: end_date = end_date+'T00:00:00.000Z' self.log.info("Processing Items from {} to {}".format(start_date, end_date)) #Get facet counts for source and destination collections source_facet = self._source.query(self._source_coll, self._get_date_range_query(timespan=timespan, start_date=start_date, end_date=end_date) ).get_facets_ranges()[date_field] dest_facet = self._dest.query( self._dest_coll, self._get_date_range_query( timespan=timespan, start_date=start_date, end_date=end_date )).get_facets_ranges()[date_field] return source_facet, dest_facet
python
def _get_date_facet_counts(self, timespan, date_field, start_date=None, end_date=None): ''' Returns Range Facet counts based on ''' if 'DAY' not in timespan: raise ValueError("At this time, only DAY date range increment is supported. Aborting..... ") #Need to do this a bit better later. Don't like the string and date concatenations. if not start_date: start_date = self._get_edge_date(date_field, 'asc') start_date = datetime.strptime(start_date,'%Y-%m-%dT%H:%M:%S.%fZ').date().isoformat()+'T00:00:00.000Z' else: start_date = start_date+'T00:00:00.000Z' if not end_date: end_date = self._get_edge_date(date_field, 'desc') end_date = datetime.strptime(end_date,'%Y-%m-%dT%H:%M:%S.%fZ').date() end_date += timedelta(days=1) end_date = end_date.isoformat()+'T00:00:00.000Z' else: end_date = end_date+'T00:00:00.000Z' self.log.info("Processing Items from {} to {}".format(start_date, end_date)) #Get facet counts for source and destination collections source_facet = self._source.query(self._source_coll, self._get_date_range_query(timespan=timespan, start_date=start_date, end_date=end_date) ).get_facets_ranges()[date_field] dest_facet = self._dest.query( self._dest_coll, self._get_date_range_query( timespan=timespan, start_date=start_date, end_date=end_date )).get_facets_ranges()[date_field] return source_facet, dest_facet
[ "def", "_get_date_facet_counts", "(", "self", ",", "timespan", ",", "date_field", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ")", ":", "if", "'DAY'", "not", "in", "timespan", ":", "raise", "ValueError", "(", "\"At this time, only DAY date range increment is supported. Aborting..... \"", ")", "#Need to do this a bit better later. Don't like the string and date concatenations.\r", "if", "not", "start_date", ":", "start_date", "=", "self", ".", "_get_edge_date", "(", "date_field", ",", "'asc'", ")", "start_date", "=", "datetime", ".", "strptime", "(", "start_date", ",", "'%Y-%m-%dT%H:%M:%S.%fZ'", ")", ".", "date", "(", ")", ".", "isoformat", "(", ")", "+", "'T00:00:00.000Z'", "else", ":", "start_date", "=", "start_date", "+", "'T00:00:00.000Z'", "if", "not", "end_date", ":", "end_date", "=", "self", ".", "_get_edge_date", "(", "date_field", ",", "'desc'", ")", "end_date", "=", "datetime", ".", "strptime", "(", "end_date", ",", "'%Y-%m-%dT%H:%M:%S.%fZ'", ")", ".", "date", "(", ")", "end_date", "+=", "timedelta", "(", "days", "=", "1", ")", "end_date", "=", "end_date", ".", "isoformat", "(", ")", "+", "'T00:00:00.000Z'", "else", ":", "end_date", "=", "end_date", "+", "'T00:00:00.000Z'", "self", ".", "log", ".", "info", "(", "\"Processing Items from {} to {}\"", ".", "format", "(", "start_date", ",", "end_date", ")", ")", "#Get facet counts for source and destination collections\r", "source_facet", "=", "self", ".", "_source", ".", "query", "(", "self", ".", "_source_coll", ",", "self", ".", "_get_date_range_query", "(", "timespan", "=", "timespan", ",", "start_date", "=", "start_date", ",", "end_date", "=", "end_date", ")", ")", ".", "get_facets_ranges", "(", ")", "[", "date_field", "]", "dest_facet", "=", "self", ".", "_dest", ".", "query", "(", "self", ".", "_dest_coll", ",", "self", ".", "_get_date_range_query", "(", "timespan", "=", "timespan", ",", "start_date", "=", "start_date", ",", "end_date", "=", "end_date", ")", ")", ".", "get_facets_ranges", "(", ")", "[", "date_field", "]", "return", "source_facet", ",", "dest_facet" ]
Returns range facet counts for the source and destination collections, based on the supplied timespan and date field.
[ "Returns", "Range", "Facet", "counts", "based", "on" ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/helpers/reindexer.py#L215-L248
train
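The helper above spends most of its lines coercing dates into Solr's `T00:00:00.000Z` form before faceting, which its own inline comment flags as awkward. A minimal standalone sketch of that normalization, using a hypothetical `normalize_day` helper that is not part of SolrClient, may make the string-and-date juggling easier to follow:

```python
from datetime import datetime, timedelta

# Hypothetical helper (not part of SolrClient) illustrating the same normalization:
# pin a Solr timestamp or a plain YYYY-MM-DD string to midnight UTC, optionally
# shifted by whole days.
def normalize_day(value, add_days=0):
    if 'T' in value:
        value = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ').date().isoformat()
    day = datetime.strptime(value, '%Y-%m-%d').date() + timedelta(days=add_days)
    return day.isoformat() + 'T00:00:00.000Z'

start = normalize_day('2016-01-03T11:22:33.444Z')            # '2016-01-03T00:00:00.000Z'
# The end date is pushed one day forward, as in the method, so the last partial
# day of data still falls inside the facet range.
end = normalize_day('2016-02-01T05:00:00.000Z', add_days=1)  # '2016-02-02T00:00:00.000Z'
```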
moonlitesolutions/SolrClient
SolrClient/helpers/reindexer.py
Reindexer.resume
def resume(self, start_date=None, end_date=None, timespan='DAY', check= False): ''' This method may help if the original run was interrupted for some reason. It will only work under the following conditions * You have a date field that you can facet on * Indexing was stopped for the duration of the copy The way this tries to resume re-indexing is by running a date range facet on the source and destination collections. It then compares the counts in both collections for each timespan specified. If the counts are different, it will re-index items for each range where the counts are off. You can also pass in a start_date to only get items after a certain time period. Note that each date range will be indexed in it's entirety, even if there is only one item missing. Keep in mind this only checks the counts and not actual data. So make the indexes weren't modified between the reindexing execution and running the resume operation. :param start_date: Date to start indexing from. If not specified there will be no restrictions and all data will be processed. Note that this value will be passed to Solr directly and not modified. :param end_date: The date to index items up to. Solr Date Math compliant value for faceting; currenlty only DAY is supported. :param timespan: Solr Date Math compliant value for faceting; currenlty only DAY is supported. :param check: If set to True it will only log differences between the two collections without actually modifying the destination. ''' if type(self._source) is not SolrClient or type(self._dest) is not SolrClient: raise ValueError("To resume, both source and destination need to be Solr.") source_facet, dest_facet = self._get_date_facet_counts(timespan, self._date_field, start_date=start_date, end_date=end_date) for dt_range in sorted(source_facet): if dt_range in dest_facet: self.log.info("Date Range: {} Source: {} Destination:{} Difference:{}".format( dt_range, source_facet[dt_range], dest_facet[dt_range], (source_facet[dt_range]-dest_facet[dt_range]))) if check: continue if source_facet[dt_range] > dest_facet[dt_range]: #Kicks off reindexing with an additional FQ self.reindex(fq=['{}:[{} TO {}]'.format(self._date_field, dt_range, dt_range+'+1{}'.format(timespan))]) self.log.info("Complete Date Range {}".format(dt_range)) else: self.log.error("Something went wrong; destinationSource: {}".format(source_facet)) self.log.error("Destination: {}".format(dest_facet)) raise ValueError("Date Ranges don't match up") self._dest.commit(self._dest_coll, openSearcher=True)
python
def resume(self, start_date=None, end_date=None, timespan='DAY', check= False): ''' This method may help if the original run was interrupted for some reason. It will only work under the following conditions * You have a date field that you can facet on * Indexing was stopped for the duration of the copy The way this tries to resume re-indexing is by running a date range facet on the source and destination collections. It then compares the counts in both collections for each timespan specified. If the counts are different, it will re-index items for each range where the counts are off. You can also pass in a start_date to only get items after a certain time period. Note that each date range will be indexed in it's entirety, even if there is only one item missing. Keep in mind this only checks the counts and not actual data. So make the indexes weren't modified between the reindexing execution and running the resume operation. :param start_date: Date to start indexing from. If not specified there will be no restrictions and all data will be processed. Note that this value will be passed to Solr directly and not modified. :param end_date: The date to index items up to. Solr Date Math compliant value for faceting; currenlty only DAY is supported. :param timespan: Solr Date Math compliant value for faceting; currenlty only DAY is supported. :param check: If set to True it will only log differences between the two collections without actually modifying the destination. ''' if type(self._source) is not SolrClient or type(self._dest) is not SolrClient: raise ValueError("To resume, both source and destination need to be Solr.") source_facet, dest_facet = self._get_date_facet_counts(timespan, self._date_field, start_date=start_date, end_date=end_date) for dt_range in sorted(source_facet): if dt_range in dest_facet: self.log.info("Date Range: {} Source: {} Destination:{} Difference:{}".format( dt_range, source_facet[dt_range], dest_facet[dt_range], (source_facet[dt_range]-dest_facet[dt_range]))) if check: continue if source_facet[dt_range] > dest_facet[dt_range]: #Kicks off reindexing with an additional FQ self.reindex(fq=['{}:[{} TO {}]'.format(self._date_field, dt_range, dt_range+'+1{}'.format(timespan))]) self.log.info("Complete Date Range {}".format(dt_range)) else: self.log.error("Something went wrong; destinationSource: {}".format(source_facet)) self.log.error("Destination: {}".format(dest_facet)) raise ValueError("Date Ranges don't match up") self._dest.commit(self._dest_coll, openSearcher=True)
[ "def", "resume", "(", "self", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "timespan", "=", "'DAY'", ",", "check", "=", "False", ")", ":", "if", "type", "(", "self", ".", "_source", ")", "is", "not", "SolrClient", "or", "type", "(", "self", ".", "_dest", ")", "is", "not", "SolrClient", ":", "raise", "ValueError", "(", "\"To resume, both source and destination need to be Solr.\"", ")", "source_facet", ",", "dest_facet", "=", "self", ".", "_get_date_facet_counts", "(", "timespan", ",", "self", ".", "_date_field", ",", "start_date", "=", "start_date", ",", "end_date", "=", "end_date", ")", "for", "dt_range", "in", "sorted", "(", "source_facet", ")", ":", "if", "dt_range", "in", "dest_facet", ":", "self", ".", "log", ".", "info", "(", "\"Date Range: {} Source: {} Destination:{} Difference:{}\"", ".", "format", "(", "dt_range", ",", "source_facet", "[", "dt_range", "]", ",", "dest_facet", "[", "dt_range", "]", ",", "(", "source_facet", "[", "dt_range", "]", "-", "dest_facet", "[", "dt_range", "]", ")", ")", ")", "if", "check", ":", "continue", "if", "source_facet", "[", "dt_range", "]", ">", "dest_facet", "[", "dt_range", "]", ":", "#Kicks off reindexing with an additional FQ\r", "self", ".", "reindex", "(", "fq", "=", "[", "'{}:[{} TO {}]'", ".", "format", "(", "self", ".", "_date_field", ",", "dt_range", ",", "dt_range", "+", "'+1{}'", ".", "format", "(", "timespan", ")", ")", "]", ")", "self", ".", "log", ".", "info", "(", "\"Complete Date Range {}\"", ".", "format", "(", "dt_range", ")", ")", "else", ":", "self", ".", "log", ".", "error", "(", "\"Something went wrong; destinationSource: {}\"", ".", "format", "(", "source_facet", ")", ")", "self", ".", "log", ".", "error", "(", "\"Destination: {}\"", ".", "format", "(", "dest_facet", ")", ")", "raise", "ValueError", "(", "\"Date Ranges don't match up\"", ")", "self", ".", "_dest", ".", "commit", "(", "self", ".", "_dest_coll", ",", "openSearcher", "=", "True", ")" ]
This method may help if the original run was interrupted for some reason. It will only work under the following conditions * You have a date field that you can facet on * Indexing was stopped for the duration of the copy The way this tries to resume re-indexing is by running a date range facet on the source and destination collections. It then compares the counts in both collections for each timespan specified. If the counts are different, it will re-index items for each range where the counts are off. You can also pass in a start_date to only get items after a certain time period. Note that each date range will be indexed in its entirety, even if there is only one item missing. Keep in mind this only checks the counts and not actual data. So make sure the indexes weren't modified between the reindexing execution and running the resume operation. :param start_date: Date to start indexing from. If not specified there will be no restrictions and all data will be processed. Note that this value will be passed to Solr directly and not modified. :param end_date: The date to index items up to. Solr Date Math compliant value for faceting; currently only DAY is supported. :param timespan: Solr Date Math compliant value for faceting; currently only DAY is supported. :param check: If set to True it will only log differences between the two collections without actually modifying the destination.
[ "This", "method", "may", "help", "if", "the", "original", "run", "was", "interrupted", "for", "some", "reason", ".", "It", "will", "only", "work", "under", "the", "following", "conditions", "*", "You", "have", "a", "date", "field", "that", "you", "can", "facet", "on", "*", "Indexing", "was", "stopped", "for", "the", "duration", "of", "the", "copy", "The", "way", "this", "tries", "to", "resume", "re", "-", "indexing", "is", "by", "running", "a", "date", "range", "facet", "on", "the", "source", "and", "destination", "collections", ".", "It", "then", "compares", "the", "counts", "in", "both", "collections", "for", "each", "timespan", "specified", ".", "If", "the", "counts", "are", "different", "it", "will", "re", "-", "index", "items", "for", "each", "range", "where", "the", "counts", "are", "off", ".", "You", "can", "also", "pass", "in", "a", "start_date", "to", "only", "get", "items", "after", "a", "certain", "time", "period", ".", "Note", "that", "each", "date", "range", "will", "be", "indexed", "in", "it", "s", "entirety", "even", "if", "there", "is", "only", "one", "item", "missing", ".", "Keep", "in", "mind", "this", "only", "checks", "the", "counts", "and", "not", "actual", "data", ".", "So", "make", "the", "indexes", "weren", "t", "modified", "between", "the", "reindexing", "execution", "and", "running", "the", "resume", "operation", ".", ":", "param", "start_date", ":", "Date", "to", "start", "indexing", "from", ".", "If", "not", "specified", "there", "will", "be", "no", "restrictions", "and", "all", "data", "will", "be", "processed", ".", "Note", "that", "this", "value", "will", "be", "passed", "to", "Solr", "directly", "and", "not", "modified", ".", ":", "param", "end_date", ":", "The", "date", "to", "index", "items", "up", "to", ".", "Solr", "Date", "Math", "compliant", "value", "for", "faceting", ";", "currenlty", "only", "DAY", "is", "supported", ".", ":", "param", "timespan", ":", "Solr", "Date", "Math", "compliant", "value", "for", "faceting", ";", "currenlty", "only", "DAY", "is", "supported", ".", ":", "param", "check", ":", "If", "set", "to", "True", "it", "will", "only", "log", "differences", "between", "the", "two", "collections", "without", "actually", "modifying", "the", "destination", "." ]
19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/helpers/reindexer.py#L251-L291
train
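A hedged usage sketch of `resume()`: the `Reindexer` constructor arguments below are assumptions inferred from the attributes the method reads (`_source`, `_dest`, `_source_coll`, `_dest_coll`, `_date_field`), and the URLs, collection names and field name are placeholders — check the class for its real signature.

```python
from SolrClient import SolrClient
from SolrClient.helpers import Reindexer  # import path assumed from the file layout

# resume() requires both ends to be SolrClient instances.
source = SolrClient('http://solr-old:8983/solr')   # placeholder URLs
dest = SolrClient('http://solr-new:8983/solr')

# Constructor arguments are assumptions inferred from the attributes resume() reads
# (_source, _dest, _source_coll, _dest_coll, _date_field); check the class itself.
reindexer = Reindexer(source, dest, source_coll='items_old', dest_coll='items_new',
                      date_field='indexed_ts')

# Dry run: only log per-day count differences, touch nothing.
reindexer.resume(start_date='2016-01-01', timespan='DAY', check=True)

# Real run: re-copy every day whose counts differ, then commit on the destination.
reindexer.resume(start_date='2016-01-01', timespan='DAY')
```

Running with `check=True` first is a cheap way to see how far an interrupted copy got before committing to another pass over the affected days.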
bitprophet/ssh
ssh/transport.py
Transport.start_client
def start_client(self, event=None): """ Negotiate a new SSH2 session as a client. This is the first step after creating a new L{Transport}. A separate thread is created for protocol negotiation. If an event is passed in, this method returns immediately. When negotiation is done (successful or not), the given C{Event} will be triggered. On failure, L{is_active} will return C{False}. (Since 1.4) If C{event} is C{None}, this method will not return until negotation is done. On success, the method returns normally. Otherwise an SSHException is raised. After a successful negotiation, you will usually want to authenticate, calling L{auth_password <Transport.auth_password>} or L{auth_publickey <Transport.auth_publickey>}. @note: L{connect} is a simpler method for connecting as a client. @note: After calling this method (or L{start_server} or L{connect}), you should no longer directly read from or write to the original socket object. @param event: an event to trigger when negotiation is complete (optional) @type event: threading.Event @raise SSHException: if negotiation fails (and no C{event} was passed in) """ self.active = True if event is not None: # async, return immediately and let the app poll for completion self.completion_event = event self.start() return # synchronous, wait for a result self.completion_event = event = threading.Event() self.start() Random.atfork() while True: event.wait(0.1) if not self.active: e = self.get_exception() if e is not None: raise e raise SSHException('Negotiation failed.') if event.isSet(): break
python
def start_client(self, event=None): """ Negotiate a new SSH2 session as a client. This is the first step after creating a new L{Transport}. A separate thread is created for protocol negotiation. If an event is passed in, this method returns immediately. When negotiation is done (successful or not), the given C{Event} will be triggered. On failure, L{is_active} will return C{False}. (Since 1.4) If C{event} is C{None}, this method will not return until negotation is done. On success, the method returns normally. Otherwise an SSHException is raised. After a successful negotiation, you will usually want to authenticate, calling L{auth_password <Transport.auth_password>} or L{auth_publickey <Transport.auth_publickey>}. @note: L{connect} is a simpler method for connecting as a client. @note: After calling this method (or L{start_server} or L{connect}), you should no longer directly read from or write to the original socket object. @param event: an event to trigger when negotiation is complete (optional) @type event: threading.Event @raise SSHException: if negotiation fails (and no C{event} was passed in) """ self.active = True if event is not None: # async, return immediately and let the app poll for completion self.completion_event = event self.start() return # synchronous, wait for a result self.completion_event = event = threading.Event() self.start() Random.atfork() while True: event.wait(0.1) if not self.active: e = self.get_exception() if e is not None: raise e raise SSHException('Negotiation failed.') if event.isSet(): break
[ "def", "start_client", "(", "self", ",", "event", "=", "None", ")", ":", "self", ".", "active", "=", "True", "if", "event", "is", "not", "None", ":", "# async, return immediately and let the app poll for completion", "self", ".", "completion_event", "=", "event", "self", ".", "start", "(", ")", "return", "# synchronous, wait for a result", "self", ".", "completion_event", "=", "event", "=", "threading", ".", "Event", "(", ")", "self", ".", "start", "(", ")", "Random", ".", "atfork", "(", ")", "while", "True", ":", "event", ".", "wait", "(", "0.1", ")", "if", "not", "self", ".", "active", ":", "e", "=", "self", ".", "get_exception", "(", ")", "if", "e", "is", "not", "None", ":", "raise", "e", "raise", "SSHException", "(", "'Negotiation failed.'", ")", "if", "event", ".", "isSet", "(", ")", ":", "break" ]
Negotiate a new SSH2 session as a client. This is the first step after creating a new L{Transport}. A separate thread is created for protocol negotiation. If an event is passed in, this method returns immediately. When negotiation is done (successful or not), the given C{Event} will be triggered. On failure, L{is_active} will return C{False}. (Since 1.4) If C{event} is C{None}, this method will not return until negotiation is done. On success, the method returns normally. Otherwise an SSHException is raised. After a successful negotiation, you will usually want to authenticate, calling L{auth_password <Transport.auth_password>} or L{auth_publickey <Transport.auth_publickey>}. @note: L{connect} is a simpler method for connecting as a client. @note: After calling this method (or L{start_server} or L{connect}), you should no longer directly read from or write to the original socket object. @param event: an event to trigger when negotiation is complete (optional) @type event: threading.Event @raise SSHException: if negotiation fails (and no C{event} was passed in)
[ "Negotiate", "a", "new", "SSH2", "session", "as", "a", "client", ".", "This", "is", "the", "first", "step", "after", "creating", "a", "new", "L", "{", "Transport", "}", ".", "A", "separate", "thread", "is", "created", "for", "protocol", "negotiation", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L417-L467
train
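A minimal client-side sketch of the synchronous path through `start_client()`; the host, credentials and the `ssh` package import reflect this repository's module layout (paramiko exposes the same `Transport` API), and are placeholders rather than anything mandated by the method itself.

```python
import socket
import ssh  # this repository's package; paramiko exposes the same Transport API

sock = socket.create_connection(('example.com', 22))   # placeholder host
t = ssh.Transport(sock)

# Synchronous form: no event, so this blocks until negotiation finishes
# or raises SSHException.
t.start_client()

# Typical follow-up: inspect the server key, then authenticate.
server_key = t.get_remote_server_key()
print(server_key.get_name())                 # e.g. 'ssh-rsa'
t.auth_password('user', 'secret')            # placeholder credentials
t.close()
```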
bitprophet/ssh
ssh/transport.py
Transport.start_server
def start_server(self, event=None, server=None): """ Negotiate a new SSH2 session as a server. This is the first step after creating a new L{Transport} and setting up your server host key(s). A separate thread is created for protocol negotiation. If an event is passed in, this method returns immediately. When negotiation is done (successful or not), the given C{Event} will be triggered. On failure, L{is_active} will return C{False}. (Since 1.4) If C{event} is C{None}, this method will not return until negotation is done. On success, the method returns normally. Otherwise an SSHException is raised. After a successful negotiation, the client will need to authenticate. Override the methods L{get_allowed_auths <ServerInterface.get_allowed_auths>}, L{check_auth_none <ServerInterface.check_auth_none>}, L{check_auth_password <ServerInterface.check_auth_password>}, and L{check_auth_publickey <ServerInterface.check_auth_publickey>} in the given C{server} object to control the authentication process. After a successful authentication, the client should request to open a channel. Override L{check_channel_request <ServerInterface.check_channel_request>} in the given C{server} object to allow channels to be opened. @note: After calling this method (or L{start_client} or L{connect}), you should no longer directly read from or write to the original socket object. @param event: an event to trigger when negotiation is complete. @type event: threading.Event @param server: an object used to perform authentication and create L{Channel}s. @type server: L{server.ServerInterface} @raise SSHException: if negotiation fails (and no C{event} was passed in) """ if server is None: server = ServerInterface() self.server_mode = True self.server_object = server self.active = True if event is not None: # async, return immediately and let the app poll for completion self.completion_event = event self.start() return # synchronous, wait for a result self.completion_event = event = threading.Event() self.start() while True: event.wait(0.1) if not self.active: e = self.get_exception() if e is not None: raise e raise SSHException('Negotiation failed.') if event.isSet(): break
python
def start_server(self, event=None, server=None): """ Negotiate a new SSH2 session as a server. This is the first step after creating a new L{Transport} and setting up your server host key(s). A separate thread is created for protocol negotiation. If an event is passed in, this method returns immediately. When negotiation is done (successful or not), the given C{Event} will be triggered. On failure, L{is_active} will return C{False}. (Since 1.4) If C{event} is C{None}, this method will not return until negotation is done. On success, the method returns normally. Otherwise an SSHException is raised. After a successful negotiation, the client will need to authenticate. Override the methods L{get_allowed_auths <ServerInterface.get_allowed_auths>}, L{check_auth_none <ServerInterface.check_auth_none>}, L{check_auth_password <ServerInterface.check_auth_password>}, and L{check_auth_publickey <ServerInterface.check_auth_publickey>} in the given C{server} object to control the authentication process. After a successful authentication, the client should request to open a channel. Override L{check_channel_request <ServerInterface.check_channel_request>} in the given C{server} object to allow channels to be opened. @note: After calling this method (or L{start_client} or L{connect}), you should no longer directly read from or write to the original socket object. @param event: an event to trigger when negotiation is complete. @type event: threading.Event @param server: an object used to perform authentication and create L{Channel}s. @type server: L{server.ServerInterface} @raise SSHException: if negotiation fails (and no C{event} was passed in) """ if server is None: server = ServerInterface() self.server_mode = True self.server_object = server self.active = True if event is not None: # async, return immediately and let the app poll for completion self.completion_event = event self.start() return # synchronous, wait for a result self.completion_event = event = threading.Event() self.start() while True: event.wait(0.1) if not self.active: e = self.get_exception() if e is not None: raise e raise SSHException('Negotiation failed.') if event.isSet(): break
[ "def", "start_server", "(", "self", ",", "event", "=", "None", ",", "server", "=", "None", ")", ":", "if", "server", "is", "None", ":", "server", "=", "ServerInterface", "(", ")", "self", ".", "server_mode", "=", "True", "self", ".", "server_object", "=", "server", "self", ".", "active", "=", "True", "if", "event", "is", "not", "None", ":", "# async, return immediately and let the app poll for completion", "self", ".", "completion_event", "=", "event", "self", ".", "start", "(", ")", "return", "# synchronous, wait for a result", "self", ".", "completion_event", "=", "event", "=", "threading", ".", "Event", "(", ")", "self", ".", "start", "(", ")", "while", "True", ":", "event", ".", "wait", "(", "0.1", ")", "if", "not", "self", ".", "active", ":", "e", "=", "self", ".", "get_exception", "(", ")", "if", "e", "is", "not", "None", ":", "raise", "e", "raise", "SSHException", "(", "'Negotiation failed.'", ")", "if", "event", ".", "isSet", "(", ")", ":", "break" ]
Negotiate a new SSH2 session as a server. This is the first step after creating a new L{Transport} and setting up your server host key(s). A separate thread is created for protocol negotiation. If an event is passed in, this method returns immediately. When negotiation is done (successful or not), the given C{Event} will be triggered. On failure, L{is_active} will return C{False}. (Since 1.4) If C{event} is C{None}, this method will not return until negotiation is done. On success, the method returns normally. Otherwise an SSHException is raised. After a successful negotiation, the client will need to authenticate. Override the methods L{get_allowed_auths <ServerInterface.get_allowed_auths>}, L{check_auth_none <ServerInterface.check_auth_none>}, L{check_auth_password <ServerInterface.check_auth_password>}, and L{check_auth_publickey <ServerInterface.check_auth_publickey>} in the given C{server} object to control the authentication process. After a successful authentication, the client should request to open a channel. Override L{check_channel_request <ServerInterface.check_channel_request>} in the given C{server} object to allow channels to be opened. @note: After calling this method (or L{start_client} or L{connect}), you should no longer directly read from or write to the original socket object. @param event: an event to trigger when negotiation is complete. @type event: threading.Event @param server: an object used to perform authentication and create L{Channel}s. @type server: L{server.ServerInterface} @raise SSHException: if negotiation fails (and no C{event} was passed in)
[ "Negotiate", "a", "new", "SSH2", "session", "as", "a", "server", ".", "This", "is", "the", "first", "step", "after", "creating", "a", "new", "L", "{", "Transport", "}", "and", "setting", "up", "your", "server", "host", "key", "(", "s", ")", ".", "A", "separate", "thread", "is", "created", "for", "protocol", "negotiation", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L469-L531
train
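The server side needs a host key and a `ServerInterface` implementation before `start_server()` is called. The sketch below uses the asynchronous form with an event; `MyServer`, the listening port and the key path are illustrative assumptions, not part of the library.

```python
import socket
import threading
import ssh

# MyServer, the port and the key file are illustrative placeholders.
class MyServer(ssh.ServerInterface):
    def get_allowed_auths(self, username):
        return 'password'

    def check_auth_password(self, username, password):
        return ssh.AUTH_SUCCESSFUL if password == 'secret' else ssh.AUTH_FAILED

listener = socket.socket()
listener.bind(('', 2200))
listener.listen(1)
client_sock, addr = listener.accept()

t = ssh.Transport(client_sock)
t.add_server_key(ssh.RSAKey.from_private_key_file('test_rsa.key'))

# Asynchronous form: returns immediately and signals the event when done.
event = threading.Event()
t.start_server(event=event, server=MyServer())
event.wait(10)
if not t.is_active():
    raise RuntimeError('SSH negotiation failed')
```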
bitprophet/ssh
ssh/transport.py
Transport.close
def close(self): """ Close this session, and any open channels that are tied to it. """ if not self.active: return self.active = False self.packetizer.close() self.join() for chan in self._channels.values(): chan._unlink()
python
def close(self): """ Close this session, and any open channels that are tied to it. """ if not self.active: return self.active = False self.packetizer.close() self.join() for chan in self._channels.values(): chan._unlink()
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "active", ":", "return", "self", ".", "active", "=", "False", "self", ".", "packetizer", ".", "close", "(", ")", "self", ".", "join", "(", ")", "for", "chan", "in", "self", ".", "_channels", ".", "values", "(", ")", ":", "chan", ".", "_unlink", "(", ")" ]
Close this session, and any open channels that are tied to it.
[ "Close", "this", "session", "and", "any", "open", "channels", "that", "are", "tied", "to", "it", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L610-L620
train
bitprophet/ssh
ssh/transport.py
Transport.open_forwarded_tcpip_channel
def open_forwarded_tcpip_channel(self, (src_addr, src_port), (dest_addr, dest_port)): """ Request a new channel back to the client, of type C{"forwarded-tcpip"}. This is used after a client has requested port forwarding, for sending incoming connections back to the client. @param src_addr: originator's address @param src_port: originator's port @param dest_addr: local (server) connected address @param dest_port: local (server) connected port """ return self.open_channel('forwarded-tcpip', (dest_addr, dest_port), (src_addr, src_port))
python
def open_forwarded_tcpip_channel(self, (src_addr, src_port), (dest_addr, dest_port)): """ Request a new channel back to the client, of type C{"forwarded-tcpip"}. This is used after a client has requested port forwarding, for sending incoming connections back to the client. @param src_addr: originator's address @param src_port: originator's port @param dest_addr: local (server) connected address @param dest_port: local (server) connected port """ return self.open_channel('forwarded-tcpip', (dest_addr, dest_port), (src_addr, src_port))
[ "def", "open_forwarded_tcpip_channel", "(", "self", ",", "(", "src_addr", ",", "src_port", ")", ",", "(", "dest_addr", ",", "dest_port", ")", ")", ":", "return", "self", ".", "open_channel", "(", "'forwarded-tcpip'", ",", "(", "dest_addr", ",", "dest_port", ")", ",", "(", "src_addr", ",", "src_port", ")", ")" ]
Request a new channel back to the client, of type C{"forwarded-tcpip"}. This is used after a client has requested port forwarding, for sending incoming connections back to the client. @param src_addr: originator's address @param src_port: originator's port @param dest_addr: local (server) connected address @param dest_port: local (server) connected port
[ "Request", "a", "new", "channel", "back", "to", "the", "client", "of", "type", "C", "{", "forwarded", "-", "tcpip", "}", ".", "This", "is", "used", "after", "a", "client", "has", "requested", "port", "forwarding", "for", "sending", "incoming", "connections", "back", "to", "the", "client", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L693-L704
train
bitprophet/ssh
ssh/transport.py
Transport.request_port_forward
def request_port_forward(self, address, port, handler=None): """ Ask the server to forward TCP connections from a listening port on the server, across this SSH session. If a handler is given, that handler is called from a different thread whenever a forwarded connection arrives. The handler parameters are:: handler(channel, (origin_addr, origin_port), (server_addr, server_port)) where C{server_addr} and C{server_port} are the address and port that the server was listening on. If no handler is set, the default behavior is to send new incoming forwarded connections into the accept queue, to be picked up via L{accept}. @param address: the address to bind when forwarding @type address: str @param port: the port to forward, or 0 to ask the server to allocate any port @type port: int @param handler: optional handler for incoming forwarded connections @type handler: function(Channel, (str, int), (str, int)) @return: the port # allocated by the server @rtype: int @raise SSHException: if the server refused the TCP forward request """ if not self.active: raise SSHException('SSH session not active') address = str(address) port = int(port) response = self.global_request('tcpip-forward', (address, port), wait=True) if response is None: raise SSHException('TCP forwarding request denied') if port == 0: port = response.get_int() if handler is None: def default_handler(channel, (src_addr, src_port), (dest_addr, dest_port)): self._queue_incoming_channel(channel) handler = default_handler self._tcp_handler = handler return port
python
def request_port_forward(self, address, port, handler=None): """ Ask the server to forward TCP connections from a listening port on the server, across this SSH session. If a handler is given, that handler is called from a different thread whenever a forwarded connection arrives. The handler parameters are:: handler(channel, (origin_addr, origin_port), (server_addr, server_port)) where C{server_addr} and C{server_port} are the address and port that the server was listening on. If no handler is set, the default behavior is to send new incoming forwarded connections into the accept queue, to be picked up via L{accept}. @param address: the address to bind when forwarding @type address: str @param port: the port to forward, or 0 to ask the server to allocate any port @type port: int @param handler: optional handler for incoming forwarded connections @type handler: function(Channel, (str, int), (str, int)) @return: the port # allocated by the server @rtype: int @raise SSHException: if the server refused the TCP forward request """ if not self.active: raise SSHException('SSH session not active') address = str(address) port = int(port) response = self.global_request('tcpip-forward', (address, port), wait=True) if response is None: raise SSHException('TCP forwarding request denied') if port == 0: port = response.get_int() if handler is None: def default_handler(channel, (src_addr, src_port), (dest_addr, dest_port)): self._queue_incoming_channel(channel) handler = default_handler self._tcp_handler = handler return port
[ "def", "request_port_forward", "(", "self", ",", "address", ",", "port", ",", "handler", "=", "None", ")", ":", "if", "not", "self", ".", "active", ":", "raise", "SSHException", "(", "'SSH session not active'", ")", "address", "=", "str", "(", "address", ")", "port", "=", "int", "(", "port", ")", "response", "=", "self", ".", "global_request", "(", "'tcpip-forward'", ",", "(", "address", ",", "port", ")", ",", "wait", "=", "True", ")", "if", "response", "is", "None", ":", "raise", "SSHException", "(", "'TCP forwarding request denied'", ")", "if", "port", "==", "0", ":", "port", "=", "response", ".", "get_int", "(", ")", "if", "handler", "is", "None", ":", "def", "default_handler", "(", "channel", ",", "(", "src_addr", ",", "src_port", ")", ",", "(", "dest_addr", ",", "dest_port", ")", ")", ":", "self", ".", "_queue_incoming_channel", "(", "channel", ")", "handler", "=", "default_handler", "self", ".", "_tcp_handler", "=", "handler", "return", "port" ]
Ask the server to forward TCP connections from a listening port on the server, across this SSH session. If a handler is given, that handler is called from a different thread whenever a forwarded connection arrives. The handler parameters are:: handler(channel, (origin_addr, origin_port), (server_addr, server_port)) where C{server_addr} and C{server_port} are the address and port that the server was listening on. If no handler is set, the default behavior is to send new incoming forwarded connections into the accept queue, to be picked up via L{accept}. @param address: the address to bind when forwarding @type address: str @param port: the port to forward, or 0 to ask the server to allocate any port @type port: int @param handler: optional handler for incoming forwarded connections @type handler: function(Channel, (str, int), (str, int)) @return: the port # allocated by the server @rtype: int @raise SSHException: if the server refused the TCP forward request
[ "Ask", "the", "server", "to", "forward", "TCP", "connections", "from", "a", "listening", "port", "on", "the", "server", "across", "this", "SSH", "session", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L774-L817
train
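A short sketch of remote port forwarding without a handler, so forwarded connections arrive via `accept()`. It assumes `t` is an already-authenticated `Transport`; the timeout and the echo logic are arbitrary.

```python
# Assumes `t` is an already-authenticated Transport (see the connect() example).
# Ask the server to listen on any free port and forward connections back here.
port = t.request_port_forward('', 0)
print('server is forwarding port %d' % port)

# With no handler, forwarded connections land in the accept queue.
chan = t.accept(timeout=60)
if chan is not None:
    data = chan.recv(1024)
    chan.send(data)          # trivial echo, just to show the channel is usable
    chan.close()

# Tear the forwarding down when finished.
t.cancel_port_forward('', port)
```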
bitprophet/ssh
ssh/transport.py
Transport.send_ignore
def send_ignore(self, bytes=None): """ Send a junk packet across the encrypted link. This is sometimes used to add "noise" to a connection to confuse would-be attackers. It can also be used as a keep-alive for long lived connections traversing firewalls. @param bytes: the number of random bytes to send in the payload of the ignored packet -- defaults to a random number from 10 to 41. @type bytes: int """ m = Message() m.add_byte(chr(MSG_IGNORE)) if bytes is None: bytes = (ord(rng.read(1)) % 32) + 10 m.add_bytes(rng.read(bytes)) self._send_user_message(m)
python
def send_ignore(self, bytes=None): """ Send a junk packet across the encrypted link. This is sometimes used to add "noise" to a connection to confuse would-be attackers. It can also be used as a keep-alive for long lived connections traversing firewalls. @param bytes: the number of random bytes to send in the payload of the ignored packet -- defaults to a random number from 10 to 41. @type bytes: int """ m = Message() m.add_byte(chr(MSG_IGNORE)) if bytes is None: bytes = (ord(rng.read(1)) % 32) + 10 m.add_bytes(rng.read(bytes)) self._send_user_message(m)
[ "def", "send_ignore", "(", "self", ",", "bytes", "=", "None", ")", ":", "m", "=", "Message", "(", ")", "m", ".", "add_byte", "(", "chr", "(", "MSG_IGNORE", ")", ")", "if", "bytes", "is", "None", ":", "bytes", "=", "(", "ord", "(", "rng", ".", "read", "(", "1", ")", ")", "%", "32", ")", "+", "10", "m", ".", "add_bytes", "(", "rng", ".", "read", "(", "bytes", ")", ")", "self", ".", "_send_user_message", "(", "m", ")" ]
Send a junk packet across the encrypted link. This is sometimes used to add "noise" to a connection to confuse would-be attackers. It can also be used as a keep-alive for long lived connections traversing firewalls. @param bytes: the number of random bytes to send in the payload of the ignored packet -- defaults to a random number from 10 to 41. @type bytes: int
[ "Send", "a", "junk", "packet", "across", "the", "encrypted", "link", ".", "This", "is", "sometimes", "used", "to", "add", "noise", "to", "a", "connection", "to", "confuse", "would", "-", "be", "attackers", ".", "It", "can", "also", "be", "used", "as", "a", "keep", "-", "alive", "for", "long", "lived", "connections", "traversing", "firewalls", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L847-L863
train
bitprophet/ssh
ssh/transport.py
Transport.renegotiate_keys
def renegotiate_keys(self): """ Force this session to switch to new keys. Normally this is done automatically after the session hits a certain number of packets or bytes sent or received, but this method gives you the option of forcing new keys whenever you want. Negotiating new keys causes a pause in traffic both ways as the two sides swap keys and do computations. This method returns when the session has switched to new keys. @raise SSHException: if the key renegotiation failed (which causes the session to end) """ self.completion_event = threading.Event() self._send_kex_init() while True: self.completion_event.wait(0.1) if not self.active: e = self.get_exception() if e is not None: raise e raise SSHException('Negotiation failed.') if self.completion_event.isSet(): break return
python
def renegotiate_keys(self): """ Force this session to switch to new keys. Normally this is done automatically after the session hits a certain number of packets or bytes sent or received, but this method gives you the option of forcing new keys whenever you want. Negotiating new keys causes a pause in traffic both ways as the two sides swap keys and do computations. This method returns when the session has switched to new keys. @raise SSHException: if the key renegotiation failed (which causes the session to end) """ self.completion_event = threading.Event() self._send_kex_init() while True: self.completion_event.wait(0.1) if not self.active: e = self.get_exception() if e is not None: raise e raise SSHException('Negotiation failed.') if self.completion_event.isSet(): break return
[ "def", "renegotiate_keys", "(", "self", ")", ":", "self", ".", "completion_event", "=", "threading", ".", "Event", "(", ")", "self", ".", "_send_kex_init", "(", ")", "while", "True", ":", "self", ".", "completion_event", ".", "wait", "(", "0.1", ")", "if", "not", "self", ".", "active", ":", "e", "=", "self", ".", "get_exception", "(", ")", "if", "e", "is", "not", "None", ":", "raise", "e", "raise", "SSHException", "(", "'Negotiation failed.'", ")", "if", "self", ".", "completion_event", ".", "isSet", "(", ")", ":", "break", "return" ]
Force this session to switch to new keys. Normally this is done automatically after the session hits a certain number of packets or bytes sent or received, but this method gives you the option of forcing new keys whenever you want. Negotiating new keys causes a pause in traffic both ways as the two sides swap keys and do computations. This method returns when the session has switched to new keys. @raise SSHException: if the key renegotiation failed (which causes the session to end)
[ "Force", "this", "session", "to", "switch", "to", "new", "keys", ".", "Normally", "this", "is", "done", "automatically", "after", "the", "session", "hits", "a", "certain", "number", "of", "packets", "or", "bytes", "sent", "or", "received", "but", "this", "method", "gives", "you", "the", "option", "of", "forcing", "new", "keys", "whenever", "you", "want", ".", "Negotiating", "new", "keys", "causes", "a", "pause", "in", "traffic", "both", "ways", "as", "the", "two", "sides", "swap", "keys", "and", "do", "computations", ".", "This", "method", "returns", "when", "the", "session", "has", "switched", "to", "new", "keys", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L865-L888
train
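Forcing a rekey on a long-lived session is a one-liner, but it can raise and kill the session, so a small sketch with the failure path handled (assuming `t` is an active `Transport`):

```python
from ssh import SSHException

# Assumes `t` is an active, long-lived Transport.
try:
    t.renegotiate_keys()      # blocks until the new keys are in place
except SSHException as exc:
    print('rekey failed, session is dead: %s' % exc)
    t.close()
```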
bitprophet/ssh
ssh/transport.py
Transport.set_keepalive
def set_keepalive(self, interval): """ Turn on/off keepalive packets (default is off). If this is set, after C{interval} seconds without sending any data over the connection, a "keepalive" packet will be sent (and ignored by the remote host). This can be useful to keep connections alive over a NAT, for example. @param interval: seconds to wait before sending a keepalive packet (or 0 to disable keepalives). @type interval: int """ self.packetizer.set_keepalive(interval, lambda x=weakref.proxy(self): x.global_request('keepalive@lag.net', wait=False))
python
def set_keepalive(self, interval): """ Turn on/off keepalive packets (default is off). If this is set, after C{interval} seconds without sending any data over the connection, a "keepalive" packet will be sent (and ignored by the remote host). This can be useful to keep connections alive over a NAT, for example. @param interval: seconds to wait before sending a keepalive packet (or 0 to disable keepalives). @type interval: int """ self.packetizer.set_keepalive(interval, lambda x=weakref.proxy(self): x.global_request('keepalive@lag.net', wait=False))
[ "def", "set_keepalive", "(", "self", ",", "interval", ")", ":", "self", ".", "packetizer", ".", "set_keepalive", "(", "interval", ",", "lambda", "x", "=", "weakref", ".", "proxy", "(", "self", ")", ":", "x", ".", "global_request", "(", "'keepalive@lag.net'", ",", "wait", "=", "False", ")", ")" ]
Turn on/off keepalive packets (default is off). If this is set, after C{interval} seconds without sending any data over the connection, a "keepalive" packet will be sent (and ignored by the remote host). This can be useful to keep connections alive over a NAT, for example. @param interval: seconds to wait before sending a keepalive packet (or 0 to disable keepalives). @type interval: int
[ "Turn", "on", "/", "off", "keepalive", "packets", "(", "default", "is", "off", ")", ".", "If", "this", "is", "set", "after", "C", "{", "interval", "}", "seconds", "without", "sending", "any", "data", "over", "the", "connection", "a", "keepalive", "packet", "will", "be", "sent", "(", "and", "ignored", "by", "the", "remote", "host", ")", ".", "This", "can", "be", "useful", "to", "keep", "connections", "alive", "over", "a", "NAT", "for", "example", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L890-L902
train
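Keepalives are just a global request fired after the configured idle interval; a two-call sketch on an existing `Transport` `t` (the 30-second interval is arbitrary):

```python
# Fire a keepalive global request after 30 idle seconds (interval is arbitrary);
# handy for sessions idling behind NAT devices or stateful firewalls.
t.set_keepalive(30)

# Passing 0 turns keepalives back off.
t.set_keepalive(0)
```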
bitprophet/ssh
ssh/transport.py
Transport.connect
def connect(self, hostkey=None, username='', password=None, pkey=None): """ Negotiate an SSH2 session, and optionally verify the server's host key and authenticate using a password or private key. This is a shortcut for L{start_client}, L{get_remote_server_key}, and L{Transport.auth_password} or L{Transport.auth_publickey}. Use those methods if you want more control. You can use this method immediately after creating a Transport to negotiate encryption with a server. If it fails, an exception will be thrown. On success, the method will return cleanly, and an encrypted session exists. You may immediately call L{open_channel} or L{open_session} to get a L{Channel} object, which is used for data transfer. @note: If you fail to supply a password or private key, this method may succeed, but a subsequent L{open_channel} or L{open_session} call may fail because you haven't authenticated yet. @param hostkey: the host key expected from the server, or C{None} if you don't want to do host key verification. @type hostkey: L{PKey<pkey.PKey>} @param username: the username to authenticate as. @type username: str @param password: a password to use for authentication, if you want to use password authentication; otherwise C{None}. @type password: str @param pkey: a private key to use for authentication, if you want to use private key authentication; otherwise C{None}. @type pkey: L{PKey<pkey.PKey>} @raise SSHException: if the SSH2 negotiation fails, the host key supplied by the server is incorrect, or authentication fails. """ if hostkey is not None: self._preferred_keys = [ hostkey.get_name() ] self.start_client() # check host key if we were given one if (hostkey is not None): key = self.get_remote_server_key() if (key.get_name() != hostkey.get_name()) or (str(key) != str(hostkey)): self._log(DEBUG, 'Bad host key from server') self._log(DEBUG, 'Expected: %s: %s' % (hostkey.get_name(), repr(str(hostkey)))) self._log(DEBUG, 'Got : %s: %s' % (key.get_name(), repr(str(key)))) raise SSHException('Bad host key from server') self._log(DEBUG, 'Host key verified (%s)' % hostkey.get_name()) if (pkey is not None) or (password is not None): if password is not None: self._log(DEBUG, 'Attempting password auth...') self.auth_password(username, password) else: self._log(DEBUG, 'Attempting public-key auth...') self.auth_publickey(username, pkey) return
python
def connect(self, hostkey=None, username='', password=None, pkey=None): """ Negotiate an SSH2 session, and optionally verify the server's host key and authenticate using a password or private key. This is a shortcut for L{start_client}, L{get_remote_server_key}, and L{Transport.auth_password} or L{Transport.auth_publickey}. Use those methods if you want more control. You can use this method immediately after creating a Transport to negotiate encryption with a server. If it fails, an exception will be thrown. On success, the method will return cleanly, and an encrypted session exists. You may immediately call L{open_channel} or L{open_session} to get a L{Channel} object, which is used for data transfer. @note: If you fail to supply a password or private key, this method may succeed, but a subsequent L{open_channel} or L{open_session} call may fail because you haven't authenticated yet. @param hostkey: the host key expected from the server, or C{None} if you don't want to do host key verification. @type hostkey: L{PKey<pkey.PKey>} @param username: the username to authenticate as. @type username: str @param password: a password to use for authentication, if you want to use password authentication; otherwise C{None}. @type password: str @param pkey: a private key to use for authentication, if you want to use private key authentication; otherwise C{None}. @type pkey: L{PKey<pkey.PKey>} @raise SSHException: if the SSH2 negotiation fails, the host key supplied by the server is incorrect, or authentication fails. """ if hostkey is not None: self._preferred_keys = [ hostkey.get_name() ] self.start_client() # check host key if we were given one if (hostkey is not None): key = self.get_remote_server_key() if (key.get_name() != hostkey.get_name()) or (str(key) != str(hostkey)): self._log(DEBUG, 'Bad host key from server') self._log(DEBUG, 'Expected: %s: %s' % (hostkey.get_name(), repr(str(hostkey)))) self._log(DEBUG, 'Got : %s: %s' % (key.get_name(), repr(str(key)))) raise SSHException('Bad host key from server') self._log(DEBUG, 'Host key verified (%s)' % hostkey.get_name()) if (pkey is not None) or (password is not None): if password is not None: self._log(DEBUG, 'Attempting password auth...') self.auth_password(username, password) else: self._log(DEBUG, 'Attempting public-key auth...') self.auth_publickey(username, pkey) return
[ "def", "connect", "(", "self", ",", "hostkey", "=", "None", ",", "username", "=", "''", ",", "password", "=", "None", ",", "pkey", "=", "None", ")", ":", "if", "hostkey", "is", "not", "None", ":", "self", ".", "_preferred_keys", "=", "[", "hostkey", ".", "get_name", "(", ")", "]", "self", ".", "start_client", "(", ")", "# check host key if we were given one", "if", "(", "hostkey", "is", "not", "None", ")", ":", "key", "=", "self", ".", "get_remote_server_key", "(", ")", "if", "(", "key", ".", "get_name", "(", ")", "!=", "hostkey", ".", "get_name", "(", ")", ")", "or", "(", "str", "(", "key", ")", "!=", "str", "(", "hostkey", ")", ")", ":", "self", ".", "_log", "(", "DEBUG", ",", "'Bad host key from server'", ")", "self", ".", "_log", "(", "DEBUG", ",", "'Expected: %s: %s'", "%", "(", "hostkey", ".", "get_name", "(", ")", ",", "repr", "(", "str", "(", "hostkey", ")", ")", ")", ")", "self", ".", "_log", "(", "DEBUG", ",", "'Got : %s: %s'", "%", "(", "key", ".", "get_name", "(", ")", ",", "repr", "(", "str", "(", "key", ")", ")", ")", ")", "raise", "SSHException", "(", "'Bad host key from server'", ")", "self", ".", "_log", "(", "DEBUG", ",", "'Host key verified (%s)'", "%", "hostkey", ".", "get_name", "(", ")", ")", "if", "(", "pkey", "is", "not", "None", ")", "or", "(", "password", "is", "not", "None", ")", ":", "if", "password", "is", "not", "None", ":", "self", ".", "_log", "(", "DEBUG", ",", "'Attempting password auth...'", ")", "self", ".", "auth_password", "(", "username", ",", "password", ")", "else", ":", "self", ".", "_log", "(", "DEBUG", ",", "'Attempting public-key auth...'", ")", "self", ".", "auth_publickey", "(", "username", ",", "pkey", ")", "return" ]
Negotiate an SSH2 session, and optionally verify the server's host key and authenticate using a password or private key. This is a shortcut for L{start_client}, L{get_remote_server_key}, and L{Transport.auth_password} or L{Transport.auth_publickey}. Use those methods if you want more control. You can use this method immediately after creating a Transport to negotiate encryption with a server. If it fails, an exception will be thrown. On success, the method will return cleanly, and an encrypted session exists. You may immediately call L{open_channel} or L{open_session} to get a L{Channel} object, which is used for data transfer. @note: If you fail to supply a password or private key, this method may succeed, but a subsequent L{open_channel} or L{open_session} call may fail because you haven't authenticated yet. @param hostkey: the host key expected from the server, or C{None} if you don't want to do host key verification. @type hostkey: L{PKey<pkey.PKey>} @param username: the username to authenticate as. @type username: str @param password: a password to use for authentication, if you want to use password authentication; otherwise C{None}. @type password: str @param pkey: a private key to use for authentication, if you want to use private key authentication; otherwise C{None}. @type pkey: L{PKey<pkey.PKey>} @raise SSHException: if the SSH2 negotiation fails, the host key supplied by the server is incorrect, or authentication fails.
[ "Negotiate", "an", "SSH2", "session", "and", "optionally", "verify", "the", "server", "s", "host", "key", "and", "authenticate", "using", "a", "password", "or", "private", "key", ".", "This", "is", "a", "shortcut", "for", "L", "{", "start_client", "}", "L", "{", "get_remote_server_key", "}", "and", "L", "{", "Transport", ".", "auth_password", "}", "or", "L", "{", "Transport", ".", "auth_publickey", "}", ".", "Use", "those", "methods", "if", "you", "want", "more", "control", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L969-L1026
train
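A compact sketch of the `connect()` shortcut followed by an exec channel. Passing `hostkey=None` skips host-key verification purely to keep the example short — in real code you would pass the expected `PKey`; the host and credentials are placeholders.

```python
import socket
import ssh

sock = socket.create_connection(('example.com', 22))   # placeholder host
t = ssh.Transport(sock)

# connect() bundles start_client(), optional host key checking and authentication.
# hostkey=None skips verification only to keep the sketch short.
t.connect(hostkey=None, username='user', password='secret')   # placeholder credentials

chan = t.open_session()
chan.exec_command('uptime')
print(chan.recv(1024))
t.close()
```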
bitprophet/ssh
ssh/transport.py
Transport.auth_none
def auth_none(self, username): """ Try to authenticate to the server using no authentication at all. This will almost always fail. It may be useful for determining the list of authentication types supported by the server, by catching the L{BadAuthenticationType} exception raised. @param username: the username to authenticate as @type username: string @return: list of auth types permissible for the next stage of authentication (normally empty) @rtype: list @raise BadAuthenticationType: if "none" authentication isn't allowed by the server for this user @raise SSHException: if the authentication failed due to a network error @since: 1.5 """ if (not self.active) or (not self.initial_kex_done): raise SSHException('No existing session') my_event = threading.Event() self.auth_handler = AuthHandler(self) self.auth_handler.auth_none(username, my_event) return self.auth_handler.wait_for_response(my_event)
python
def auth_none(self, username): """ Try to authenticate to the server using no authentication at all. This will almost always fail. It may be useful for determining the list of authentication types supported by the server, by catching the L{BadAuthenticationType} exception raised. @param username: the username to authenticate as @type username: string @return: list of auth types permissible for the next stage of authentication (normally empty) @rtype: list @raise BadAuthenticationType: if "none" authentication isn't allowed by the server for this user @raise SSHException: if the authentication failed due to a network error @since: 1.5 """ if (not self.active) or (not self.initial_kex_done): raise SSHException('No existing session') my_event = threading.Event() self.auth_handler = AuthHandler(self) self.auth_handler.auth_none(username, my_event) return self.auth_handler.wait_for_response(my_event)
[ "def", "auth_none", "(", "self", ",", "username", ")", ":", "if", "(", "not", "self", ".", "active", ")", "or", "(", "not", "self", ".", "initial_kex_done", ")", ":", "raise", "SSHException", "(", "'No existing session'", ")", "my_event", "=", "threading", ".", "Event", "(", ")", "self", ".", "auth_handler", "=", "AuthHandler", "(", "self", ")", "self", ".", "auth_handler", ".", "auth_none", "(", "username", ",", "my_event", ")", "return", "self", ".", "auth_handler", ".", "wait_for_response", "(", "my_event", ")" ]
Try to authenticate to the server using no authentication at all. This will almost always fail. It may be useful for determining the list of authentication types supported by the server, by catching the L{BadAuthenticationType} exception raised. @param username: the username to authenticate as @type username: string @return: list of auth types permissible for the next stage of authentication (normally empty) @rtype: list @raise BadAuthenticationType: if "none" authentication isn't allowed by the server for this user @raise SSHException: if the authentication failed due to a network error @since: 1.5
[ "Try", "to", "authenticate", "to", "the", "server", "using", "no", "authentication", "at", "all", ".", "This", "will", "almost", "always", "fail", ".", "It", "may", "be", "useful", "for", "determining", "the", "list", "of", "authentication", "types", "supported", "by", "the", "server", "by", "catching", "the", "L", "{", "BadAuthenticationType", "}", "exception", "raised", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L1094-L1119
train
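The docstring's suggested trick — probing the server's permitted auth types by attempting "none" authentication and catching the exception — looks roughly like this, assuming `t` has completed `start_client()` but is not yet authenticated:

```python
from ssh import BadAuthenticationType

# Assumes `t` has completed start_client() but is not yet authenticated.
try:
    allowed = t.auth_none('user')          # placeholder username; usually raises
except BadAuthenticationType as exc:
    allowed = exc.allowed_types
print('server accepts: %s' % ', '.join(allowed))
```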
bitprophet/ssh
ssh/transport.py
Transport._send_user_message
def _send_user_message(self, data): """ send a message, but block if we're in key negotiation. this is used for user-initiated requests. """ start = time.time() while True: self.clear_to_send.wait(0.1) if not self.active: self._log(DEBUG, 'Dropping user packet because connection is dead.') return self.clear_to_send_lock.acquire() if self.clear_to_send.isSet(): break self.clear_to_send_lock.release() if time.time() > start + self.clear_to_send_timeout: raise SSHException('Key-exchange timed out waiting for key negotiation') try: self._send_message(data) finally: self.clear_to_send_lock.release()
python
def _send_user_message(self, data): """ send a message, but block if we're in key negotiation. this is used for user-initiated requests. """ start = time.time() while True: self.clear_to_send.wait(0.1) if not self.active: self._log(DEBUG, 'Dropping user packet because connection is dead.') return self.clear_to_send_lock.acquire() if self.clear_to_send.isSet(): break self.clear_to_send_lock.release() if time.time() > start + self.clear_to_send_timeout: raise SSHException('Key-exchange timed out waiting for key negotiation') try: self._send_message(data) finally: self.clear_to_send_lock.release()
[ "def", "_send_user_message", "(", "self", ",", "data", ")", ":", "start", "=", "time", ".", "time", "(", ")", "while", "True", ":", "self", ".", "clear_to_send", ".", "wait", "(", "0.1", ")", "if", "not", "self", ".", "active", ":", "self", ".", "_log", "(", "DEBUG", ",", "'Dropping user packet because connection is dead.'", ")", "return", "self", ".", "clear_to_send_lock", ".", "acquire", "(", ")", "if", "self", ".", "clear_to_send", ".", "isSet", "(", ")", ":", "break", "self", ".", "clear_to_send_lock", ".", "release", "(", ")", "if", "time", ".", "time", "(", ")", ">", "start", "+", "self", ".", "clear_to_send_timeout", ":", "raise", "SSHException", "(", "'Key-exchange timed out waiting for key negotiation'", ")", "try", ":", "self", ".", "_send_message", "(", "data", ")", "finally", ":", "self", ".", "clear_to_send_lock", ".", "release", "(", ")" ]
send a message, but block if we're in key negotiation. this is used for user-initiated requests.
[ "send", "a", "message", "but", "block", "if", "we", "re", "in", "key", "negotiation", ".", "this", "is", "used", "for", "user", "-", "initiated", "requests", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L1425-L1445
train
bitprophet/ssh
ssh/transport.py
Transport._set_K_H
def _set_K_H(self, k, h): "used by a kex object to set the K (root key) and H (exchange hash)" self.K = k self.H = h if self.session_id == None: self.session_id = h
python
def _set_K_H(self, k, h): "used by a kex object to set the K (root key) and H (exchange hash)" self.K = k self.H = h if self.session_id == None: self.session_id = h
[ "def", "_set_K_H", "(", "self", ",", "k", ",", "h", ")", ":", "self", ".", "K", "=", "k", "self", ".", "H", "=", "h", "if", "self", ".", "session_id", "==", "None", ":", "self", ".", "session_id", "=", "h" ]
used by a kex object to set the K (root key) and H (exchange hash)
[ "used", "by", "a", "kex", "object", "to", "set", "the", "K", "(", "root", "key", ")", "and", "H", "(", "exchange", "hash", ")" ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L1447-L1452
train
bitprophet/ssh
ssh/transport.py
Transport._compute_key
def _compute_key(self, id, nbytes): "id is 'A' - 'F' for the various keys used by ssh" m = Message() m.add_mpint(self.K) m.add_bytes(self.H) m.add_byte(id) m.add_bytes(self.session_id) out = sofar = SHA.new(str(m)).digest() while len(out) < nbytes: m = Message() m.add_mpint(self.K) m.add_bytes(self.H) m.add_bytes(sofar) digest = SHA.new(str(m)).digest() out += digest sofar += digest return out[:nbytes]
python
def _compute_key(self, id, nbytes): "id is 'A' - 'F' for the various keys used by ssh" m = Message() m.add_mpint(self.K) m.add_bytes(self.H) m.add_byte(id) m.add_bytes(self.session_id) out = sofar = SHA.new(str(m)).digest() while len(out) < nbytes: m = Message() m.add_mpint(self.K) m.add_bytes(self.H) m.add_bytes(sofar) digest = SHA.new(str(m)).digest() out += digest sofar += digest return out[:nbytes]
[ "def", "_compute_key", "(", "self", ",", "id", ",", "nbytes", ")", ":", "m", "=", "Message", "(", ")", "m", ".", "add_mpint", "(", "self", ".", "K", ")", "m", ".", "add_bytes", "(", "self", ".", "H", ")", "m", ".", "add_byte", "(", "id", ")", "m", ".", "add_bytes", "(", "self", ".", "session_id", ")", "out", "=", "sofar", "=", "SHA", ".", "new", "(", "str", "(", "m", ")", ")", ".", "digest", "(", ")", "while", "len", "(", "out", ")", "<", "nbytes", ":", "m", "=", "Message", "(", ")", "m", ".", "add_mpint", "(", "self", ".", "K", ")", "m", ".", "add_bytes", "(", "self", ".", "H", ")", "m", ".", "add_bytes", "(", "sofar", ")", "digest", "=", "SHA", ".", "new", "(", "str", "(", "m", ")", ")", ".", "digest", "(", ")", "out", "+=", "digest", "sofar", "+=", "digest", "return", "out", "[", ":", "nbytes", "]" ]
id is 'A' - 'F' for the various keys used by ssh
[ "id", "is", "A", "-", "F", "for", "the", "various", "keys", "used", "by", "ssh" ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L1466-L1482
train
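The single-letter ids in the _compute_key record above follow the SSH transport key-derivation scheme (RFC 4253, section 7.2): each letter selects one of the six directional IVs and keys derived from the shared secret K, the exchange hash H, and the session id. A minimal sketch of that mapping, assuming `t` is an ssh.Transport that has already completed key exchange (the private method call and the 16/32-byte sizes here are illustrative only, not a public API):

    iv_client_to_server  = t._compute_key('A', 16)   # initial IV, client -> server
    iv_server_to_client  = t._compute_key('B', 16)   # initial IV, server -> client
    key_client_to_server = t._compute_key('C', 32)   # encryption key, client -> server
    key_server_to_client = t._compute_key('D', 32)   # encryption key, server -> client
    mac_client_to_server = t._compute_key('E', 32)   # integrity (MAC) key, client -> server
    mac_server_to_client = t._compute_key('F', 32)   # integrity (MAC) key, server -> client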
bitprophet/ssh
ssh/transport.py
Transport._send_kex_init
def _send_kex_init(self): """ announce to the other side that we'd like to negotiate keys, and what kind of key negotiation we support. """ self.clear_to_send_lock.acquire() try: self.clear_to_send.clear() finally: self.clear_to_send_lock.release() self.in_kex = True if self.server_mode: if (self._modulus_pack is None) and ('diffie-hellman-group-exchange-sha1' in self._preferred_kex): # can't do group-exchange if we don't have a pack of potential primes pkex = list(self.get_security_options().kex) pkex.remove('diffie-hellman-group-exchange-sha1') self.get_security_options().kex = pkex available_server_keys = filter(self.server_key_dict.keys().__contains__, self._preferred_keys) else: available_server_keys = self._preferred_keys m = Message() m.add_byte(chr(MSG_KEXINIT)) m.add_bytes(rng.read(16)) m.add_list(self._preferred_kex) m.add_list(available_server_keys) m.add_list(self._preferred_ciphers) m.add_list(self._preferred_ciphers) m.add_list(self._preferred_macs) m.add_list(self._preferred_macs) m.add_list(self._preferred_compression) m.add_list(self._preferred_compression) m.add_string('') m.add_string('') m.add_boolean(False) m.add_int(0) # save a copy for later (needed to compute a hash) self.local_kex_init = str(m) self._send_message(m)
python
def _send_kex_init(self): """ announce to the other side that we'd like to negotiate keys, and what kind of key negotiation we support. """ self.clear_to_send_lock.acquire() try: self.clear_to_send.clear() finally: self.clear_to_send_lock.release() self.in_kex = True if self.server_mode: if (self._modulus_pack is None) and ('diffie-hellman-group-exchange-sha1' in self._preferred_kex): # can't do group-exchange if we don't have a pack of potential primes pkex = list(self.get_security_options().kex) pkex.remove('diffie-hellman-group-exchange-sha1') self.get_security_options().kex = pkex available_server_keys = filter(self.server_key_dict.keys().__contains__, self._preferred_keys) else: available_server_keys = self._preferred_keys m = Message() m.add_byte(chr(MSG_KEXINIT)) m.add_bytes(rng.read(16)) m.add_list(self._preferred_kex) m.add_list(available_server_keys) m.add_list(self._preferred_ciphers) m.add_list(self._preferred_ciphers) m.add_list(self._preferred_macs) m.add_list(self._preferred_macs) m.add_list(self._preferred_compression) m.add_list(self._preferred_compression) m.add_string('') m.add_string('') m.add_boolean(False) m.add_int(0) # save a copy for later (needed to compute a hash) self.local_kex_init = str(m) self._send_message(m)
[ "def", "_send_kex_init", "(", "self", ")", ":", "self", ".", "clear_to_send_lock", ".", "acquire", "(", ")", "try", ":", "self", ".", "clear_to_send", ".", "clear", "(", ")", "finally", ":", "self", ".", "clear_to_send_lock", ".", "release", "(", ")", "self", ".", "in_kex", "=", "True", "if", "self", ".", "server_mode", ":", "if", "(", "self", ".", "_modulus_pack", "is", "None", ")", "and", "(", "'diffie-hellman-group-exchange-sha1'", "in", "self", ".", "_preferred_kex", ")", ":", "# can't do group-exchange if we don't have a pack of potential primes", "pkex", "=", "list", "(", "self", ".", "get_security_options", "(", ")", ".", "kex", ")", "pkex", ".", "remove", "(", "'diffie-hellman-group-exchange-sha1'", ")", "self", ".", "get_security_options", "(", ")", ".", "kex", "=", "pkex", "available_server_keys", "=", "filter", "(", "self", ".", "server_key_dict", ".", "keys", "(", ")", ".", "__contains__", ",", "self", ".", "_preferred_keys", ")", "else", ":", "available_server_keys", "=", "self", ".", "_preferred_keys", "m", "=", "Message", "(", ")", "m", ".", "add_byte", "(", "chr", "(", "MSG_KEXINIT", ")", ")", "m", ".", "add_bytes", "(", "rng", ".", "read", "(", "16", ")", ")", "m", ".", "add_list", "(", "self", ".", "_preferred_kex", ")", "m", ".", "add_list", "(", "available_server_keys", ")", "m", ".", "add_list", "(", "self", ".", "_preferred_ciphers", ")", "m", ".", "add_list", "(", "self", ".", "_preferred_ciphers", ")", "m", ".", "add_list", "(", "self", ".", "_preferred_macs", ")", "m", ".", "add_list", "(", "self", ".", "_preferred_macs", ")", "m", ".", "add_list", "(", "self", ".", "_preferred_compression", ")", "m", ".", "add_list", "(", "self", ".", "_preferred_compression", ")", "m", ".", "add_string", "(", "''", ")", "m", ".", "add_string", "(", "''", ")", "m", ".", "add_boolean", "(", "False", ")", "m", ".", "add_int", "(", "0", ")", "# save a copy for later (needed to compute a hash)", "self", ".", "local_kex_init", "=", "str", "(", "m", ")", "self", ".", "_send_message", "(", "m", ")" ]
announce to the other side that we'd like to negotiate keys, and what kind of key negotiation we support.
[ "announce", "to", "the", "other", "side", "that", "we", "d", "like", "to", "negotiate", "keys", "and", "what", "kind", "of", "key", "negotiation", "we", "support", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L1698-L1737
train
bitprophet/ssh
ssh/primes.py
_generate_prime
def _generate_prime(bits, rng): "primtive attempt at prime generation" hbyte_mask = pow(2, bits % 8) - 1 while True: # loop catches the case where we increment n into a higher bit-range x = rng.read((bits+7) // 8) if hbyte_mask > 0: x = chr(ord(x[0]) & hbyte_mask) + x[1:] n = util.inflate_long(x, 1) n |= 1 n |= (1 << (bits - 1)) while not number.isPrime(n): n += 2 if util.bit_length(n) == bits: break return n
python
def _generate_prime(bits, rng): "primtive attempt at prime generation" hbyte_mask = pow(2, bits % 8) - 1 while True: # loop catches the case where we increment n into a higher bit-range x = rng.read((bits+7) // 8) if hbyte_mask > 0: x = chr(ord(x[0]) & hbyte_mask) + x[1:] n = util.inflate_long(x, 1) n |= 1 n |= (1 << (bits - 1)) while not number.isPrime(n): n += 2 if util.bit_length(n) == bits: break return n
[ "def", "_generate_prime", "(", "bits", ",", "rng", ")", ":", "hbyte_mask", "=", "pow", "(", "2", ",", "bits", "%", "8", ")", "-", "1", "while", "True", ":", "# loop catches the case where we increment n into a higher bit-range", "x", "=", "rng", ".", "read", "(", "(", "bits", "+", "7", ")", "//", "8", ")", "if", "hbyte_mask", ">", "0", ":", "x", "=", "chr", "(", "ord", "(", "x", "[", "0", "]", ")", "&", "hbyte_mask", ")", "+", "x", "[", "1", ":", "]", "n", "=", "util", ".", "inflate_long", "(", "x", ",", "1", ")", "n", "|=", "1", "n", "|=", "(", "1", "<<", "(", "bits", "-", "1", ")", ")", "while", "not", "number", ".", "isPrime", "(", "n", ")", ":", "n", "+=", "2", "if", "util", ".", "bit_length", "(", "n", ")", "==", "bits", ":", "break", "return", "n" ]
primitive attempt at prime generation
[ "primtive", "attempt", "at", "prime", "generation" ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/primes.py#L29-L44
train
bitprophet/ssh
ssh/primes.py
_roll_random
def _roll_random(rng, n): "returns a random # from 0 to N-1" bits = util.bit_length(n-1) bytes = (bits + 7) // 8 hbyte_mask = pow(2, bits % 8) - 1 # so here's the plan: # we fetch as many random bits as we'd need to fit N-1, and if the # generated number is >= N, we try again. in the worst case (N-1 is a # power of 2), we have slightly better than 50% odds of getting one that # fits, so i can't guarantee that this loop will ever finish, but the odds # of it looping forever should be infinitesimal. while True: x = rng.read(bytes) if hbyte_mask > 0: x = chr(ord(x[0]) & hbyte_mask) + x[1:] num = util.inflate_long(x, 1) if num < n: break return num
python
def _roll_random(rng, n): "returns a random # from 0 to N-1" bits = util.bit_length(n-1) bytes = (bits + 7) // 8 hbyte_mask = pow(2, bits % 8) - 1 # so here's the plan: # we fetch as many random bits as we'd need to fit N-1, and if the # generated number is >= N, we try again. in the worst case (N-1 is a # power of 2), we have slightly better than 50% odds of getting one that # fits, so i can't guarantee that this loop will ever finish, but the odds # of it looping forever should be infinitesimal. while True: x = rng.read(bytes) if hbyte_mask > 0: x = chr(ord(x[0]) & hbyte_mask) + x[1:] num = util.inflate_long(x, 1) if num < n: break return num
[ "def", "_roll_random", "(", "rng", ",", "n", ")", ":", "bits", "=", "util", ".", "bit_length", "(", "n", "-", "1", ")", "bytes", "=", "(", "bits", "+", "7", ")", "//", "8", "hbyte_mask", "=", "pow", "(", "2", ",", "bits", "%", "8", ")", "-", "1", "# so here's the plan:", "# we fetch as many random bits as we'd need to fit N-1, and if the", "# generated number is >= N, we try again. in the worst case (N-1 is a", "# power of 2), we have slightly better than 50% odds of getting one that", "# fits, so i can't guarantee that this loop will ever finish, but the odds", "# of it looping forever should be infinitesimal.", "while", "True", ":", "x", "=", "rng", ".", "read", "(", "bytes", ")", "if", "hbyte_mask", ">", "0", ":", "x", "=", "chr", "(", "ord", "(", "x", "[", "0", "]", ")", "&", "hbyte_mask", ")", "+", "x", "[", "1", ":", "]", "num", "=", "util", ".", "inflate_long", "(", "x", ",", "1", ")", "if", "num", "<", "n", ":", "break", "return", "num" ]
returns a random # from 0 to N-1
[ "returns", "a", "random", "#", "from", "0", "to", "N", "-", "1" ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/primes.py#L46-L65
train
bitprophet/ssh
ssh/pkey.py
PKey._write_private_key_file
def _write_private_key_file(self, tag, filename, data, password=None): """ Write an SSH2-format private key file in a form that can be read by ssh or openssh. If no password is given, the key is written in a trivially-encoded format (base64) which is completely insecure. If a password is given, DES-EDE3-CBC is used. @param tag: C{"RSA"} or C{"DSA"}, the tag used to mark the data block. @type tag: str @param filename: name of the file to write. @type filename: str @param data: data blob that makes up the private key. @type data: str @param password: an optional password to use to encrypt the file. @type password: str @raise IOError: if there was an error writing the file. """ f = open(filename, 'w', 0600) # grrr... the mode doesn't always take hold os.chmod(filename, 0600) self._write_private_key(tag, f, data, password) f.close()
python
def _write_private_key_file(self, tag, filename, data, password=None): """ Write an SSH2-format private key file in a form that can be read by ssh or openssh. If no password is given, the key is written in a trivially-encoded format (base64) which is completely insecure. If a password is given, DES-EDE3-CBC is used. @param tag: C{"RSA"} or C{"DSA"}, the tag used to mark the data block. @type tag: str @param filename: name of the file to write. @type filename: str @param data: data blob that makes up the private key. @type data: str @param password: an optional password to use to encrypt the file. @type password: str @raise IOError: if there was an error writing the file. """ f = open(filename, 'w', 0600) # grrr... the mode doesn't always take hold os.chmod(filename, 0600) self._write_private_key(tag, f, data, password) f.close()
[ "def", "_write_private_key_file", "(", "self", ",", "tag", ",", "filename", ",", "data", ",", "password", "=", "None", ")", ":", "f", "=", "open", "(", "filename", ",", "'w'", ",", "0600", ")", "# grrr... the mode doesn't always take hold", "os", ".", "chmod", "(", "filename", ",", "0600", ")", "self", ".", "_write_private_key", "(", "tag", ",", "f", ",", "data", ",", "password", ")", "f", ".", "close", "(", ")" ]
Write an SSH2-format private key file in a form that can be read by ssh or openssh. If no password is given, the key is written in a trivially-encoded format (base64) which is completely insecure. If a password is given, DES-EDE3-CBC is used. @param tag: C{"RSA"} or C{"DSA"}, the tag used to mark the data block. @type tag: str @param filename: name of the file to write. @type filename: str @param data: data blob that makes up the private key. @type data: str @param password: an optional password to use to encrypt the file. @type password: str @raise IOError: if there was an error writing the file.
[ "Write", "an", "SSH2", "-", "format", "private", "key", "file", "in", "a", "form", "that", "can", "be", "read", "by", "ssh", "or", "openssh", ".", "If", "no", "password", "is", "given", "the", "key", "is", "written", "in", "a", "trivially", "-", "encoded", "format", "(", "base64", ")", "which", "is", "completely", "insecure", ".", "If", "a", "password", "is", "given", "DES", "-", "EDE3", "-", "CBC", "is", "used", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/pkey.py#L331-L353
train
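_write_private_key_file is the private helper behind the public key-writing API. A short usage sketch of that public surface, assuming the package exposes the same top-level names as paramiko (the output path and passphrase are placeholders):

    from ssh import RSAKey

    key = RSAKey.generate(2048)                        # new 2048-bit RSA key pair
    key.write_private_key_file('/tmp/test_rsa.key',    # DES-EDE3-CBC because a password is given
                               password='correct horse')
    pub_b64 = key.get_base64()                         # base64 of the public half of the key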
bitprophet/ssh
ssh/buffered_pipe.py
BufferedPipe.set_event
def set_event(self, event): """ Set an event on this buffer. When data is ready to be read (or the buffer has been closed), the event will be set. When no data is ready, the event will be cleared. @param event: the event to set/clear @type event: Event """ self._event = event if len(self._buffer) > 0: event.set() else: event.clear()
python
def set_event(self, event): """ Set an event on this buffer. When data is ready to be read (or the buffer has been closed), the event will be set. When no data is ready, the event will be cleared. @param event: the event to set/clear @type event: Event """ self._event = event if len(self._buffer) > 0: event.set() else: event.clear()
[ "def", "set_event", "(", "self", ",", "event", ")", ":", "self", ".", "_event", "=", "event", "if", "len", "(", "self", ".", "_buffer", ")", ">", "0", ":", "event", ".", "set", "(", ")", "else", ":", "event", ".", "clear", "(", ")" ]
Set an event on this buffer. When data is ready to be read (or the buffer has been closed), the event will be set. When no data is ready, the event will be cleared. @param event: the event to set/clear @type event: Event
[ "Set", "an", "event", "on", "this", "buffer", ".", "When", "data", "is", "ready", "to", "be", "read", "(", "or", "the", "buffer", "has", "been", "closed", ")", "the", "event", "will", "be", "set", ".", "When", "no", "data", "is", "ready", "the", "event", "will", "be", "cleared", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/buffered_pipe.py#L51-L64
train
bitprophet/ssh
ssh/buffered_pipe.py
BufferedPipe.feed
def feed(self, data): """ Feed new data into this pipe. This method is assumed to be called from a separate thread, so synchronization is done. @param data: the data to add @type data: str """ self._lock.acquire() try: if self._event is not None: self._event.set() self._buffer.fromstring(data) self._cv.notifyAll() finally: self._lock.release()
python
def feed(self, data): """ Feed new data into this pipe. This method is assumed to be called from a separate thread, so synchronization is done. @param data: the data to add @type data: str """ self._lock.acquire() try: if self._event is not None: self._event.set() self._buffer.fromstring(data) self._cv.notifyAll() finally: self._lock.release()
[ "def", "feed", "(", "self", ",", "data", ")", ":", "self", ".", "_lock", ".", "acquire", "(", ")", "try", ":", "if", "self", ".", "_event", "is", "not", "None", ":", "self", ".", "_event", ".", "set", "(", ")", "self", ".", "_buffer", ".", "fromstring", "(", "data", ")", "self", ".", "_cv", ".", "notifyAll", "(", ")", "finally", ":", "self", ".", "_lock", ".", "release", "(", ")" ]
Feed new data into this pipe. This method is assumed to be called from a separate thread, so synchronization is done. @param data: the data to add @type data: str
[ "Feed", "new", "data", "into", "this", "pipe", ".", "This", "method", "is", "assumed", "to", "be", "called", "from", "a", "separate", "thread", "so", "synchronization", "is", "done", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/buffered_pipe.py#L66-L81
train
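BufferedPipe.feed is normally called from the transport's reader thread while another thread consumes the data with read(). A tiny single-threaded sketch of that producer/consumer contract (Python 2, module path as in the record above; PipeTimeout is assumed to live in the same module, as it does in paramiko):

    from ssh.buffered_pipe import BufferedPipe, PipeTimeout

    pipe = BufferedPipe()
    pipe.feed('hello')            # producer side: append bytes and wake any waiting reader
    data = pipe.read(5)           # consumer side: returns 'hello'
    try:
        pipe.read(1, timeout=0.1)  # nothing buffered: blocks, then times out
    except PipeTimeout:
        pass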
bitprophet/ssh
ssh/sftp_handle.py
SFTPHandle.read
def read(self, offset, length): """ Read up to C{length} bytes from this file, starting at position C{offset}. The offset may be a python long, since SFTP allows it to be 64 bits. If the end of the file has been reached, this method may return an empty string to signify EOF, or it may also return L{SFTP_EOF}. The default implementation checks for an attribute on C{self} named C{readfile}, and if present, performs the read operation on the python file-like object found there. (This is meant as a time saver for the common case where you are wrapping a python file object.) @param offset: position in the file to start reading from. @type offset: int or long @param length: number of bytes to attempt to read. @type length: int @return: data read from the file, or an SFTP error code. @rtype: str """ readfile = getattr(self, 'readfile', None) if readfile is None: return SFTP_OP_UNSUPPORTED try: if self.__tell is None: self.__tell = readfile.tell() if offset != self.__tell: readfile.seek(offset) self.__tell = offset data = readfile.read(length) except IOError, e: self.__tell = None return SFTPServer.convert_errno(e.errno) self.__tell += len(data) return data
python
def read(self, offset, length): """ Read up to C{length} bytes from this file, starting at position C{offset}. The offset may be a python long, since SFTP allows it to be 64 bits. If the end of the file has been reached, this method may return an empty string to signify EOF, or it may also return L{SFTP_EOF}. The default implementation checks for an attribute on C{self} named C{readfile}, and if present, performs the read operation on the python file-like object found there. (This is meant as a time saver for the common case where you are wrapping a python file object.) @param offset: position in the file to start reading from. @type offset: int or long @param length: number of bytes to attempt to read. @type length: int @return: data read from the file, or an SFTP error code. @rtype: str """ readfile = getattr(self, 'readfile', None) if readfile is None: return SFTP_OP_UNSUPPORTED try: if self.__tell is None: self.__tell = readfile.tell() if offset != self.__tell: readfile.seek(offset) self.__tell = offset data = readfile.read(length) except IOError, e: self.__tell = None return SFTPServer.convert_errno(e.errno) self.__tell += len(data) return data
[ "def", "read", "(", "self", ",", "offset", ",", "length", ")", ":", "readfile", "=", "getattr", "(", "self", ",", "'readfile'", ",", "None", ")", "if", "readfile", "is", "None", ":", "return", "SFTP_OP_UNSUPPORTED", "try", ":", "if", "self", ".", "__tell", "is", "None", ":", "self", ".", "__tell", "=", "readfile", ".", "tell", "(", ")", "if", "offset", "!=", "self", ".", "__tell", ":", "readfile", ".", "seek", "(", "offset", ")", "self", ".", "__tell", "=", "offset", "data", "=", "readfile", ".", "read", "(", "length", ")", "except", "IOError", ",", "e", ":", "self", ".", "__tell", "=", "None", "return", "SFTPServer", ".", "convert_errno", "(", "e", ".", "errno", ")", "self", ".", "__tell", "+=", "len", "(", "data", ")", "return", "data" ]
Read up to C{length} bytes from this file, starting at position C{offset}. The offset may be a python long, since SFTP allows it to be 64 bits. If the end of the file has been reached, this method may return an empty string to signify EOF, or it may also return L{SFTP_EOF}. The default implementation checks for an attribute on C{self} named C{readfile}, and if present, performs the read operation on the python file-like object found there. (This is meant as a time saver for the common case where you are wrapping a python file object.) @param offset: position in the file to start reading from. @type offset: int or long @param length: number of bytes to attempt to read. @type length: int @return: data read from the file, or an SFTP error code. @rtype: str
[ "Read", "up", "to", "C", "{", "length", "}", "bytes", "from", "this", "file", "starting", "at", "position", "C", "{", "offset", "}", ".", "The", "offset", "may", "be", "a", "python", "long", "since", "SFTP", "allows", "it", "to", "be", "64", "bits", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/sftp_handle.py#L72-L107
train
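The readfile/writefile convention described in the SFTPHandle.read record above means a server-side handle usually just wraps a local file object and lets the base class do the I/O. A minimal subclass sketch, assuming the same top-level export as paramiko:

    from ssh import SFTPHandle

    class LocalFileHandle(SFTPHandle):
        # Wrap a local file; the inherited read() and write() look for the
        # readfile/writefile attributes instead of being overridden here.
        def __init__(self, path, flags=0):
            SFTPHandle.__init__(self, flags)
            self.readfile = open(path, 'rb')   # read-only handle: writefile is left unset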
bitprophet/ssh
ssh/sftp_handle.py
SFTPHandle.write
def write(self, offset, data): """ Write C{data} into this file at position C{offset}. Extending the file past its original end is expected. Unlike python's normal C{write()} methods, this method cannot do a partial write: it must write all of C{data} or else return an error. The default implementation checks for an attribute on C{self} named C{writefile}, and if present, performs the write operation on the python file-like object found there. The attribute is named differently from C{readfile} to make it easy to implement read-only (or write-only) files, but if both attributes are present, they should refer to the same file. @param offset: position in the file to start reading from. @type offset: int or long @param data: data to write into the file. @type data: str @return: an SFTP error code like L{SFTP_OK}. """ writefile = getattr(self, 'writefile', None) if writefile is None: return SFTP_OP_UNSUPPORTED try: # in append mode, don't care about seeking if (self.__flags & os.O_APPEND) == 0: if self.__tell is None: self.__tell = writefile.tell() if offset != self.__tell: writefile.seek(offset) self.__tell = offset writefile.write(data) writefile.flush() except IOError, e: self.__tell = None return SFTPServer.convert_errno(e.errno) if self.__tell is not None: self.__tell += len(data) return SFTP_OK
python
def write(self, offset, data): """ Write C{data} into this file at position C{offset}. Extending the file past its original end is expected. Unlike python's normal C{write()} methods, this method cannot do a partial write: it must write all of C{data} or else return an error. The default implementation checks for an attribute on C{self} named C{writefile}, and if present, performs the write operation on the python file-like object found there. The attribute is named differently from C{readfile} to make it easy to implement read-only (or write-only) files, but if both attributes are present, they should refer to the same file. @param offset: position in the file to start reading from. @type offset: int or long @param data: data to write into the file. @type data: str @return: an SFTP error code like L{SFTP_OK}. """ writefile = getattr(self, 'writefile', None) if writefile is None: return SFTP_OP_UNSUPPORTED try: # in append mode, don't care about seeking if (self.__flags & os.O_APPEND) == 0: if self.__tell is None: self.__tell = writefile.tell() if offset != self.__tell: writefile.seek(offset) self.__tell = offset writefile.write(data) writefile.flush() except IOError, e: self.__tell = None return SFTPServer.convert_errno(e.errno) if self.__tell is not None: self.__tell += len(data) return SFTP_OK
[ "def", "write", "(", "self", ",", "offset", ",", "data", ")", ":", "writefile", "=", "getattr", "(", "self", ",", "'writefile'", ",", "None", ")", "if", "writefile", "is", "None", ":", "return", "SFTP_OP_UNSUPPORTED", "try", ":", "# in append mode, don't care about seeking", "if", "(", "self", ".", "__flags", "&", "os", ".", "O_APPEND", ")", "==", "0", ":", "if", "self", ".", "__tell", "is", "None", ":", "self", ".", "__tell", "=", "writefile", ".", "tell", "(", ")", "if", "offset", "!=", "self", ".", "__tell", ":", "writefile", ".", "seek", "(", "offset", ")", "self", ".", "__tell", "=", "offset", "writefile", ".", "write", "(", "data", ")", "writefile", ".", "flush", "(", ")", "except", "IOError", ",", "e", ":", "self", ".", "__tell", "=", "None", "return", "SFTPServer", ".", "convert_errno", "(", "e", ".", "errno", ")", "if", "self", ".", "__tell", "is", "not", "None", ":", "self", ".", "__tell", "+=", "len", "(", "data", ")", "return", "SFTP_OK" ]
Write C{data} into this file at position C{offset}. Extending the file past its original end is expected. Unlike python's normal C{write()} methods, this method cannot do a partial write: it must write all of C{data} or else return an error. The default implementation checks for an attribute on C{self} named C{writefile}, and if present, performs the write operation on the python file-like object found there. The attribute is named differently from C{readfile} to make it easy to implement read-only (or write-only) files, but if both attributes are present, they should refer to the same file. @param offset: position in the file to start reading from. @type offset: int or long @param data: data to write into the file. @type data: str @return: an SFTP error code like L{SFTP_OK}.
[ "Write", "C", "{", "data", "}", "into", "this", "file", "at", "position", "C", "{", "offset", "}", ".", "Extending", "the", "file", "past", "its", "original", "end", "is", "expected", ".", "Unlike", "python", "s", "normal", "C", "{", "write", "()", "}", "methods", "this", "method", "cannot", "do", "a", "partial", "write", ":", "it", "must", "write", "all", "of", "C", "{", "data", "}", "or", "else", "return", "an", "error", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/sftp_handle.py#L109-L147
train
bitprophet/ssh
ssh/sftp_si.py
SFTPServerInterface.canonicalize
def canonicalize(self, path): """ Return the canonical form of a path on the server. For example, if the server's home folder is C{/home/foo}, the path C{"../betty"} would be canonicalized to C{"/home/betty"}. Note the obvious security issues: if you're serving files only from a specific folder, you probably don't want this method to reveal path names outside that folder. You may find the python methods in C{os.path} useful, especially C{os.path.normpath} and C{os.path.realpath}. The default implementation returns C{os.path.normpath('/' + path)}. """ if os.path.isabs(path): out = os.path.normpath(path) else: out = os.path.normpath('/' + path) if sys.platform == 'win32': # on windows, normalize backslashes to sftp/posix format out = out.replace('\\', '/') return out
python
def canonicalize(self, path): """ Return the canonical form of a path on the server. For example, if the server's home folder is C{/home/foo}, the path C{"../betty"} would be canonicalized to C{"/home/betty"}. Note the obvious security issues: if you're serving files only from a specific folder, you probably don't want this method to reveal path names outside that folder. You may find the python methods in C{os.path} useful, especially C{os.path.normpath} and C{os.path.realpath}. The default implementation returns C{os.path.normpath('/' + path)}. """ if os.path.isabs(path): out = os.path.normpath(path) else: out = os.path.normpath('/' + path) if sys.platform == 'win32': # on windows, normalize backslashes to sftp/posix format out = out.replace('\\', '/') return out
[ "def", "canonicalize", "(", "self", ",", "path", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "out", "=", "os", ".", "path", ".", "normpath", "(", "path", ")", "else", ":", "out", "=", "os", ".", "path", ".", "normpath", "(", "'/'", "+", "path", ")", "if", "sys", ".", "platform", "==", "'win32'", ":", "# on windows, normalize backslashes to sftp/posix format", "out", "=", "out", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "return", "out" ]
Return the canonical form of a path on the server. For example, if the server's home folder is C{/home/foo}, the path C{"../betty"} would be canonicalized to C{"/home/betty"}. Note the obvious security issues: if you're serving files only from a specific folder, you probably don't want this method to reveal path names outside that folder. You may find the python methods in C{os.path} useful, especially C{os.path.normpath} and C{os.path.realpath}. The default implementation returns C{os.path.normpath('/' + path)}.
[ "Return", "the", "canonical", "form", "of", "a", "path", "on", "the", "server", ".", "For", "example", "if", "the", "server", "s", "home", "folder", "is", "C", "{", "/", "home", "/", "foo", "}", "the", "path", "C", "{", "..", "/", "betty", "}", "would", "be", "canonicalized", "to", "C", "{", "/", "home", "/", "betty", "}", ".", "Note", "the", "obvious", "security", "issues", ":", "if", "you", "re", "serving", "files", "only", "from", "a", "specific", "folder", "you", "probably", "don", "t", "want", "this", "method", "to", "reveal", "path", "names", "outside", "that", "folder", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/sftp_si.py#L259-L280
train
bitprophet/ssh
ssh/agent.py
AgentClientProxy.connect
def connect(self): """ Method automatically called by the run() method of the AgentProxyThread """ if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'): conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: retry_on_signal(lambda: conn.connect(os.environ['SSH_AUTH_SOCK'])) except: # probably a dangling env var: the ssh agent is gone return elif sys.platform == 'win32': import win_pageant if win_pageant.can_talk_to_agent(): conn = win_pageant.PageantConnection() else: return else: # no agent support return self._conn = conn
python
def connect(self): """ Method automatically called by the run() method of the AgentProxyThread """ if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'): conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: retry_on_signal(lambda: conn.connect(os.environ['SSH_AUTH_SOCK'])) except: # probably a dangling env var: the ssh agent is gone return elif sys.platform == 'win32': import win_pageant if win_pageant.can_talk_to_agent(): conn = win_pageant.PageantConnection() else: return else: # no agent support return self._conn = conn
[ "def", "connect", "(", "self", ")", ":", "if", "(", "'SSH_AUTH_SOCK'", "in", "os", ".", "environ", ")", "and", "(", "sys", ".", "platform", "!=", "'win32'", ")", ":", "conn", "=", "socket", ".", "socket", "(", "socket", ".", "AF_UNIX", ",", "socket", ".", "SOCK_STREAM", ")", "try", ":", "retry_on_signal", "(", "lambda", ":", "conn", ".", "connect", "(", "os", ".", "environ", "[", "'SSH_AUTH_SOCK'", "]", ")", ")", "except", ":", "# probably a dangling env var: the ssh agent is gone", "return", "elif", "sys", ".", "platform", "==", "'win32'", ":", "import", "win_pageant", "if", "win_pageant", ".", "can_talk_to_agent", "(", ")", ":", "conn", "=", "win_pageant", ".", "PageantConnection", "(", ")", "else", ":", "return", "else", ":", "# no agent support", "return", "self", ".", "_conn", "=", "conn" ]
Method automatically called by the run() method of the AgentProxyThread
[ "Method", "automatically", "called", "by", "the", "run", "()", "method", "of", "the", "AgentProxyThread" ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/agent.py#L199-L219
train
bitprophet/ssh
ssh/client.py
SSHClient.save_host_keys
def save_host_keys(self, filename): """ Save the host keys back to a file. Only the host keys loaded with L{load_host_keys} (plus any added directly) will be saved -- not any host keys loaded with L{load_system_host_keys}. @param filename: the filename to save to @type filename: str @raise IOError: if the file could not be written """ f = open(filename, 'w') f.write('# SSH host keys collected by ssh\n') for hostname, keys in self._host_keys.iteritems(): for keytype, key in keys.iteritems(): f.write('%s %s %s\n' % (hostname, keytype, key.get_base64())) f.close()
python
def save_host_keys(self, filename): """ Save the host keys back to a file. Only the host keys loaded with L{load_host_keys} (plus any added directly) will be saved -- not any host keys loaded with L{load_system_host_keys}. @param filename: the filename to save to @type filename: str @raise IOError: if the file could not be written """ f = open(filename, 'w') f.write('# SSH host keys collected by ssh\n') for hostname, keys in self._host_keys.iteritems(): for keytype, key in keys.iteritems(): f.write('%s %s %s\n' % (hostname, keytype, key.get_base64())) f.close()
[ "def", "save_host_keys", "(", "self", ",", "filename", ")", ":", "f", "=", "open", "(", "filename", ",", "'w'", ")", "f", ".", "write", "(", "'# SSH host keys collected by ssh\\n'", ")", "for", "hostname", ",", "keys", "in", "self", ".", "_host_keys", ".", "iteritems", "(", ")", ":", "for", "keytype", ",", "key", "in", "keys", ".", "iteritems", "(", ")", ":", "f", ".", "write", "(", "'%s %s %s\\n'", "%", "(", "hostname", ",", "keytype", ",", "key", ".", "get_base64", "(", ")", ")", ")", "f", ".", "close", "(", ")" ]
Save the host keys back to a file. Only the host keys loaded with L{load_host_keys} (plus any added directly) will be saved -- not any host keys loaded with L{load_system_host_keys}. @param filename: the filename to save to @type filename: str @raise IOError: if the file could not be written
[ "Save", "the", "host", "keys", "back", "to", "a", "file", ".", "Only", "the", "host", "keys", "loaded", "with", "L", "{", "load_host_keys", "}", "(", "plus", "any", "added", "directly", ")", "will", "be", "saved", "--", "not", "any", "host", "keys", "loaded", "with", "L", "{", "load_system_host_keys", "}", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/client.py#L179-L195
train
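A short sketch of the host-key round trip that save_host_keys completes, assuming the package mirrors paramiko's SSHClient API and that the application-specific known_hosts file already exists (host names, credentials and paths are placeholders):

    from ssh import SSHClient, AutoAddPolicy

    client = SSHClient()
    client.load_system_host_keys()                     # system defaults, never written back
    client.load_host_keys('/tmp/app_known_hosts')      # application file; new keys land here
    client.set_missing_host_key_policy(AutoAddPolicy())
    client.connect('server.example.com', username='user', password='secret')
    client.save_host_keys('/tmp/app_known_hosts')      # persist any newly learned host keys
    client.close()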
bitprophet/ssh
ssh/client.py
SSHClient.exec_command
def exec_command(self, command, bufsize=-1): """ Execute a command on the SSH server. A new L{Channel} is opened and the requested command is executed. The command's input and output streams are returned as python C{file}-like objects representing stdin, stdout, and stderr. @param command: the command to execute @type command: str @param bufsize: interpreted the same way as by the built-in C{file()} function in python @type bufsize: int @return: the stdin, stdout, and stderr of the executing command @rtype: tuple(L{ChannelFile}, L{ChannelFile}, L{ChannelFile}) @raise SSHException: if the server fails to execute the command """ chan = self._transport.open_session() chan.exec_command(command) stdin = chan.makefile('wb', bufsize) stdout = chan.makefile('rb', bufsize) stderr = chan.makefile_stderr('rb', bufsize) return stdin, stdout, stderr
python
def exec_command(self, command, bufsize=-1): """ Execute a command on the SSH server. A new L{Channel} is opened and the requested command is executed. The command's input and output streams are returned as python C{file}-like objects representing stdin, stdout, and stderr. @param command: the command to execute @type command: str @param bufsize: interpreted the same way as by the built-in C{file()} function in python @type bufsize: int @return: the stdin, stdout, and stderr of the executing command @rtype: tuple(L{ChannelFile}, L{ChannelFile}, L{ChannelFile}) @raise SSHException: if the server fails to execute the command """ chan = self._transport.open_session() chan.exec_command(command) stdin = chan.makefile('wb', bufsize) stdout = chan.makefile('rb', bufsize) stderr = chan.makefile_stderr('rb', bufsize) return stdin, stdout, stderr
[ "def", "exec_command", "(", "self", ",", "command", ",", "bufsize", "=", "-", "1", ")", ":", "chan", "=", "self", ".", "_transport", ".", "open_session", "(", ")", "chan", ".", "exec_command", "(", "command", ")", "stdin", "=", "chan", ".", "makefile", "(", "'wb'", ",", "bufsize", ")", "stdout", "=", "chan", ".", "makefile", "(", "'rb'", ",", "bufsize", ")", "stderr", "=", "chan", ".", "makefile_stderr", "(", "'rb'", ",", "bufsize", ")", "return", "stdin", ",", "stdout", ",", "stderr" ]
Execute a command on the SSH server. A new L{Channel} is opened and the requested command is executed. The command's input and output streams are returned as python C{file}-like objects representing stdin, stdout, and stderr. @param command: the command to execute @type command: str @param bufsize: interpreted the same way as by the built-in C{file()} function in python @type bufsize: int @return: the stdin, stdout, and stderr of the executing command @rtype: tuple(L{ChannelFile}, L{ChannelFile}, L{ChannelFile}) @raise SSHException: if the server fails to execute the command
[ "Execute", "a", "command", "on", "the", "SSH", "server", ".", "A", "new", "L", "{", "Channel", "}", "is", "opened", "and", "the", "requested", "command", "is", "executed", ".", "The", "command", "s", "input", "and", "output", "streams", "are", "returned", "as", "python", "C", "{", "file", "}", "-", "like", "objects", "representing", "stdin", "stdout", "and", "stderr", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/client.py#L348-L369
train
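A minimal end-to-end usage sketch for exec_command, assuming the package exposes the same top-level names as paramiko and that the host, user and password are placeholders for a reachable server:

    from ssh import SSHClient, AutoAddPolicy

    client = SSHClient()
    client.set_missing_host_key_policy(AutoAddPolicy())
    client.connect('server.example.com', username='user', password='secret')

    stdin, stdout, stderr = client.exec_command('uname -a')
    output = stdout.read()                        # file-like wrapper over the channel
    status = stdout.channel.recv_exit_status()    # blocks until the remote command exits
    client.close()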
bitprophet/ssh
ssh/client.py
SSHClient._auth
def _auth(self, username, password, pkey, key_filenames, allow_agent, look_for_keys): """ Try, in order: - The key passed in, if one was passed in. - Any key we can find through an SSH agent (if allowed). - Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (if allowed). - Plain username/password auth, if a password was given. (The password might be needed to unlock a private key.) The password is required for two-factor authentication. """ saved_exception = None two_factor = False allowed_types = [] if pkey is not None: try: self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint())) allowed_types = self._transport.auth_publickey(username, pkey) two_factor = (allowed_types == ['password']) if not two_factor: return except SSHException, e: saved_exception = e if not two_factor: for key_filename in key_filenames: for pkey_class in (RSAKey, DSSKey): try: key = pkey_class.from_private_key_file(key_filename, password) self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename)) self._transport.auth_publickey(username, key) two_factor = (allowed_types == ['password']) if not two_factor: return break except SSHException, e: saved_exception = e if not two_factor and allow_agent: if self._agent == None: self._agent = Agent() for key in self._agent.get_keys(): try: self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint())) # for 2-factor auth a successfully auth'd key will result in ['password'] allowed_types = self._transport.auth_publickey(username, key) two_factor = (allowed_types == ['password']) if not two_factor: return break except SSHException, e: saved_exception = e if not two_factor: keyfiles = [] rsa_key = os.path.expanduser('~/.ssh/id_rsa') dsa_key = os.path.expanduser('~/.ssh/id_dsa') if os.path.isfile(rsa_key): keyfiles.append((RSAKey, rsa_key)) if os.path.isfile(dsa_key): keyfiles.append((DSSKey, dsa_key)) # look in ~/ssh/ for windows users: rsa_key = os.path.expanduser('~/ssh/id_rsa') dsa_key = os.path.expanduser('~/ssh/id_dsa') if os.path.isfile(rsa_key): keyfiles.append((RSAKey, rsa_key)) if os.path.isfile(dsa_key): keyfiles.append((DSSKey, dsa_key)) if not look_for_keys: keyfiles = [] for pkey_class, filename in keyfiles: try: key = pkey_class.from_private_key_file(filename, password) self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename)) # for 2-factor auth a successfully auth'd key will result in ['password'] allowed_types = self._transport.auth_publickey(username, key) two_factor = (allowed_types == ['password']) if not two_factor: return break except SSHException, e: saved_exception = e except IOError, e: saved_exception = e if password is not None: try: self._transport.auth_password(username, password) return except SSHException, e: saved_exception = e elif two_factor: raise SSHException('Two-factor authentication requires a password') # if we got an auth-failed exception earlier, re-raise it if saved_exception is not None: raise saved_exception raise SSHException('No authentication methods available')
python
def _auth(self, username, password, pkey, key_filenames, allow_agent, look_for_keys): """ Try, in order: - The key passed in, if one was passed in. - Any key we can find through an SSH agent (if allowed). - Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (if allowed). - Plain username/password auth, if a password was given. (The password might be needed to unlock a private key.) The password is required for two-factor authentication. """ saved_exception = None two_factor = False allowed_types = [] if pkey is not None: try: self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint())) allowed_types = self._transport.auth_publickey(username, pkey) two_factor = (allowed_types == ['password']) if not two_factor: return except SSHException, e: saved_exception = e if not two_factor: for key_filename in key_filenames: for pkey_class in (RSAKey, DSSKey): try: key = pkey_class.from_private_key_file(key_filename, password) self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename)) self._transport.auth_publickey(username, key) two_factor = (allowed_types == ['password']) if not two_factor: return break except SSHException, e: saved_exception = e if not two_factor and allow_agent: if self._agent == None: self._agent = Agent() for key in self._agent.get_keys(): try: self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint())) # for 2-factor auth a successfully auth'd key will result in ['password'] allowed_types = self._transport.auth_publickey(username, key) two_factor = (allowed_types == ['password']) if not two_factor: return break except SSHException, e: saved_exception = e if not two_factor: keyfiles = [] rsa_key = os.path.expanduser('~/.ssh/id_rsa') dsa_key = os.path.expanduser('~/.ssh/id_dsa') if os.path.isfile(rsa_key): keyfiles.append((RSAKey, rsa_key)) if os.path.isfile(dsa_key): keyfiles.append((DSSKey, dsa_key)) # look in ~/ssh/ for windows users: rsa_key = os.path.expanduser('~/ssh/id_rsa') dsa_key = os.path.expanduser('~/ssh/id_dsa') if os.path.isfile(rsa_key): keyfiles.append((RSAKey, rsa_key)) if os.path.isfile(dsa_key): keyfiles.append((DSSKey, dsa_key)) if not look_for_keys: keyfiles = [] for pkey_class, filename in keyfiles: try: key = pkey_class.from_private_key_file(filename, password) self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename)) # for 2-factor auth a successfully auth'd key will result in ['password'] allowed_types = self._transport.auth_publickey(username, key) two_factor = (allowed_types == ['password']) if not two_factor: return break except SSHException, e: saved_exception = e except IOError, e: saved_exception = e if password is not None: try: self._transport.auth_password(username, password) return except SSHException, e: saved_exception = e elif two_factor: raise SSHException('Two-factor authentication requires a password') # if we got an auth-failed exception earlier, re-raise it if saved_exception is not None: raise saved_exception raise SSHException('No authentication methods available')
[ "def", "_auth", "(", "self", ",", "username", ",", "password", ",", "pkey", ",", "key_filenames", ",", "allow_agent", ",", "look_for_keys", ")", ":", "saved_exception", "=", "None", "two_factor", "=", "False", "allowed_types", "=", "[", "]", "if", "pkey", "is", "not", "None", ":", "try", ":", "self", ".", "_log", "(", "DEBUG", ",", "'Trying SSH key %s'", "%", "hexlify", "(", "pkey", ".", "get_fingerprint", "(", ")", ")", ")", "allowed_types", "=", "self", ".", "_transport", ".", "auth_publickey", "(", "username", ",", "pkey", ")", "two_factor", "=", "(", "allowed_types", "==", "[", "'password'", "]", ")", "if", "not", "two_factor", ":", "return", "except", "SSHException", ",", "e", ":", "saved_exception", "=", "e", "if", "not", "two_factor", ":", "for", "key_filename", "in", "key_filenames", ":", "for", "pkey_class", "in", "(", "RSAKey", ",", "DSSKey", ")", ":", "try", ":", "key", "=", "pkey_class", ".", "from_private_key_file", "(", "key_filename", ",", "password", ")", "self", ".", "_log", "(", "DEBUG", ",", "'Trying key %s from %s'", "%", "(", "hexlify", "(", "key", ".", "get_fingerprint", "(", ")", ")", ",", "key_filename", ")", ")", "self", ".", "_transport", ".", "auth_publickey", "(", "username", ",", "key", ")", "two_factor", "=", "(", "allowed_types", "==", "[", "'password'", "]", ")", "if", "not", "two_factor", ":", "return", "break", "except", "SSHException", ",", "e", ":", "saved_exception", "=", "e", "if", "not", "two_factor", "and", "allow_agent", ":", "if", "self", ".", "_agent", "==", "None", ":", "self", ".", "_agent", "=", "Agent", "(", ")", "for", "key", "in", "self", ".", "_agent", ".", "get_keys", "(", ")", ":", "try", ":", "self", ".", "_log", "(", "DEBUG", ",", "'Trying SSH agent key %s'", "%", "hexlify", "(", "key", ".", "get_fingerprint", "(", ")", ")", ")", "# for 2-factor auth a successfully auth'd key will result in ['password']", "allowed_types", "=", "self", ".", "_transport", ".", "auth_publickey", "(", "username", ",", "key", ")", "two_factor", "=", "(", "allowed_types", "==", "[", "'password'", "]", ")", "if", "not", "two_factor", ":", "return", "break", "except", "SSHException", ",", "e", ":", "saved_exception", "=", "e", "if", "not", "two_factor", ":", "keyfiles", "=", "[", "]", "rsa_key", "=", "os", ".", "path", ".", "expanduser", "(", "'~/.ssh/id_rsa'", ")", "dsa_key", "=", "os", ".", "path", ".", "expanduser", "(", "'~/.ssh/id_dsa'", ")", "if", "os", ".", "path", ".", "isfile", "(", "rsa_key", ")", ":", "keyfiles", ".", "append", "(", "(", "RSAKey", ",", "rsa_key", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "dsa_key", ")", ":", "keyfiles", ".", "append", "(", "(", "DSSKey", ",", "dsa_key", ")", ")", "# look in ~/ssh/ for windows users:", "rsa_key", "=", "os", ".", "path", ".", "expanduser", "(", "'~/ssh/id_rsa'", ")", "dsa_key", "=", "os", ".", "path", ".", "expanduser", "(", "'~/ssh/id_dsa'", ")", "if", "os", ".", "path", ".", "isfile", "(", "rsa_key", ")", ":", "keyfiles", ".", "append", "(", "(", "RSAKey", ",", "rsa_key", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "dsa_key", ")", ":", "keyfiles", ".", "append", "(", "(", "DSSKey", ",", "dsa_key", ")", ")", "if", "not", "look_for_keys", ":", "keyfiles", "=", "[", "]", "for", "pkey_class", ",", "filename", "in", "keyfiles", ":", "try", ":", "key", "=", "pkey_class", ".", "from_private_key_file", "(", "filename", ",", "password", ")", "self", ".", "_log", "(", "DEBUG", ",", "'Trying discovered key %s in %s'", "%", "(", "hexlify", "(", "key", ".", "get_fingerprint", "(", ")", 
")", ",", "filename", ")", ")", "# for 2-factor auth a successfully auth'd key will result in ['password']", "allowed_types", "=", "self", ".", "_transport", ".", "auth_publickey", "(", "username", ",", "key", ")", "two_factor", "=", "(", "allowed_types", "==", "[", "'password'", "]", ")", "if", "not", "two_factor", ":", "return", "break", "except", "SSHException", ",", "e", ":", "saved_exception", "=", "e", "except", "IOError", ",", "e", ":", "saved_exception", "=", "e", "if", "password", "is", "not", "None", ":", "try", ":", "self", ".", "_transport", ".", "auth_password", "(", "username", ",", "password", ")", "return", "except", "SSHException", ",", "e", ":", "saved_exception", "=", "e", "elif", "two_factor", ":", "raise", "SSHException", "(", "'Two-factor authentication requires a password'", ")", "# if we got an auth-failed exception earlier, re-raise it", "if", "saved_exception", "is", "not", "None", ":", "raise", "saved_exception", "raise", "SSHException", "(", "'No authentication methods available'", ")" ]
Try, in order: - The key passed in, if one was passed in. - Any key we can find through an SSH agent (if allowed). - Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (if allowed). - Plain username/password auth, if a password was given. (The password might be needed to unlock a private key.) The password is required for two-factor authentication.
[ "Try", "in", "order", ":" ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/client.py#L413-L516
train
bitprophet/ssh
ssh/message.py
Message.get_bytes
def get_bytes(self, n): """ Return the next C{n} bytes of the Message, without decomposing into an int, string, etc. Just the raw bytes are returned. @return: a string of the next C{n} bytes of the Message, or a string of C{n} zero bytes, if there aren't C{n} bytes remaining. @rtype: string """ b = self.packet.read(n) if len(b) < n: return b + '\x00' * (n - len(b)) return b
python
def get_bytes(self, n): """ Return the next C{n} bytes of the Message, without decomposing into an int, string, etc. Just the raw bytes are returned. @return: a string of the next C{n} bytes of the Message, or a string of C{n} zero bytes, if there aren't C{n} bytes remaining. @rtype: string """ b = self.packet.read(n) if len(b) < n: return b + '\x00' * (n - len(b)) return b
[ "def", "get_bytes", "(", "self", ",", "n", ")", ":", "b", "=", "self", ".", "packet", ".", "read", "(", "n", ")", "if", "len", "(", "b", ")", "<", "n", ":", "return", "b", "+", "'\\x00'", "*", "(", "n", "-", "len", "(", "b", ")", ")", "return", "b" ]
Return the next C{n} bytes of the Message, without decomposing into an int, string, etc. Just the raw bytes are returned. @return: a string of the next C{n} bytes of the Message, or a string of C{n} zero bytes, if there aren't C{n} bytes remaining. @rtype: string
[ "Return", "the", "next", "C", "{", "n", "}", "bytes", "of", "the", "Message", "without", "decomposing", "into", "an", "int", "string", "etc", ".", "Just", "the", "raw", "bytes", "are", "returned", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/message.py#L103-L115
train
bitprophet/ssh
ssh/message.py
Message.add_int
def add_int(self, n): """ Add an integer to the stream. @param n: integer to add @type n: int """ self.packet.write(struct.pack('>I', n)) return self
python
def add_int(self, n): """ Add an integer to the stream. @param n: integer to add @type n: int """ self.packet.write(struct.pack('>I', n)) return self
[ "def", "add_int", "(", "self", ",", "n", ")", ":", "self", ".", "packet", ".", "write", "(", "struct", ".", "pack", "(", "'>I'", ",", "n", ")", ")", "return", "self" ]
Add an integer to the stream. @param n: integer to add @type n: int
[ "Add", "an", "integer", "to", "the", "stream", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/message.py#L219-L227
train
bitprophet/ssh
ssh/message.py
Message.add_string
def add_string(self, s): """ Add a string to the stream. @param s: string to add @type s: str """ self.add_int(len(s)) self.packet.write(s) return self
python
def add_string(self, s): """ Add a string to the stream. @param s: string to add @type s: str """ self.add_int(len(s)) self.packet.write(s) return self
[ "def", "add_string", "(", "self", ",", "s", ")", ":", "self", ".", "add_int", "(", "len", "(", "s", ")", ")", "self", ".", "packet", ".", "write", "(", "s", ")", "return", "self" ]
Add a string to the stream. @param s: string to add @type s: str
[ "Add", "a", "string", "to", "the", "stream", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/message.py#L250-L259
train
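add_int and add_string are the encoding half of the Message API; the matching get_* calls decode. A small round-trip sketch (Python 2, using the module path shown in the records):

    from ssh.message import Message

    m = Message()
    m.add_int(5)
    m.add_string('exit-status')
    m.add_boolean(True)

    p = Message(str(m))                  # re-parse the packed bytes
    assert p.get_int() == 5
    assert p.get_string() == 'exit-status'
    assert p.get_boolean() == True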
bitprophet/ssh
ssh/channel.py
Channel.resize_pty
def resize_pty(self, width=80, height=24): """ Resize the pseudo-terminal. This can be used to change the width and height of the terminal emulation created in a previous L{get_pty} call. @param width: new width (in characters) of the terminal screen @type width: int @param height: new height (in characters) of the terminal screen @type height: int @raise SSHException: if the request was rejected or the channel was closed """ if self.closed or self.eof_received or self.eof_sent or not self.active: raise SSHException('Channel is not open') m = Message() m.add_byte(chr(MSG_CHANNEL_REQUEST)) m.add_int(self.remote_chanid) m.add_string('window-change') m.add_boolean(True) m.add_int(width) m.add_int(height) m.add_int(0).add_int(0) self._event_pending() self.transport._send_user_message(m) self._wait_for_event()
python
def resize_pty(self, width=80, height=24): """ Resize the pseudo-terminal. This can be used to change the width and height of the terminal emulation created in a previous L{get_pty} call. @param width: new width (in characters) of the terminal screen @type width: int @param height: new height (in characters) of the terminal screen @type height: int @raise SSHException: if the request was rejected or the channel was closed """ if self.closed or self.eof_received or self.eof_sent or not self.active: raise SSHException('Channel is not open') m = Message() m.add_byte(chr(MSG_CHANNEL_REQUEST)) m.add_int(self.remote_chanid) m.add_string('window-change') m.add_boolean(True) m.add_int(width) m.add_int(height) m.add_int(0).add_int(0) self._event_pending() self.transport._send_user_message(m) self._wait_for_event()
[ "def", "resize_pty", "(", "self", ",", "width", "=", "80", ",", "height", "=", "24", ")", ":", "if", "self", ".", "closed", "or", "self", ".", "eof_received", "or", "self", ".", "eof_sent", "or", "not", "self", ".", "active", ":", "raise", "SSHException", "(", "'Channel is not open'", ")", "m", "=", "Message", "(", ")", "m", ".", "add_byte", "(", "chr", "(", "MSG_CHANNEL_REQUEST", ")", ")", "m", ".", "add_int", "(", "self", ".", "remote_chanid", ")", "m", ".", "add_string", "(", "'window-change'", ")", "m", ".", "add_boolean", "(", "True", ")", "m", ".", "add_int", "(", "width", ")", "m", ".", "add_int", "(", "height", ")", "m", ".", "add_int", "(", "0", ")", ".", "add_int", "(", "0", ")", "self", ".", "_event_pending", "(", ")", "self", ".", "transport", ".", "_send_user_message", "(", "m", ")", "self", ".", "_wait_for_event", "(", ")" ]
Resize the pseudo-terminal. This can be used to change the width and height of the terminal emulation created in a previous L{get_pty} call. @param width: new width (in characters) of the terminal screen @type width: int @param height: new height (in characters) of the terminal screen @type height: int @raise SSHException: if the request was rejected or the channel was closed
[ "Resize", "the", "pseudo", "-", "terminal", ".", "This", "can", "be", "used", "to", "change", "the", "width", "and", "height", "of", "the", "terminal", "emulation", "created", "in", "a", "previous", "L", "{", "get_pty", "}", "call", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/channel.py#L242-L267
train
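A client-side sketch of resize_pty, assuming t is an already-authenticated ssh.Transport; the terminal type and sizes are illustrative:

chan = t.open_session()
chan.get_pty(term='vt100', width=80, height=24)   # allocate the pty before resizing it
chan.invoke_shell()
# ... later, when the local terminal changes size:
chan.resize_pty(width=120, height=40)             # sends the 'window-change' request shown above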
bitprophet/ssh
ssh/channel.py
Channel.recv_exit_status
def recv_exit_status(self): """ Return the exit status from the process on the server. This is mostly useful for retrieving the results of an L{exec_command}. If the command hasn't finished yet, this method will wait until it does, or until the channel is closed. If no exit status is provided by the server, -1 is returned. @return: the exit code of the process on the server. @rtype: int @since: 1.2 """ self.status_event.wait() assert self.status_event.isSet() return self.exit_status
python
def recv_exit_status(self): """ Return the exit status from the process on the server. This is mostly useful for retrieving the results of an L{exec_command}. If the command hasn't finished yet, this method will wait until it does, or until the channel is closed. If no exit status is provided by the server, -1 is returned. @return: the exit code of the process on the server. @rtype: int @since: 1.2 """ self.status_event.wait() assert self.status_event.isSet() return self.exit_status
[ "def", "recv_exit_status", "(", "self", ")", ":", "self", ".", "status_event", ".", "wait", "(", ")", "assert", "self", ".", "status_event", ".", "isSet", "(", ")", "return", "self", ".", "exit_status" ]
Return the exit status from the process on the server. This is mostly useful for retrieving the results of an L{exec_command}. If the command hasn't finished yet, this method will wait until it does, or until the channel is closed. If no exit status is provided by the server, -1 is returned. @return: the exit code of the process on the server. @rtype: int @since: 1.2
[ "Return", "the", "exit", "status", "from", "the", "process", "on", "the", "server", ".", "This", "is", "mostly", "useful", "for", "retrieving", "the", "results", "of", "an", "L", "{", "exec_command", "}", ".", "If", "the", "command", "hasn", "t", "finished", "yet", "this", "method", "will", "wait", "until", "it", "does", "or", "until", "the", "channel", "is", "closed", ".", "If", "no", "exit", "status", "is", "provided", "by", "the", "server", "-", "1", "is", "returned", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/channel.py#L282-L297
train
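A sketch of the exec/exit-status flow, again assuming t is an authenticated ssh.Transport and an arbitrary remote command:

chan = t.open_session()
chan.exec_command('uname -a')
status = chan.recv_exit_status()   # blocks until the remote process exits; -1 if no status was sent
print('exit code: %d' % status)
chan.close()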
bitprophet/ssh
ssh/channel.py
Channel.send_exit_status
def send_exit_status(self, status): """ Send the exit status of an executed command to the client. (This really only makes sense in server mode.) Many clients expect to get some sort of status code back from an executed command after it completes. @param status: the exit code of the process @type status: int @since: 1.2 """ # in many cases, the channel will not still be open here. # that's fine. m = Message() m.add_byte(chr(MSG_CHANNEL_REQUEST)) m.add_int(self.remote_chanid) m.add_string('exit-status') m.add_boolean(False) m.add_int(status) self.transport._send_user_message(m)
python
def send_exit_status(self, status): """ Send the exit status of an executed command to the client. (This really only makes sense in server mode.) Many clients expect to get some sort of status code back from an executed command after it completes. @param status: the exit code of the process @type status: int @since: 1.2 """ # in many cases, the channel will not still be open here. # that's fine. m = Message() m.add_byte(chr(MSG_CHANNEL_REQUEST)) m.add_int(self.remote_chanid) m.add_string('exit-status') m.add_boolean(False) m.add_int(status) self.transport._send_user_message(m)
[ "def", "send_exit_status", "(", "self", ",", "status", ")", ":", "# in many cases, the channel will not still be open here.", "# that's fine.", "m", "=", "Message", "(", ")", "m", ".", "add_byte", "(", "chr", "(", "MSG_CHANNEL_REQUEST", ")", ")", "m", ".", "add_int", "(", "self", ".", "remote_chanid", ")", "m", ".", "add_string", "(", "'exit-status'", ")", "m", ".", "add_boolean", "(", "False", ")", "m", ".", "add_int", "(", "status", ")", "self", ".", "transport", ".", "_send_user_message", "(", "m", ")" ]
Send the exit status of an executed command to the client. (This really only makes sense in server mode.) Many clients expect to get some sort of status code back from an executed command after it completes. @param status: the exit code of the process @type status: int @since: 1.2
[ "Send", "the", "exit", "status", "of", "an", "executed", "command", "to", "the", "client", ".", "(", "This", "really", "only", "makes", "sense", "in", "server", "mode", ".", ")", "Many", "clients", "expect", "to", "get", "some", "sort", "of", "status", "code", "back", "from", "an", "executed", "command", "after", "it", "completes", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/channel.py#L299-L319
train
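send_exit_status only makes sense in server mode; a rough server-side sketch, assuming chan was accepted from a server-mode Transport and run_command is a hypothetical application-specific helper:

def handle_exec_request(chan, command):
    ok = run_command(command)             # hypothetical: run the command however the server sees fit
    chan.sendall('done\n')                # regular output back to the client
    chan.send_exit_status(0 if ok else 1)
    chan.close()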
bitprophet/ssh
ssh/channel.py
Channel.recv
def recv(self, nbytes): """ Receive data from the channel. The return value is a string representing the data received. The maximum amount of data to be received at once is specified by C{nbytes}. If a string of length zero is returned, the channel stream has closed. @param nbytes: maximum number of bytes to read. @type nbytes: int @return: data. @rtype: str @raise socket.timeout: if no data is ready before the timeout set by L{settimeout}. """ try: out = self.in_buffer.read(nbytes, self.timeout) except PipeTimeout, e: raise socket.timeout() ack = self._check_add_window(len(out)) # no need to hold the channel lock when sending this if ack > 0: m = Message() m.add_byte(chr(MSG_CHANNEL_WINDOW_ADJUST)) m.add_int(self.remote_chanid) m.add_int(ack) self.transport._send_user_message(m) return out
python
def recv(self, nbytes): """ Receive data from the channel. The return value is a string representing the data received. The maximum amount of data to be received at once is specified by C{nbytes}. If a string of length zero is returned, the channel stream has closed. @param nbytes: maximum number of bytes to read. @type nbytes: int @return: data. @rtype: str @raise socket.timeout: if no data is ready before the timeout set by L{settimeout}. """ try: out = self.in_buffer.read(nbytes, self.timeout) except PipeTimeout, e: raise socket.timeout() ack = self._check_add_window(len(out)) # no need to hold the channel lock when sending this if ack > 0: m = Message() m.add_byte(chr(MSG_CHANNEL_WINDOW_ADJUST)) m.add_int(self.remote_chanid) m.add_int(ack) self.transport._send_user_message(m) return out
[ "def", "recv", "(", "self", ",", "nbytes", ")", ":", "try", ":", "out", "=", "self", ".", "in_buffer", ".", "read", "(", "nbytes", ",", "self", ".", "timeout", ")", "except", "PipeTimeout", ",", "e", ":", "raise", "socket", ".", "timeout", "(", ")", "ack", "=", "self", ".", "_check_add_window", "(", "len", "(", "out", ")", ")", "# no need to hold the channel lock when sending this", "if", "ack", ">", "0", ":", "m", "=", "Message", "(", ")", "m", ".", "add_byte", "(", "chr", "(", "MSG_CHANNEL_WINDOW_ADJUST", ")", ")", "m", ".", "add_int", "(", "self", ".", "remote_chanid", ")", "m", ".", "add_int", "(", "ack", ")", "self", ".", "transport", ".", "_send_user_message", "(", "m", ")", "return", "out" ]
Receive data from the channel. The return value is a string representing the data received. The maximum amount of data to be received at once is specified by C{nbytes}. If a string of length zero is returned, the channel stream has closed. @param nbytes: maximum number of bytes to read. @type nbytes: int @return: data. @rtype: str @raise socket.timeout: if no data is ready before the timeout set by L{settimeout}.
[ "Receive", "data", "from", "the", "channel", ".", "The", "return", "value", "is", "a", "string", "representing", "the", "data", "received", ".", "The", "maximum", "amount", "of", "data", "to", "be", "received", "at", "once", "is", "specified", "by", "C", "{", "nbytes", "}", ".", "If", "a", "string", "of", "length", "zero", "is", "returned", "the", "channel", "stream", "has", "closed", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/channel.py#L593-L622
train
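A read loop over recv, assuming chan is an open Channel; the 4096-byte reads and 30-second timeout are arbitrary choices:

import socket

chan.settimeout(30.0)
chunks = []
while True:
    try:
        data = chan.recv(4096)
    except socket.timeout:
        break                             # nothing arrived within the timeout
    if not data:                          # a zero-length read means the stream has closed
        break
    chunks.append(data)
output = ''.join(chunks)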
bitprophet/ssh
ssh/channel.py
Channel.sendall
def sendall(self, s): """ Send data to the channel, without allowing partial results. Unlike L{send}, this method continues to send data from the given string until either all data has been sent or an error occurs. Nothing is returned. @param s: data to send. @type s: str @raise socket.timeout: if sending stalled for longer than the timeout set by L{settimeout}. @raise socket.error: if an error occurred before the entire string was sent. @note: If the channel is closed while only part of the data has been sent, there is no way to determine how much data (if any) was sent. This is irritating, but identically follows python's API. """ while s: if self.closed: # this doesn't seem useful, but it is the documented behavior of Socket raise socket.error('Socket is closed') sent = self.send(s) s = s[sent:] return None
python
def sendall(self, s): """ Send data to the channel, without allowing partial results. Unlike L{send}, this method continues to send data from the given string until either all data has been sent or an error occurs. Nothing is returned. @param s: data to send. @type s: str @raise socket.timeout: if sending stalled for longer than the timeout set by L{settimeout}. @raise socket.error: if an error occurred before the entire string was sent. @note: If the channel is closed while only part of the data has been sent, there is no way to determine how much data (if any) was sent. This is irritating, but identically follows python's API. """ while s: if self.closed: # this doesn't seem useful, but it is the documented behavior of Socket raise socket.error('Socket is closed') sent = self.send(s) s = s[sent:] return None
[ "def", "sendall", "(", "self", ",", "s", ")", ":", "while", "s", ":", "if", "self", ".", "closed", ":", "# this doesn't seem useful, but it is the documented behavior of Socket", "raise", "socket", ".", "error", "(", "'Socket is closed'", ")", "sent", "=", "self", ".", "send", "(", "s", ")", "s", "=", "s", "[", "sent", ":", "]", "return", "None" ]
Send data to the channel, without allowing partial results. Unlike L{send}, this method continues to send data from the given string until either all data has been sent or an error occurs. Nothing is returned. @param s: data to send. @type s: str @raise socket.timeout: if sending stalled for longer than the timeout set by L{settimeout}. @raise socket.error: if an error occurred before the entire string was sent. @note: If the channel is closed while only part of the data has been sent, there is no way to determine how much data (if any) was sent. This is irritating, but identically follows python's API.
[ "Send", "data", "to", "the", "channel", "without", "allowing", "partial", "results", ".", "Unlike", "L", "{", "send", "}", "this", "method", "continues", "to", "send", "data", "from", "the", "given", "string", "until", "either", "all", "data", "has", "been", "sent", "or", "an", "error", "occurs", ".", "Nothing", "is", "returned", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/channel.py#L767-L791
train
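A sketch of sendall for feeding a remote command's stdin, assuming chan is an open session; the shutdown_write call to signal EOF is an assumption about the surrounding workflow:

import socket

payload = 'line one\nline two\n'
try:
    chan.sendall(payload)                 # keeps calling send() until everything is queued
    chan.shutdown_write()                 # signal EOF so the remote command stops waiting for stdin
except socket.error:
    pass                                  # the channel closed mid-write; how much went out is unknown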
bitprophet/ssh
ssh/channel.py
Channel.sendall_stderr
def sendall_stderr(self, s): """ Send data to the channel's "stderr" stream, without allowing partial results. Unlike L{send_stderr}, this method continues to send data from the given string until all data has been sent or an error occurs. Nothing is returned. @param s: data to send to the client as "stderr" output. @type s: str @raise socket.timeout: if sending stalled for longer than the timeout set by L{settimeout}. @raise socket.error: if an error occurred before the entire string was sent. @since: 1.1 """ while s: if self.closed: raise socket.error('Socket is closed') sent = self.send_stderr(s) s = s[sent:] return None
python
def sendall_stderr(self, s): """ Send data to the channel's "stderr" stream, without allowing partial results. Unlike L{send_stderr}, this method continues to send data from the given string until all data has been sent or an error occurs. Nothing is returned. @param s: data to send to the client as "stderr" output. @type s: str @raise socket.timeout: if sending stalled for longer than the timeout set by L{settimeout}. @raise socket.error: if an error occurred before the entire string was sent. @since: 1.1 """ while s: if self.closed: raise socket.error('Socket is closed') sent = self.send_stderr(s) s = s[sent:] return None
[ "def", "sendall_stderr", "(", "self", ",", "s", ")", ":", "while", "s", ":", "if", "self", ".", "closed", ":", "raise", "socket", ".", "error", "(", "'Socket is closed'", ")", "sent", "=", "self", ".", "send_stderr", "(", "s", ")", "s", "=", "s", "[", "sent", ":", "]", "return", "None" ]
Send data to the channel's "stderr" stream, without allowing partial results. Unlike L{send_stderr}, this method continues to send data from the given string until all data has been sent or an error occurs. Nothing is returned. @param s: data to send to the client as "stderr" output. @type s: str @raise socket.timeout: if sending stalled for longer than the timeout set by L{settimeout}. @raise socket.error: if an error occurred before the entire string was sent. @since: 1.1
[ "Send", "data", "to", "the", "channel", "s", "stderr", "stream", "without", "allowing", "partial", "results", ".", "Unlike", "L", "{", "send_stderr", "}", "this", "method", "continues", "to", "send", "data", "from", "the", "given", "string", "until", "all", "data", "has", "been", "sent", "or", "an", "error", "occurs", ".", "Nothing", "is", "returned", "." ]
e8bdad4c82a50158a749233dca58c29e47c60b76
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/channel.py#L793-L815
train
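sendall_stderr is the server-side counterpart for error output; a sketch assuming chan was accepted in server mode:

try:
    chan.sendall('regular output\n')
    chan.sendall_stderr('warning: something looked odd\n')
    chan.send_exit_status(1)
finally:
    chan.close()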