repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
predicador37/pyjstat | pyjstat/pyjstat.py | Dataset.read | python | def read(cls, data):
if isinstance(data, pd.DataFrame):
return cls((json.loads(
to_json_stat(data, output='dict', version='2.0'),
object_pairs_hook=OrderedDict)))
elif isinstance(data, OrderedDict):
return cls(data)
elif (isinstance(data, basestring)
and data.startswith(("http://", "https://",
"ftp://", "ftps://"))):
# requests will do the rest...
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise | Reads data from URL, Dataframe, JSON string, JSON file or
OrderedDict.
Args:
data: can be a Pandas Dataframe, a JSON file, a JSON string,
an OrderedDict or a URL pointing to a JSONstat file.
Returns:
An object of class Dataset populated with data. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L579-L612 | [
"def request(path):\n \"\"\"Send a request to a given URL accepting JSON format and return a \\\n deserialized Python object.\n\n Args:\n path (str): The URI to be requested.\n\n Returns:\n response: Deserialized JSON Python object.\n\n Raises:\n HTTPError: the HTTP error returned b... | class Dataset(OrderedDict):
"""A class representing a JSONstat dataset.
"""
def __init__(self, *args, **kwargs):
super(Dataset, self).__init__(*args, **kwargs)
@classmethod
def write(self, output='jsonstat'):
"""Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'. Default to
'jsonstat'.
Returns:
Serialized JSONstat or a Pandas Dataframe,depending on the \
'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return from_json_stat(self)[0]
else:
raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
def get_dimension_index(self, name, value):
"""Converts a dimension ID string and a categody ID string into the \
numeric index of that category in that dimension
Args:
name(string): ID string of the dimension.
value(string): ID string of the category.
Returns:
ndx[value](int): index of the category in the dimension.
"""
if 'index' not in self.get('dimension', {}). \
get(name, {}).get('category', {}):
return 0
ndx = self['dimension'][name]['category']['index']
if isinstance(ndx, list):
return ndx.index(value)
else:
return ndx[value]
def get_dimension_indices(self, query):
"""Converts a dimension/category list of dicts into a list of \
dimensions’ indices.
Args:
query(list): dimension/category list of dicts.
Returns:
indices(list): list of dimensions' indices.
"""
ids = self['id'] if self.get('id') else self['dimension']['id']
indices = []
for idx, id in enumerate(ids):
indices.append(self.get_dimension_index(id,
[d.get(id) for d in query
if id in d][0]))
return indices
def get_value_index(self, indices):
"""Converts a list of dimensions’ indices into a numeric value index.
Args:
indices(list): list of dimension's indices.
Returns:
num(int): numeric value index.
"""
size = self['size'] if self.get('size') else self['dimension']['size']
ndims = len(size)
mult = 1
num = 0
for idx, dim in enumerate(size):
mult *= size[ndims - idx] if (idx > 0) else 1
num += mult * indices[ndims - idx - 1]
return num
def get_value_by_index(self, index):
"""Converts a numeric value index into its data value.
Args:
index(int): numeric value index.
Returns:
self['value'][index](float): Numeric data value.
"""
return self['value'][index]
def get_value(self, query):
"""Converts a dimension/category list of dicts into a data value \
in three steps.
Args:
query(list): list of dicts with the desired query.
Returns:
value(float): numeric data value.
"""
indices = self.get_dimension_indices(query)
index = self.get_value_index(indices)
value = self.get_value_by_index(index)
return value
|
predicador37/pyjstat | pyjstat/pyjstat.py | Dataset.get_dimension_index | python | def get_dimension_index(self, name, value):
if 'index' not in self.get('dimension', {}). \
get(name, {}).get('category', {}):
return 0
ndx = self['dimension'][name]['category']['index']
if isinstance(ndx, list):
return ndx.index(value)
else:
return ndx[value] | Converts a dimension ID string and a categody ID string into the \
numeric index of that category in that dimension
Args:
name(string): ID string of the dimension.
value(string): ID string of the category.
Returns:
ndx[value](int): index of the category in the dimension. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L633-L653 | null | class Dataset(OrderedDict):
"""A class representing a JSONstat dataset.
"""
def __init__(self, *args, **kwargs):
super(Dataset, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL, Dataframe, JSON string, JSON file or
OrderedDict.
Args:
data: can be a Pandas Dataframe, a JSON file, a JSON string,
an OrderedDict or a URL pointing to a JSONstat file.
Returns:
An object of class Dataset populated with data.
"""
if isinstance(data, pd.DataFrame):
return cls((json.loads(
to_json_stat(data, output='dict', version='2.0'),
object_pairs_hook=OrderedDict)))
elif isinstance(data, OrderedDict):
return cls(data)
elif (isinstance(data, basestring)
and data.startswith(("http://", "https://",
"ftp://", "ftps://"))):
# requests will do the rest...
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def write(self, output='jsonstat'):
"""Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'. Default to
'jsonstat'.
Returns:
Serialized JSONstat or a Pandas Dataframe,depending on the \
'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return from_json_stat(self)[0]
else:
raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
def get_dimension_indices(self, query):
"""Converts a dimension/category list of dicts into a list of \
dimensions’ indices.
Args:
query(list): dimension/category list of dicts.
Returns:
indices(list): list of dimensions' indices.
"""
ids = self['id'] if self.get('id') else self['dimension']['id']
indices = []
for idx, id in enumerate(ids):
indices.append(self.get_dimension_index(id,
[d.get(id) for d in query
if id in d][0]))
return indices
def get_value_index(self, indices):
"""Converts a list of dimensions’ indices into a numeric value index.
Args:
indices(list): list of dimension's indices.
Returns:
num(int): numeric value index.
"""
size = self['size'] if self.get('size') else self['dimension']['size']
ndims = len(size)
mult = 1
num = 0
for idx, dim in enumerate(size):
mult *= size[ndims - idx] if (idx > 0) else 1
num += mult * indices[ndims - idx - 1]
return num
def get_value_by_index(self, index):
"""Converts a numeric value index into its data value.
Args:
index(int): numeric value index.
Returns:
self['value'][index](float): Numeric data value.
"""
return self['value'][index]
def get_value(self, query):
"""Converts a dimension/category list of dicts into a data value \
in three steps.
Args:
query(list): list of dicts with the desired query.
Returns:
value(float): numeric data value.
"""
indices = self.get_dimension_indices(query)
index = self.get_value_index(indices)
value = self.get_value_by_index(index)
return value
|
predicador37/pyjstat | pyjstat/pyjstat.py | Dataset.get_dimension_indices | python | def get_dimension_indices(self, query):
ids = self['id'] if self.get('id') else self['dimension']['id']
indices = []
for idx, id in enumerate(ids):
indices.append(self.get_dimension_index(id,
[d.get(id) for d in query
if id in d][0]))
return indices | Converts a dimension/category list of dicts into a list of \
dimensions’ indices.
Args:
query(list): dimension/category list of dicts.
Returns:
indices(list): list of dimensions' indices. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L655-L673 | [
"def get_dimension_index(self, name, value):\n \"\"\"Converts a dimension ID string and a categody ID string into the \\\n numeric index of that category in that dimension\n Args:\n name(string): ID string of the dimension.\n value(string): ID string of the category.\n\n Returns:\n ... | class Dataset(OrderedDict):
"""A class representing a JSONstat dataset.
"""
def __init__(self, *args, **kwargs):
super(Dataset, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL, Dataframe, JSON string, JSON file or
OrderedDict.
Args:
data: can be a Pandas Dataframe, a JSON file, a JSON string,
an OrderedDict or a URL pointing to a JSONstat file.
Returns:
An object of class Dataset populated with data.
"""
if isinstance(data, pd.DataFrame):
return cls((json.loads(
to_json_stat(data, output='dict', version='2.0'),
object_pairs_hook=OrderedDict)))
elif isinstance(data, OrderedDict):
return cls(data)
elif (isinstance(data, basestring)
and data.startswith(("http://", "https://",
"ftp://", "ftps://"))):
# requests will do the rest...
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def write(self, output='jsonstat'):
"""Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'. Default to
'jsonstat'.
Returns:
Serialized JSONstat or a Pandas Dataframe,depending on the \
'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return from_json_stat(self)[0]
else:
raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
def get_dimension_index(self, name, value):
"""Converts a dimension ID string and a categody ID string into the \
numeric index of that category in that dimension
Args:
name(string): ID string of the dimension.
value(string): ID string of the category.
Returns:
ndx[value](int): index of the category in the dimension.
"""
if 'index' not in self.get('dimension', {}). \
get(name, {}).get('category', {}):
return 0
ndx = self['dimension'][name]['category']['index']
if isinstance(ndx, list):
return ndx.index(value)
else:
return ndx[value]
def get_value_index(self, indices):
"""Converts a list of dimensions’ indices into a numeric value index.
Args:
indices(list): list of dimension's indices.
Returns:
num(int): numeric value index.
"""
size = self['size'] if self.get('size') else self['dimension']['size']
ndims = len(size)
mult = 1
num = 0
for idx, dim in enumerate(size):
mult *= size[ndims - idx] if (idx > 0) else 1
num += mult * indices[ndims - idx - 1]
return num
def get_value_by_index(self, index):
"""Converts a numeric value index into its data value.
Args:
index(int): numeric value index.
Returns:
self['value'][index](float): Numeric data value.
"""
return self['value'][index]
def get_value(self, query):
"""Converts a dimension/category list of dicts into a data value \
in three steps.
Args:
query(list): list of dicts with the desired query.
Returns:
value(float): numeric data value.
"""
indices = self.get_dimension_indices(query)
index = self.get_value_index(indices)
value = self.get_value_by_index(index)
return value
|
predicador37/pyjstat | pyjstat/pyjstat.py | Dataset.get_value_index | python | def get_value_index(self, indices):
size = self['size'] if self.get('size') else self['dimension']['size']
ndims = len(size)
mult = 1
num = 0
for idx, dim in enumerate(size):
mult *= size[ndims - idx] if (idx > 0) else 1
num += mult * indices[ndims - idx - 1]
return num | Converts a list of dimensions’ indices into a numeric value index.
Args:
indices(list): list of dimension's indices.
Returns:
num(int): numeric value index. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L675-L692 | null | class Dataset(OrderedDict):
"""A class representing a JSONstat dataset.
"""
def __init__(self, *args, **kwargs):
super(Dataset, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL, Dataframe, JSON string, JSON file or
OrderedDict.
Args:
data: can be a Pandas Dataframe, a JSON file, a JSON string,
an OrderedDict or a URL pointing to a JSONstat file.
Returns:
An object of class Dataset populated with data.
"""
if isinstance(data, pd.DataFrame):
return cls((json.loads(
to_json_stat(data, output='dict', version='2.0'),
object_pairs_hook=OrderedDict)))
elif isinstance(data, OrderedDict):
return cls(data)
elif (isinstance(data, basestring)
and data.startswith(("http://", "https://",
"ftp://", "ftps://"))):
# requests will do the rest...
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def write(self, output='jsonstat'):
"""Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'. Default to
'jsonstat'.
Returns:
Serialized JSONstat or a Pandas Dataframe,depending on the \
'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return from_json_stat(self)[0]
else:
raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
def get_dimension_index(self, name, value):
"""Converts a dimension ID string and a categody ID string into the \
numeric index of that category in that dimension
Args:
name(string): ID string of the dimension.
value(string): ID string of the category.
Returns:
ndx[value](int): index of the category in the dimension.
"""
if 'index' not in self.get('dimension', {}). \
get(name, {}).get('category', {}):
return 0
ndx = self['dimension'][name]['category']['index']
if isinstance(ndx, list):
return ndx.index(value)
else:
return ndx[value]
def get_dimension_indices(self, query):
"""Converts a dimension/category list of dicts into a list of \
dimensions’ indices.
Args:
query(list): dimension/category list of dicts.
Returns:
indices(list): list of dimensions' indices.
"""
ids = self['id'] if self.get('id') else self['dimension']['id']
indices = []
for idx, id in enumerate(ids):
indices.append(self.get_dimension_index(id,
[d.get(id) for d in query
if id in d][0]))
return indices
def get_value_by_index(self, index):
"""Converts a numeric value index into its data value.
Args:
index(int): numeric value index.
Returns:
self['value'][index](float): Numeric data value.
"""
return self['value'][index]
def get_value(self, query):
"""Converts a dimension/category list of dicts into a data value \
in three steps.
Args:
query(list): list of dicts with the desired query.
Returns:
value(float): numeric data value.
"""
indices = self.get_dimension_indices(query)
index = self.get_value_index(indices)
value = self.get_value_by_index(index)
return value
|
predicador37/pyjstat | pyjstat/pyjstat.py | Dataset.get_value | python | def get_value(self, query):
indices = self.get_dimension_indices(query)
index = self.get_value_index(indices)
value = self.get_value_by_index(index)
return value | Converts a dimension/category list of dicts into a data value \
in three steps.
Args:
query(list): list of dicts with the desired query.
Returns:
value(float): numeric data value. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L706-L720 | [
"def get_dimension_indices(self, query):\n \"\"\"Converts a dimension/category list of dicts into a list of \\\n dimensions’ indices.\n Args:\n query(list): dimension/category list of dicts.\n\n Returns:\n indices(list): list of dimensions' indices.\n\n \"\"\"\n ids = self['id'] if ... | class Dataset(OrderedDict):
"""A class representing a JSONstat dataset.
"""
def __init__(self, *args, **kwargs):
super(Dataset, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL, Dataframe, JSON string, JSON file or
OrderedDict.
Args:
data: can be a Pandas Dataframe, a JSON file, a JSON string,
an OrderedDict or a URL pointing to a JSONstat file.
Returns:
An object of class Dataset populated with data.
"""
if isinstance(data, pd.DataFrame):
return cls((json.loads(
to_json_stat(data, output='dict', version='2.0'),
object_pairs_hook=OrderedDict)))
elif isinstance(data, OrderedDict):
return cls(data)
elif (isinstance(data, basestring)
and data.startswith(("http://", "https://",
"ftp://", "ftps://"))):
# requests will do the rest...
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def write(self, output='jsonstat'):
"""Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'. Default to
'jsonstat'.
Returns:
Serialized JSONstat or a Pandas Dataframe,depending on the \
'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return from_json_stat(self)[0]
else:
raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
def get_dimension_index(self, name, value):
"""Converts a dimension ID string and a categody ID string into the \
numeric index of that category in that dimension
Args:
name(string): ID string of the dimension.
value(string): ID string of the category.
Returns:
ndx[value](int): index of the category in the dimension.
"""
if 'index' not in self.get('dimension', {}). \
get(name, {}).get('category', {}):
return 0
ndx = self['dimension'][name]['category']['index']
if isinstance(ndx, list):
return ndx.index(value)
else:
return ndx[value]
def get_dimension_indices(self, query):
"""Converts a dimension/category list of dicts into a list of \
dimensions’ indices.
Args:
query(list): dimension/category list of dicts.
Returns:
indices(list): list of dimensions' indices.
"""
ids = self['id'] if self.get('id') else self['dimension']['id']
indices = []
for idx, id in enumerate(ids):
indices.append(self.get_dimension_index(id,
[d.get(id) for d in query
if id in d][0]))
return indices
def get_value_index(self, indices):
"""Converts a list of dimensions’ indices into a numeric value index.
Args:
indices(list): list of dimension's indices.
Returns:
num(int): numeric value index.
"""
size = self['size'] if self.get('size') else self['dimension']['size']
ndims = len(size)
mult = 1
num = 0
for idx, dim in enumerate(size):
mult *= size[ndims - idx] if (idx > 0) else 1
num += mult * indices[ndims - idx - 1]
return num
def get_value_by_index(self, index):
"""Converts a numeric value index into its data value.
Args:
index(int): numeric value index.
Returns:
self['value'][index](float): Numeric data value.
"""
return self['value'][index]
|
predicador37/pyjstat | pyjstat/pyjstat.py | Dimension.read | python | def read(cls, data):
if isinstance(data, pd.DataFrame):
output = OrderedDict({})
output['version'] = '2.0'
output['class'] = 'dimension'
[label] = [x for x in list(data.columns.values) if
x not in ['id', 'index']]
output['label'] = label
output['category'] = OrderedDict({})
output['category']['index'] = data.id.tolist()
output['category']['label'] = OrderedDict(
zip(data.id.values, data[label].values))
return cls(output)
elif isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring) and data.startswith(("http://",
"https://",
"ftp://",
"ftps://")):
return cls(request(data))
elif isinstance(data,basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise | Reads data from URL, Dataframe, JSON string, JSON file
or OrderedDict.
Args:
data: can be a Pandas Dataframe, a JSON string, a JSON file,
an OrderedDict or a URL pointing to a JSONstat file.
Returns:
An object of class Dimension populated with data. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L731-L772 | [
"def request(path):\n \"\"\"Send a request to a given URL accepting JSON format and return a \\\n deserialized Python object.\n\n Args:\n path (str): The URI to be requested.\n\n Returns:\n response: Deserialized JSON Python object.\n\n Raises:\n HTTPError: the HTTP error returned b... | class Dimension(OrderedDict):
"""A class representing a JSONstat dimension.
"""
def __init__(self, *args, **kwargs):
super(Dimension, self).__init__(*args, **kwargs)
@classmethod
def write(self, output='jsonstat'):
"""Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'
Returns:
Serialized JSONstat or a Pandas Dataframe,depending on the \
'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return get_dim_label(self, self['label'], 'dimension')
else:
raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
|
predicador37/pyjstat | pyjstat/pyjstat.py | Dimension.write | python | def write(self, output='jsonstat'):
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return get_dim_label(self, self['label'], 'dimension')
else:
raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'") | Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'
Returns:
Serialized JSONstat or a Pandas Dataframe,depending on the \
'output' parameter. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L774-L790 | [
"def get_dim_label(js_dict, dim, input=\"dataset\"):\n \"\"\"Get label from a given dimension.\n\n Args:\n js_dict (dict): dictionary containing dataset data and metadata.\n dim (string): dimension name obtained from JSON file.\n\n Returns:\n dim_label(pandas.DataFrame): DataFrame with label... | class Dimension(OrderedDict):
"""A class representing a JSONstat dimension.
"""
def __init__(self, *args, **kwargs):
super(Dimension, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL, Dataframe, JSON string, JSON file
or OrderedDict.
Args:
data: can be a Pandas Dataframe, a JSON string, a JSON file,
an OrderedDict or a URL pointing to a JSONstat file.
Returns:
An object of class Dimension populated with data.
"""
if isinstance(data, pd.DataFrame):
output = OrderedDict({})
output['version'] = '2.0'
output['class'] = 'dimension'
[label] = [x for x in list(data.columns.values) if
x not in ['id', 'index']]
output['label'] = label
output['category'] = OrderedDict({})
output['category']['index'] = data.id.tolist()
output['category']['label'] = OrderedDict(
zip(data.id.values, data[label].values))
return cls(output)
elif isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring) and data.startswith(("http://",
"https://",
"ftp://",
"ftps://")):
return cls(request(data))
elif isinstance(data,basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
|
predicador37/pyjstat | pyjstat/pyjstat.py | Collection.read | python | def read(cls, data):
if isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring)\
and data.startswith(("http://", "https://", "ftp://", "ftps://")):
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise | Reads data from URL or OrderedDict.
Args:
data: can be a URL pointing to a JSONstat file, a JSON string
or an OrderedDict.
Returns:
An object of class Collection populated with data. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L801-L827 | [
"def request(path):\n \"\"\"Send a request to a given URL accepting JSON format and return a \\\n deserialized Python object.\n\n Args:\n path (str): The URI to be requested.\n\n Returns:\n response: Deserialized JSON Python object.\n\n Raises:\n HTTPError: the HTTP error returned b... | class Collection(OrderedDict):
"""A class representing a JSONstat collection.
"""
def __init__(self, *args, **kwargs):
super(Collection, self).__init__(*args, **kwargs)
@classmethod
def write(self, output='jsonstat'):
"""Writes data from a Collection object to JSONstat or list of \
Pandas Dataframes.
Args:
output(string): can accept 'jsonstat' or 'dataframe_list'
Returns:
Serialized JSONstat or a list of Pandas Dataframes,depending on \
the 'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(self)
elif output == 'dataframe_list':
df_list = []
unnest_collection(self, df_list)
return df_list
else:
raise ValueError(
"Allowed arguments are 'jsonstat' or 'dataframe_list'")
def get(self, element):
"""Gets ith element of a collection in an object of the corresponding \
class.
Args:
output(string): can accept 'jsonstat' or 'dataframe_list'
Returns:
Serialized JSONstat or a list of Pandas Dataframes,depending on \
the 'output' parameter.
"""
if self['link']['item'][element]['class'] == 'dataset':
return Dataset.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'collection':
return Collection.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'dimension':
return Dimension.read(self['link']['item'][element]['href'])
else:
raise ValueError(
"Class not allowed. Please use dataset, collection or "
"dimension'")
|
predicador37/pyjstat | pyjstat/pyjstat.py | Collection.write | python | def write(self, output='jsonstat'):
if output == 'jsonstat':
return json.dumps(self)
elif output == 'dataframe_list':
df_list = []
unnest_collection(self, df_list)
return df_list
else:
raise ValueError(
"Allowed arguments are 'jsonstat' or 'dataframe_list'") | Writes data from a Collection object to JSONstat or list of \
Pandas Dataframes.
Args:
output(string): can accept 'jsonstat' or 'dataframe_list'
Returns:
Serialized JSONstat or a list of Pandas Dataframes,depending on \
the 'output' parameter. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L829-L849 | [
"def unnest_collection(collection, df_list):\n \"\"\"Unnest collection structure extracting all its datasets and converting \\\n them to Pandas Dataframes.\n\n Args:\n collection (OrderedDict): data in JSON-stat format, previously \\\n deserialized t... | class Collection(OrderedDict):
"""A class representing a JSONstat collection.
"""
def __init__(self, *args, **kwargs):
super(Collection, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL or OrderedDict.
Args:
data: can be a URL pointing to a JSONstat file, a JSON string
or an OrderedDict.
Returns:
An object of class Collection populated with data.
"""
if isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring)\
and data.startswith(("http://", "https://", "ftp://", "ftps://")):
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def get(self, element):
"""Gets ith element of a collection in an object of the corresponding \
class.
Args:
output(string): can accept 'jsonstat' or 'dataframe_list'
Returns:
Serialized JSONstat or a list of Pandas Dataframes,depending on \
the 'output' parameter.
"""
if self['link']['item'][element]['class'] == 'dataset':
return Dataset.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'collection':
return Collection.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'dimension':
return Dimension.read(self['link']['item'][element]['href'])
else:
raise ValueError(
"Class not allowed. Please use dataset, collection or "
"dimension'")
|
predicador37/pyjstat | pyjstat/pyjstat.py | Collection.get | python | def get(self, element):
if self['link']['item'][element]['class'] == 'dataset':
return Dataset.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'collection':
return Collection.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'dimension':
return Dimension.read(self['link']['item'][element]['href'])
else:
raise ValueError(
"Class not allowed. Please use dataset, collection or "
"dimension'") | Gets ith element of a collection in an object of the corresponding \
class.
Args:
output(string): can accept 'jsonstat' or 'dataframe_list'
Returns:
Serialized JSONstat or a list of Pandas Dataframes,depending on \
the 'output' parameter. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L851-L872 | [
"def read(cls, data):\n \"\"\"Reads data from URL, Dataframe, JSON string, JSON file or\n OrderedDict.\n Args:\n data: can be a Pandas Dataframe, a JSON file, a JSON string,\n an OrderedDict or a URL pointing to a JSONstat file.\n\n Returns:\n An object of class Dataset pop... | class Collection(OrderedDict):
"""A class representing a JSONstat collection.
"""
def __init__(self, *args, **kwargs):
super(Collection, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL or OrderedDict.
Args:
data: can be a URL pointing to a JSONstat file, a JSON string
or an OrderedDict.
Returns:
An object of class Collection populated with data.
"""
if isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring)\
and data.startswith(("http://", "https://", "ftp://", "ftps://")):
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def write(self, output='jsonstat'):
"""Writes data from a Collection object to JSONstat or list of \
Pandas Dataframes.
Args:
output(string): can accept 'jsonstat' or 'dataframe_list'
Returns:
Serialized JSONstat or a list of Pandas Dataframes,depending on \
the 'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(self)
elif output == 'dataframe_list':
df_list = []
unnest_collection(self, df_list)
return df_list
else:
raise ValueError(
"Allowed arguments are 'jsonstat' or 'dataframe_list'")
|
YeoLab/anchor | anchor/simulate.py | plot_best_worst_fits | python | def plot_best_worst_fits(assignments_df, data, modality_col='Modality',
score='$\log_2 K$'):
ncols = 2
nrows = len(assignments_df.groupby(modality_col).groups.keys())
fig, axes = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(nrows*4, ncols*6))
axes_iter = axes.flat
fits = 'Highest', 'Lowest'
for modality, df in assignments_df.groupby(modality_col):
df = df.sort_values(score)
color = MODALITY_TO_COLOR[modality]
for fit in fits:
if fit == 'Highest':
ids = df['Feature ID'][-10:]
else:
ids = df['Feature ID'][:10]
fit_psi = data[ids]
tidy_fit_psi = fit_psi.stack().reset_index()
tidy_fit_psi = tidy_fit_psi.rename(columns={'level_0': 'Sample ID',
'level_1':
'Feature ID',
0: '$\Psi$'})
if tidy_fit_psi.empty:
continue
ax = six.next(axes_iter)
violinplot(x='Feature ID', y='$\Psi$', data=tidy_fit_psi,
color=color, ax=ax)
ax.set(title='{} {} {}'.format(fit, score, modality), xticks=[])
sns.despine()
fig.tight_layout() | Violinplots of the highest and lowest scoring of each modality | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/simulate.py#L158-L194 | [
"def violinplot(x=None, y=None, data=None, bw=0.2, scale='width',\n inner=None, ax=None, **kwargs):\n \"\"\"Wrapper around Seaborn's Violinplot specifically for [0, 1] ranged data\n\n What's different:\n - bw = 0.2: Sets bandwidth to be small and the same between datasets\n - scale = 'widt... |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import six
from .visualize import violinplot, MODALITY_ORDER, MODALITY_TO_COLOR, barplot
def add_noise(data, iteration_per_noise=100,
              noise_percentages=np.arange(0, 101, step=10), plot=True,
              violinplot_kws=None, figure_prefix='anchor_simulation'):
    """Add uniform noise to a fraction of the rows of ``data``.

    For every percentage in ``noise_percentages``, ``iteration_per_noise``
    noisy copies of ``data`` are made, each with that percentage of its
    rows replaced by Uniform(0, 1) draws.  All copies are concatenated
    column-wise, with columns renamed to record noise level and iteration.

    Parameters
    ----------
    data : pandas.DataFrame
        (samples, features) matrix of values in [0, 1]
    iteration_per_noise : int, optional
        Number of noisy replicates per noise percentage
    noise_percentages : iterable of numbers, optional
        Percentages (0-100) of rows to replace with noise
    plot : bool, optional
        If True, save one violinplot figure per noise percentage
    violinplot_kws : dict or None, optional
        Extra keyword arguments for the violinplot
    figure_prefix : str, optional
        Prefix of saved figure filenames

    Returns
    -------
    pandas.DataFrame
        All noisy copies, concatenated along columns
    """
    data_dfs = []
    violinplot_kws = {} if violinplot_kws is None else violinplot_kws
    width = len(data.columns) * 0.75
    alpha = max(0.05, 1. / iteration_per_noise)
    for noise_percentage in noise_percentages:
        if plot:
            fig, ax = plt.subplots(figsize=(width, 3))
        for iteration in range(iteration_per_noise):
            if iteration > 0 and noise_percentage == 0:
                # 0% noise is deterministic; a single copy is enough
                continue
            noisy_data = data.copy()
            # BUGFIX: perturb `noise_percentage` percent of the ROWS.
            # The original drew exactly `noise_percentage` row labels,
            # which is only correct when the data has exactly 100 rows,
            # and built float-valued shapes with `/`, which breaks
            # `reshape` on Python 3 (it also used the removed
            # `np.product`).
            n_noisy_rows = int(noisy_data.shape[0] * noise_percentage
                               // 100)
            noise_ind = np.random.choice(noisy_data.index,
                                         size=n_noisy_rows,
                                         replace=False)
            if n_noisy_rows > 0:
                noisy_data.loc[noise_ind] = np.random.uniform(
                    low=0., high=1.,
                    size=(n_noisy_rows, noisy_data.shape[1]))
            renamer = dict(
                (col, '{}_noise{}_iter{}'.format(
                    col, noise_percentage, iteration))
                for col in noisy_data.columns)
            renamed = noisy_data.rename(columns=renamer)
            data_dfs.append(renamed)
            if plot:
                noisy_data_tidy = noisy_data.unstack()
                noisy_data_tidy = noisy_data_tidy.reset_index()
                noisy_data_tidy = noisy_data_tidy.rename(
                    columns={'level_0': 'Feature ID',
                             'level_1': 'Sample ID',
                             0: '$\Psi$'})
                violinplot(x='Feature ID', y='$\Psi$',
                           data=noisy_data_tidy, ax=ax,
                           **violinplot_kws)
        if plot:
            if noise_percentage > 0:
                # Fade the overlapping violins of the many iterations
                for c in ax.collections:
                    c.set_alpha(alpha)
            ax.set(ylim=(0, 1), title='{}% Uniform Noise'.format(
                noise_percentage), yticks=(0, 0.5, 1), ylabel='$\Psi$',
                xlabel='')
            plt.setp(ax.get_xticklabels(), rotation=90)
            sns.despine()
            fig.tight_layout()
            fig.savefig('{}_noise_percentage_{}.pdf'.format(
                figure_prefix, noise_percentage))
    all_noisy_data = pd.concat(data_dfs, axis=1)
    return all_noisy_data
class ModalityEvaluator(object):
    """Bundle of the inputs and outputs of one modality-estimation run."""

    def __init__(self, estimator, data, waypoints, fitted, predicted):
        # Keep everything that went into, and came out of, the estimator
        # so downstream analyses can be rerun without refitting.
        self.estimator = estimator
        self.data = data
        self.waypoints = waypoints
        self.fitted = fitted
        self.predicted = predicted
def evaluate_estimator(estimator, data, waypoints=None, figure_prefix=''):
    """Fit ``estimator`` to ``data`` and save diagnostic figures and CSVs.

    Saves violinplots of randomly chosen features per predicted modality,
    a modality barplot, best/worst-fit violinplots, and CSV dumps of the
    fitted scores and predictions, all named with ``figure_prefix``.

    Parameters
    ----------
    estimator : object
        Must provide ``fit``, ``predict`` and a ``score_name`` attribute
    data : pandas.DataFrame
        (samples, features) matrix to evaluate on
    waypoints : object, optional
        Stored on the returned evaluator; not otherwise used here
    figure_prefix : str, optional
        Prefix for all saved figure/CSV filenames

    Returns
    -------
    ModalityEvaluator
        Container with the estimator, data, waypoints, fitted scores
        and predictions
    """
    #
    # estimator.violinplot(n=1e3)
    # fig = plt.gcf()
    # for ax in fig.axes:
    #     ax.set(yticks=[0, 0.5, 1], xlabel='')
    #     # xticklabels =
    #     # ax.set_xticklabels(fontsize=20)
    # fig.tight_layout()
    # sns.despine()
    # fig.savefig('{}_modality_parameterization.pdf'.format(figure_prefix))
    fitted = estimator.fit(data)
    predicted = estimator.predict(fitted)
    predicted.name = 'Predicted Modality'
    # Tidy the (modality, feature) score matrix for merging and plotting
    fitted_tidy = fitted.stack().reset_index()
    fitted_tidy = fitted_tidy.rename(
        columns={'level_1': 'Feature ID', 'level_0': "Modality",
                 0: estimator.score_name}, copy=False)
    predicted_tidy = predicted.to_frame().reset_index()
    predicted_tidy = predicted_tidy.rename(columns={'index': 'Feature ID'})
    # Keep only each feature's score under its own predicted modality
    predicted_tidy = predicted_tidy.merge(
        fitted_tidy, left_on=['Feature ID', 'Predicted Modality'],
        right_on=['Feature ID', 'Modality'])
    # Make categorical so they are plotted in the correct order
    predicted_tidy['Predicted Modality'] = \
        pd.Categorical(predicted_tidy['Predicted Modality'],
                       categories=MODALITY_ORDER, ordered=True)
    predicted_tidy['Modality'] = \
        pd.Categorical(predicted_tidy['Modality'],
                       categories=MODALITY_ORDER, ordered=True)
    grouped = data.groupby(predicted, axis=1)
    # Violinplots of `size` randomly chosen features per predicted modality
    size = 5
    fig, axes = plt.subplots(figsize=(size*0.75, 8), nrows=len(grouped))
    for ax, (modality, df) in zip(axes, grouped):
        random_ids = np.random.choice(df.columns, replace=False, size=size)
        random_df = df[random_ids]
        tidy_random = random_df.stack().reset_index()
        tidy_random = tidy_random.rename(columns={'level_0': 'sample_id',
                                                  'level_1': 'event_id',
                                                  0: '$\Psi$'})
        sns.violinplot(x='event_id', y='$\Psi$', data=tidy_random,
                       color=MODALITY_TO_COLOR[modality], ax=ax,
                       inner=None, bw=0.2, scale='width')
        ax.set(ylim=(0, 1), yticks=(0, 0.5, 1), xticks=[], xlabel='',
               title=modality)
    sns.despine()
    fig.tight_layout()
    fig.savefig('{}_random_estimated_modalities.pdf'.format(figure_prefix))
    g = barplot(predicted_tidy, hue='Modality')
    g.savefig('{}_modalities_barplot.pdf'.format(figure_prefix))
    plot_best_worst_fits(predicted_tidy, data, modality_col='Modality',
                         score=estimator.score_name)
    fig = plt.gcf()
    fig.savefig('{}_best_worst_fit_violinplots.pdf'.format(figure_prefix))
    fitted.to_csv('{}_fitted.csv'.format(figure_prefix))
    predicted.to_csv('{}_predicted.csv'.format(figure_prefix))
    result = ModalityEvaluator(estimator, data, waypoints, fitted, predicted)
    return result
|
YeoLab/anchor | anchor/visualize.py | violinplot | python | def violinplot(x=None, y=None, data=None, bw=0.2, scale='width',
inner=None, ax=None, **kwargs):
if ax is None:
ax = plt.gca()
sns.violinplot(x, y, data=data, bw=bw, scale=scale, inner=inner, ax=ax,
**kwargs)
ax.set(ylim=(0, 1), yticks=(0, 0.5, 1))
return ax | Wrapper around Seaborn's Violinplot specifically for [0, 1] ranged data
What's different:
- bw = 0.2: Sets bandwidth to be small and the same between datasets
- scale = 'width': Sets the width of all violinplots to be the same
- inner = None: Don't plot a boxplot or points inside the violinplot | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/visualize.py#L33-L48 | null | # -*- coding: utf-8 -*-
"""See log bayes factors which led to modality categorization"""
import locale
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from .names import NEAR_ZERO, NEAR_HALF, NEAR_ONE, BIMODAL, \
NULL_MODEL
# Unpack seaborn's 'deep' palette into named colors for the modalities
darkblue, green, red, purple, yellow, lightblue = sns.color_palette('deep')
# Canonical plotting order of the five modalities
MODALITY_ORDER = [NEAR_ZERO, BIMODAL, NEAR_ONE, NEAR_HALF, NULL_MODEL]
# One fixed color per modality; the null model is deliberately grey
MODALITY_TO_COLOR = {NEAR_ZERO: lightblue, NEAR_HALF: yellow, NEAR_ONE: red,
                     BIMODAL: purple, NULL_MODEL: 'lightgrey'}
# Palette listed in MODALITY_ORDER, for seaborn `palette=` arguments
MODALITY_PALETTE = [MODALITY_TO_COLOR[m] for m in MODALITY_ORDER]
# Light-to-dark colormap per modality
MODALITY_TO_CMAP = {
    NEAR_ZERO: sns.light_palette(MODALITY_TO_COLOR[NEAR_ZERO], as_cmap=True),
    NEAR_HALF: sns.light_palette(MODALITY_TO_COLOR[NEAR_HALF], as_cmap=True),
    NEAR_ONE: sns.light_palette(MODALITY_TO_COLOR[NEAR_ONE], as_cmap=True),
    BIMODAL: sns.light_palette(MODALITY_TO_COLOR[BIMODAL], as_cmap=True),
    NULL_MODEL: mpl.cm.Greys}
# Convenience kwargs for seaborn factorplot calls
MODALITY_FACTORPLOT_KWS = dict(hue_order=MODALITY_ORDER,
                               palette=MODALITY_PALETTE)
class _ModelLoglikPlotter(object):
    """Three-panel figure for one feature: observed-data violin,
    per-parameterization log-likelihoods, and per-modality evidence bars."""
    def __init__(self):
        # Layout on a 3x5 grid: 1 column for the violin, 3 for the
        # log-likelihood traces, 1 for the evidence bars
        self.fig = plt.figure(figsize=(5 * 2, 4))
        self.ax_violin = plt.subplot2grid((3, 5), (0, 0), rowspan=3, colspan=1)
        self.ax_loglik = plt.subplot2grid((3, 5), (0, 1), rowspan=3, colspan=3)
        self.ax_bayesfactor = plt.subplot2grid((3, 5), (0, 4), rowspan=3,
                                               colspan=1)
    def plot(self, feature, logliks, logsumexps, log2bf_thresh, renamed=''):
        """Draw all three panels for ``feature`` and return ``self``.
        Parameters
        ----------
        feature : pandas.Series
            Observed values of one feature
        logliks : pandas.DataFrame
            Tidy log-likelihoods with 'Modality' and '$\\log$ Likelihood'
            columns
        logsumexps : pandas.Series
            Per-modality total evidence; its idxmax is the assignment
        log2bf_thresh : number
            Threshold drawn as a dashed line across the evidence bars
        renamed : str, optional
            Alternative feature name shown next to the original
        """
        # The assigned modality is the one with the most evidence
        modality = logsumexps.idxmax()
        self.logliks = logliks
        self.logsumexps = logsumexps
        x = feature.to_frame()
        if feature.name is None:
            feature.name = 'Feature'
        x['sample_id'] = feature.name
        violinplot(x='sample_id', y=feature.name, data=x, ax=self.ax_violin,
                   color=MODALITY_TO_COLOR[modality])
        self.ax_violin.set(xticks=[], ylabel='')
        # One log-likelihood trace per modality, in that modality's color
        for name, loglik in logliks.groupby('Modality')[r'$\log$ Likelihood']:
            # print name,
            self.ax_loglik.plot(loglik, 'o-', label=name, alpha=0.75,
                                color=MODALITY_TO_COLOR[name])
        self.ax_loglik.legend(loc='best')
        self.ax_loglik.set(ylabel=r'$\log$ Likelihood',
                           xlabel='Parameterizations',
                           title='Assignment: {}'.format(modality))
        # xlabel drawn in white ('phantom') -- effectively invisible;
        # presumably a spacing hack, confirm before removing
        self.ax_loglik.set_xlabel('phantom', color='white')
        for i, (name, height) in enumerate(logsumexps.iteritems()):
            self.ax_bayesfactor.bar(i, height, label=name,
                                    color=MODALITY_TO_COLOR[name])
        # Dashed threshold line spanning the full width of the bar panel
        xmin, xmax = self.ax_bayesfactor.get_xlim()
        self.ax_bayesfactor.hlines(log2bf_thresh, xmin, xmax,
                                   linestyle='dashed')
        self.ax_bayesfactor.set(ylabel='$\log K$', xticks=[])
        if renamed:
            text = '{} ({})'.format(feature.name, renamed)
        else:
            text = feature.name
        self.fig.text(0.5, .025, text, fontsize=10, ha='center',
                      va='bottom')
        sns.despine()
        self.fig.tight_layout()
        return self
class ModalitiesViz(object):
    """Visualize results of modality assignments"""
    # Class-level bindings of the module-level constants
    modality_order = MODALITY_ORDER
    modality_to_color = MODALITY_TO_COLOR
    modality_palette = MODALITY_PALETTE
    def bar(self, counts, phenotype_to_color=None, ax=None, percentages=True):
        """Draw barplots grouped by modality of modality percentage per group
        Parameters
        ----------
        counts : pandas.DataFrame
            (groups, modalities) table of event counts
        phenotype_to_color : dict
            Maps each group (row label) to a bar color; it is indexed
            for every group, so in practice it must be provided
        ax : matplotlib.Axes, optional
            Axes to draw on; defaults to the current axes
        percentages : bool, optional
            If True (default), convert each group's counts to percentages
        Returns
        -------
        None; draws on ``ax`` as a side effect
        """
        if percentages:
            # Row-normalize so each group's modalities sum to 100
            counts = 100 * (counts.T / counts.T.sum()).T
        # with sns.set(style='whitegrid'):
        if ax is None:
            ax = plt.gca()
        full_width = 0.8
        width = full_width / counts.shape[0]
        for i, (group, series) in enumerate(counts.iterrows()):
            left = np.arange(len(self.modality_order)) + i * width
            # NOTE(review): the comprehension variable `i` shadows the
            # outer enumerate index (harmless here, but confusing)
            height = [series[i] if i in series else 0
                      for i in self.modality_order]
            color = phenotype_to_color[group]
            ax.bar(left, height, width=width, color=color, label=group,
                   linewidth=.5, edgecolor='k')
        ylabel = 'Percentage of events' if percentages else 'Number of events'
        ax.set_ylabel(ylabel)
        ax.set_xticks(np.arange(len(self.modality_order)) + full_width / 2)
        ax.set_xticklabels(self.modality_order)
        ax.set_xlabel('Splicing modality')
        ax.set_xlim(0, len(self.modality_order))
        ax.legend(loc='best')
        ax.grid(axis='y', linestyle='-', linewidth=0.5)
        sns.despine()
    def event_estimation(self, event, logliks, logsumexps, renamed=''):
        """Show the values underlying bayesian modality estimations of an event
        Parameters
        ----------
        event : pandas.Series
            Observed values of a single event
        logliks : pandas.DataFrame
            Tidy per-parameterization log-likelihoods of the event
        logsumexps : pandas.Series
            Per-modality total evidence; its argmax is the assignment
        renamed : str, optional
            Alternative event name shown in the figure label
        Returns
        -------
        _ModelLoglikPlotter
            The plotter holding the drawn figure
        """
        plotter = _ModelLoglikPlotter()
        # NOTE(review): _ModelLoglikPlotter.plot's fourth positional
        # parameter is `log2bf_thresh`, a number drawn as a dashed
        # threshold line, but a color *dict* is passed here -- this looks
        # like a bug; confirm the intended threshold value.
        plotter.plot(event, logliks, logsumexps, self.modality_to_color,
                     renamed=renamed)
        return plotter
def annotate_bars(x, group_col, percentage_col, modality_col, count_col,
                  **kwargs):
    """Write each bar's raw feature count above it.

    Designed to be called through ``seaborn.FacetGrid.map_dataframe``,
    which passes the facet's data subset via the ``data`` keyword.

    Parameters
    ----------
    x : str
        Name of the column plotted on the x-axis
    group_col : str
        Column defining the bar groups along the x-axis
    percentage_col : str
        Column holding each bar's height (the y position of the label)
    modality_col : str
        Column holding the modality of each bar within a group
    count_col : str
        Column holding the raw count to write above the bar
    """
    data = kwargs.pop('data')
    ax = plt.gca()
    # One bar per modality; each group of bars spans a width of 0.8
    # (was hard-coded as ``0.8/5.``)
    width = 0.8 / len(MODALITY_ORDER)
    x_base = -.49 - width / 2.5
    for group, group_df in data.groupby(group_col):
        i = 0
        modality_grouped = group_df.groupby(modality_col)
        for modality in MODALITY_ORDER:
            i += 1
            try:
                modality_df = modality_grouped.get_group(modality)
            except KeyError:
                # No bar for this modality in this group; leave the slot
                # empty but still advance the bar index
                continue
            x_position = x_base + width * i + width / 2
            y_position = modality_df[percentage_col]
            try:
                value = modality_df[count_col].values[0]
                # Thousands separators per the current locale.
                # locale.format() was removed in Python 3.12;
                # format_string() is the long-standing equivalent.
                formatted = locale.format_string('%d', value, grouping=True)
                ax.annotate(formatted, (x_position, y_position),
                            textcoords='offset points', xytext=(0, 2),
                            ha='center', va='bottom', fontsize=12)
            except IndexError:
                continue
        x_base += 1
def barplot(modalities_tidy, x=None, y='Percentage of Features', order=None,
            hue='Assigned Modality', **factorplot_kws):
    """Bar plot of the percentage of features assigned to each modality.

    Parameters
    ----------
    modalities_tidy : pandas.DataFrame
        Tidy dataframe with one row per feature, containing at least the
        ``hue`` column (plus ``x``/``row``/``col`` columns if used)
    x : str, optional
        Column to group bars along the x-axis; if None, a single
        unlabeled group is plotted
    y : str, optional
        Name given to the computed percentage column
    order : list, optional
        Order of ``x`` categories; requires ``x`` to be given
    hue : str, optional
        Column holding each feature's assigned modality
    **factorplot_kws
        Passed through to ``seaborn.factorplot`` (may include ``row``
        and ``col`` facet columns)

    Returns
    -------
    seaborn.FacetGrid

    Raises
    ------
    ValueError
        If ``order`` is given without ``x``
    """
    factorplot_kws.setdefault('hue_order', MODALITY_ORDER)
    factorplot_kws.setdefault('palette', MODALITY_PALETTE)
    factorplot_kws.setdefault('size', 3)
    factorplot_kws.setdefault('aspect', 3)
    factorplot_kws.setdefault('linewidth', 1)
    if order is not None and x is None:
        raise ValueError('If specifying "order", "x" must also '
                         'be specified.')
    # Count features per (x, hue[, row, col]) cell, then turn the counts
    # into percentages within each non-hue grouping
    groupby = [hue]
    groupby_minus_hue = []
    if x is not None:
        groupby = [x] + groupby
        groupby_minus_hue.append(x)
    if 'row' in factorplot_kws:
        groupby = groupby + [factorplot_kws['row']]
        groupby_minus_hue.append(factorplot_kws['row'])
    if 'col' in factorplot_kws:
        groupby = groupby + [factorplot_kws['col']]
        groupby_minus_hue.append(factorplot_kws['col'])
    modality_counts = modalities_tidy.groupby(
        groupby).size().reset_index()
    modality_counts = modality_counts.rename(columns={0: 'Features'})
    if groupby_minus_hue:
        # (renamed lambda variable: it previously shadowed parameter `x`)
        modality_counts[y] = modality_counts.groupby(
            groupby_minus_hue)['Features'].apply(
            lambda counts: 100 * counts / counts.astype(float).sum())
    else:
        modality_counts[y] = 100 * modality_counts['Features']\
            / modality_counts['Features'].sum()
    if order is not None:
        # Categorical so bars appear in the requested order
        modality_counts[x] = pd.Categorical(
            modality_counts[x], categories=order,
            ordered=True)
    if x is None:
        # Single dummy group so factorplot still has an x variable
        x = ''
        modality_counts[x] = x
    g = sns.factorplot(y=y, x=x,
                       hue=hue, kind='bar', data=modality_counts,
                       legend=False, **factorplot_kws)
    # Hacky workaround to add numeric annotations to the plot
    g.map_dataframe(annotate_bars, x, group_col=x,
                    modality_col=hue, count_col='Features',
                    percentage_col=y)
    g.add_legend(label_order=MODALITY_ORDER, title='Modalities')
    for ax in g.axes.flat:
        ax.locator_params('y', nbins=5)
        if ax.is_first_col():
            ax.set(ylabel=y)
    return g
|
YeoLab/anchor | anchor/visualize.py | ModalitiesViz.bar | python | def bar(self, counts, phenotype_to_color=None, ax=None, percentages=True):
if percentages:
counts = 100 * (counts.T / counts.T.sum()).T
# with sns.set(style='whitegrid'):
if ax is None:
ax = plt.gca()
full_width = 0.8
width = full_width / counts.shape[0]
for i, (group, series) in enumerate(counts.iterrows()):
left = np.arange(len(self.modality_order)) + i * width
height = [series[i] if i in series else 0
for i in self.modality_order]
color = phenotype_to_color[group]
ax.bar(left, height, width=width, color=color, label=group,
linewidth=.5, edgecolor='k')
ylabel = 'Percentage of events' if percentages else 'Number of events'
ax.set_ylabel(ylabel)
ax.set_xticks(np.arange(len(self.modality_order)) + full_width / 2)
ax.set_xticklabels(self.modality_order)
ax.set_xlabel('Splicing modality')
ax.set_xlim(0, len(self.modality_order))
ax.legend(loc='best')
ax.grid(axis='y', linestyle='-', linewidth=0.5)
sns.despine() | Draw barplots grouped by modality of modality percentage per group
Parameters
----------
Returns
-------
Raises
------ | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/visualize.py#L110-L149 | null | class ModalitiesViz(object):
"""Visualize results of modality assignments"""
modality_order = MODALITY_ORDER
modality_to_color = MODALITY_TO_COLOR
modality_palette = MODALITY_PALETTE
def event_estimation(self, event, logliks, logsumexps, renamed=''):
"""Show the values underlying bayesian modality estimations of an event
Parameters
----------
Returns
-------
Raises
------
"""
plotter = _ModelLoglikPlotter()
plotter.plot(event, logliks, logsumexps, self.modality_to_color,
renamed=renamed)
return plotter
|
YeoLab/anchor | anchor/visualize.py | ModalitiesViz.event_estimation | python | def event_estimation(self, event, logliks, logsumexps, renamed=''):
plotter = _ModelLoglikPlotter()
plotter.plot(event, logliks, logsumexps, self.modality_to_color,
renamed=renamed)
return plotter | Show the values underlying bayesian modality estimations of an event
Parameters
----------
Returns
-------
Raises
------ | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/visualize.py#L151-L168 | [
"def plot(self, feature, logliks, logsumexps, log2bf_thresh, renamed=''):\n modality = logsumexps.idxmax()\n\n self.logliks = logliks\n self.logsumexps = logsumexps\n\n x = feature.to_frame()\n if feature.name is None:\n feature.name = 'Feature'\n x['sample_id'] = feature.name\n\n violin... | class ModalitiesViz(object):
"""Visualize results of modality assignments"""
modality_order = MODALITY_ORDER
modality_to_color = MODALITY_TO_COLOR
modality_palette = MODALITY_PALETTE
def bar(self, counts, phenotype_to_color=None, ax=None, percentages=True):
"""Draw barplots grouped by modality of modality percentage per group
Parameters
----------
Returns
-------
Raises
------
"""
if percentages:
counts = 100 * (counts.T / counts.T.sum()).T
# with sns.set(style='whitegrid'):
if ax is None:
ax = plt.gca()
full_width = 0.8
width = full_width / counts.shape[0]
for i, (group, series) in enumerate(counts.iterrows()):
left = np.arange(len(self.modality_order)) + i * width
height = [series[i] if i in series else 0
for i in self.modality_order]
color = phenotype_to_color[group]
ax.bar(left, height, width=width, color=color, label=group,
linewidth=.5, edgecolor='k')
ylabel = 'Percentage of events' if percentages else 'Number of events'
ax.set_ylabel(ylabel)
ax.set_xticks(np.arange(len(self.modality_order)) + full_width / 2)
ax.set_xticklabels(self.modality_order)
ax.set_xlabel('Splicing modality')
ax.set_xlim(0, len(self.modality_order))
ax.legend(loc='best')
ax.grid(axis='y', linestyle='-', linewidth=0.5)
sns.despine()
|
YeoLab/anchor | anchor/binning.py | BinnedModalities.predict | python | def predict(self, fitted):
if fitted.shape[0] != len(self.modalities):
raise ValueError("This data doesn't look like it had the distance "
"between it and the five modalities calculated")
return fitted.idxmin() | Assign the most likely modality given the fitted data
Parameters
----------
fitted : pandas.DataFrame or pandas.Series
            Either a (n_modalities, features) DataFrame or (n_modalities,)
Series, either of which will return the best modality for each
feature. | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/binning.py#L37-L50 | null | class BinnedModalities(object):
    # Modalities considered, in canonical plotting order
    modalities = MODALITY_ORDER
    # Name of the score produced by `fit`
    score_name = 'Jensen-Shannon Divergence'
    def __init__(self, bins=(0, 1./3, 2./3, 1)):
        """Build the idealized binned distribution of each modality.

        Parameters
        ----------
        bins : sequence of 4 numbers, optional
            Edges of the three bins used to histogram each feature
            (defaults to thirds of the unit interval)

        Raises
        ------
        ValueError
            If ``bins`` does not contain exactly 4 edges
        """
        if len(bins) != 4:
            raise ValueError('Length of "bins" must be exactly 4 bin edges')
        self.bins = bins
        self.bin_ranges = bin_range_strings(self.bins)
        # The uniform (null) model puts each bin's probability equal to
        # the bin's width
        uniform_probabilities = [stop-start for start, stop in
                                 zip(bins, bins[1:])]
        # One column per modality; rows are the three bins, values the
        # idealized probability mass in each bin
        self.desired_distributions = pd.DataFrame(
            np.array([[1, 0, 0], [0.5, 0, 0.5],
                      [0, 0, 1], [0, 1, 0], uniform_probabilities]).T,
            index=self.bin_ranges, columns=self.modalities)
def fit(self, data):
binned = binify(data, bins=self.bins)
if isinstance(binned, pd.DataFrame):
fitted = binned.apply(lambda x: self.desired_distributions.apply(
lambda y: jsd(x, y)))
else:
fitted = self.desired_distributions.apply(lambda x: jsd(x, binned))
fitted.name = self.score_name
return fitted
def fit_predict(self, data):
return self.predict(self.fit(data))
|
YeoLab/anchor | anchor/model.py | ModalityModel.logliks | python | def logliks(self, x):
x = x.copy()
# Replace exactly 0 and exactly 1 values with a very small number
# (machine epsilon, the smallest number that this computer is capable
# of storing) because 0 and 1 are not in the Beta distribution.
x[x == 0] = VERY_SMALL_NUMBER
x[x == 1] = 1 - VERY_SMALL_NUMBER
return np.array([np.log(prob) + rv.logpdf(x[np.isfinite(x)]).sum()
for prob, rv in
zip(self.prob_parameters, self.rvs)]) | Calculate log-likelihood of a feature x for each model
        Converts values that are exactly 0 or exactly 1 to the nearest
        representable neighbors, because 0 and 1 lie outside the beta
        distribution's open support.
Parameters
----------
x : numpy.array-like
A single vector to estimate the log-likelihood of the models on
Returns
-------
logliks : numpy.array
Log-likelihood of these data in each member of the model's family | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/model.py#L71-L97 | null | class ModalityModel(object):
"""Object to model modalities from beta distributions"""
def __init__(self, alphas, betas, ylabel='$\Psi$'):
"""Model a family of beta distributions
Parameters
----------
alphas : float or list-like
List of values for the alpha parameter of the Beta distribution. If
this is a single value (not a list), it will be assumed that this
value is constant, and will be propagated through to have as many
values as the "betas" parameter
betas : float or list-like
List of values for the alpha parameter of the Beta distribution. If
this is a single value (not a list), it will be assumed that this
value is constant, and will be propagated through to have as many
values as the "alphas" parameter
ylabel : str, optional
Name of the value you're estimating. Originally developed for
alternative splicing "percent spliced in"/"Psi" scores, the default
is the Greek letter Psi
"""
if not isinstance(alphas, Iterable) and not isinstance(betas,
Iterable):
alphas = [alphas]
betas = [betas]
self.ylabel = ylabel
self.alphas = np.array(alphas) if isinstance(alphas, Iterable) \
else np.ones(len(betas)) * alphas
self.betas = np.array(betas) if isinstance(betas, Iterable) \
else np.ones(len(alphas)) * betas
self.rvs = [stats.beta(a, b) for a, b in
zip(self.alphas, self.betas)]
self.scores = np.ones(self.alphas.shape).astype(float)
self.prob_parameters = self.scores/self.scores.sum()
def __eq__(self, other):
"""Test equality with other model"""
return np.all(self.alphas == other.alphas) \
and np.all(self.betas == other.betas) \
and np.all(self.prob_parameters == other.prob_parameters)
def __ne__(self, other):
"""Test not equality with other model"""
return not self.__eq__(other)
def single_feature_logliks(self, feature):
data = zip(self.logliks(feature), self.alphas, self.betas)
return pd.DataFrame(data, columns=SINGLE_FEATURE_COLUMNS)
def logsumexp_logliks(self, x):
"""Calculate how well this model fits these data
Parameters
----------
x : numpy.array-like
A single vector to estimate the log-likelihood of the models on
Returns
-------
logsumexp_logliks : float
Total log-likelihood of this model given this data
"""
return logsumexp(self.logliks(x))
@staticmethod
def nice_number_string(number, decimal_places=2):
"""Convert floats to either integers or a nice looking fraction"""
if number == np.round(number):
return str(int(number))
elif number < 1 and number > 0:
inverse = 1 / number
if int(inverse) == np.round(inverse):
return r'\frac{{1}}{{{}}}'.format(int(inverse))
else:
template = '{{:.{0}}}'.format(decimal_places)
return template.format(number)
    def violinplot(self, n=1000, **kwargs):
        """Plot violins of each distribution in the model family

        Parameters
        ----------
        n : int
            Number of random variables to generate
        kwargs : dict or keywords
            Any keyword arguments to seaborn.violinplot

        Returns
        -------
        ax : matplotlib.Axes object
            Axes object with violins plotted
        """
        kwargs.setdefault('palette', 'Purples')
        dfs = []
        # Sample n values per parameterization and label each column
        # with its (alpha, beta) pair for the x-axis
        for rv in self.rvs:
            psi = rv.rvs(n)
            df = pd.Series(psi, name=self.ylabel).to_frame()
            alpha, beta = rv.args
            alpha = self.nice_number_string(alpha, decimal_places=2)
            beta = self.nice_number_string(beta, decimal_places=2)
            df['parameters'] = '$\\alpha = {0}$\n$\\beta = {1}$'.format(
                alpha, beta)
            dfs.append(df)
        data = pd.concat(dfs)
        # Use the caller's axes when provided; otherwise size the figure
        # by the number of parameterizations
        if 'ax' not in kwargs:
            fig, ax = plt.subplots(figsize=(len(self.alphas)*0.625, 4))
        else:
            ax = kwargs.pop('ax')
        ax = violinplot(x='parameters', y=self.ylabel, data=data,
                        ax=ax, **kwargs)
        sns.despine(ax=ax)
        return ax
|
YeoLab/anchor | anchor/model.py | ModalityModel.nice_number_string | python | def nice_number_string(number, decimal_places=2):
if number == np.round(number):
return str(int(number))
elif number < 1 and number > 0:
inverse = 1 / number
if int(inverse) == np.round(inverse):
return r'\frac{{1}}{{{}}}'.format(int(inverse))
else:
template = '{{:.{0}}}'.format(decimal_places)
return template.format(number) | Convert floats to either integers or a nice looking fraction | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/model.py#L119-L129 | null | class ModalityModel(object):
"""Object to model modalities from beta distributions"""
    def __init__(self, alphas, betas, ylabel='$\Psi$'):
        """Model a family of beta distributions
        Parameters
        ----------
        alphas : float or list-like
            List of values for the alpha parameter of the Beta distribution. If
            this is a single value (not a list), it will be assumed that this
            value is constant, and will be propagated through to have as many
            values as the "betas" parameter
        betas : float or list-like
            List of values for the beta parameter of the Beta distribution. If
            this is a single value (not a list), it will be assumed that this
            value is constant, and will be propagated through to have as many
            values as the "alphas" parameter
        ylabel : str, optional
            Name of the value you're estimating. Originally developed for
            alternative splicing "percent spliced in"/"Psi" scores, the default
            is the Greek letter Psi
        """
        # Two scalars define a single-member family
        if not isinstance(alphas, Iterable) and not isinstance(betas,
                                                               Iterable):
            alphas = [alphas]
            betas = [betas]
        self.ylabel = ylabel
        # A scalar parameter is broadcast against the iterable one
        self.alphas = np.array(alphas) if isinstance(alphas, Iterable) \
            else np.ones(len(betas)) * alphas
        self.betas = np.array(betas) if isinstance(betas, Iterable) \
            else np.ones(len(alphas)) * betas
        # One frozen beta distribution per (alpha, beta) pair
        self.rvs = [stats.beta(a, b) for a, b in
                    zip(self.alphas, self.betas)]
        # All parameterizations start out equally probable
        self.scores = np.ones(self.alphas.shape).astype(float)
        self.prob_parameters = self.scores/self.scores.sum()
    def __eq__(self, other):
        """Test equality with other model"""
        return np.all(self.alphas == other.alphas) \
            and np.all(self.betas == other.betas) \
            and np.all(self.prob_parameters == other.prob_parameters)
    def __ne__(self, other):
        """Test not equality with other model"""
        return not self.__eq__(other)
    def logliks(self, x):
        """Calculate log-likelihood of a feature x for each model
        Converts values that are exactly 0 or exactly 1 to the nearest
        representable neighbors, because they lie outside the beta
        distribution's open support.
        Parameters
        ----------
        x : numpy.array-like
            A single vector to estimate the log-likelihood of the models on
        Returns
        -------
        logliks : numpy.array
            Log-likelihood of these data in each member of the model's family
        """
        x = x.copy()
        # Replace exactly 0 and exactly 1 values with a very small number
        # (machine epsilon, the smallest number that this computer is capable
        # of storing) because 0 and 1 are not in the Beta distribution.
        x[x == 0] = VERY_SMALL_NUMBER
        x[x == 1] = 1 - VERY_SMALL_NUMBER
        # Weight each parameterization's likelihood by its prior probability
        return np.array([np.log(prob) + rv.logpdf(x[np.isfinite(x)]).sum()
                         for prob, rv in
                         zip(self.prob_parameters, self.rvs)])
    def single_feature_logliks(self, feature):
        # Rows: (log-likelihood, alpha, beta) per parameterization
        data = zip(self.logliks(feature), self.alphas, self.betas)
        return pd.DataFrame(data, columns=SINGLE_FEATURE_COLUMNS)
    def logsumexp_logliks(self, x):
        """Calculate how well this model fits these data
        Parameters
        ----------
        x : numpy.array-like
            A single vector to estimate the log-likelihood of the models on
        Returns
        -------
        logsumexp_logliks : float
            Total log-likelihood of this model given this data
        """
        return logsumexp(self.logliks(x))
@staticmethod
def violinplot(self, n=1000, **kwargs):
"""Plot violins of each distribution in the model family
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
ax : matplotlib.Axes object
Axes object with violins plotted
"""
kwargs.setdefault('palette', 'Purples')
dfs = []
for rv in self.rvs:
psi = rv.rvs(n)
df = pd.Series(psi, name=self.ylabel).to_frame()
alpha, beta = rv.args
alpha = self.nice_number_string(alpha, decimal_places=2)
beta = self.nice_number_string(beta, decimal_places=2)
df['parameters'] = '$\\alpha = {0}$\n$\\beta = {1}$'.format(
alpha, beta)
dfs.append(df)
data = pd.concat(dfs)
if 'ax' not in kwargs:
fig, ax = plt.subplots(figsize=(len(self.alphas)*0.625, 4))
else:
ax = kwargs.pop('ax')
ax = violinplot(x='parameters', y=self.ylabel, data=data,
ax=ax, **kwargs)
sns.despine(ax=ax)
return ax
|
YeoLab/anchor | anchor/model.py | ModalityModel.violinplot | python | def violinplot(self, n=1000, **kwargs):
kwargs.setdefault('palette', 'Purples')
dfs = []
for rv in self.rvs:
psi = rv.rvs(n)
df = pd.Series(psi, name=self.ylabel).to_frame()
alpha, beta = rv.args
alpha = self.nice_number_string(alpha, decimal_places=2)
beta = self.nice_number_string(beta, decimal_places=2)
df['parameters'] = '$\\alpha = {0}$\n$\\beta = {1}$'.format(
alpha, beta)
dfs.append(df)
data = pd.concat(dfs)
if 'ax' not in kwargs:
fig, ax = plt.subplots(figsize=(len(self.alphas)*0.625, 4))
else:
ax = kwargs.pop('ax')
ax = violinplot(x='parameters', y=self.ylabel, data=data,
ax=ax, **kwargs)
sns.despine(ax=ax)
return ax | Plot violins of each distribution in the model family
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
ax : matplotlib.Axes object
Axes object with violins plotted | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/model.py#L131-L169 | [
"def violinplot(x=None, y=None, data=None, bw=0.2, scale='width',\n inner=None, ax=None, **kwargs):\n \"\"\"Wrapper around Seaborn's Violinplot specifically for [0, 1] ranged data\n\n What's different:\n - bw = 0.2: Sets bandwidth to be small and the same between datasets\n - scale = 'widt... | class ModalityModel(object):
"""Object to model modalities from beta distributions"""
    def __init__(self, alphas, betas, ylabel='$\Psi$'):
        """Model a family of beta distributions
        Parameters
        ----------
        alphas : float or list-like
            List of values for the alpha parameter of the Beta distribution. If
            this is a single value (not a list), it will be assumed that this
            value is constant, and will be propagated through to have as many
            values as the "betas" parameter
        betas : float or list-like
            List of values for the beta parameter of the Beta distribution. If
            this is a single value (not a list), it will be assumed that this
            value is constant, and will be propagated through to have as many
            values as the "alphas" parameter
        ylabel : str, optional
            Name of the value you're estimating. Originally developed for
            alternative splicing "percent spliced in"/"Psi" scores, the default
            is the Greek letter Psi
        """
        # Two scalars define a single-member family
        if not isinstance(alphas, Iterable) and not isinstance(betas,
                                                               Iterable):
            alphas = [alphas]
            betas = [betas]
        self.ylabel = ylabel
        # A scalar parameter is broadcast against the iterable one
        self.alphas = np.array(alphas) if isinstance(alphas, Iterable) \
            else np.ones(len(betas)) * alphas
        self.betas = np.array(betas) if isinstance(betas, Iterable) \
            else np.ones(len(alphas)) * betas
        # One frozen beta distribution per (alpha, beta) pair
        self.rvs = [stats.beta(a, b) for a, b in
                    zip(self.alphas, self.betas)]
        # All parameterizations start out equally probable
        self.scores = np.ones(self.alphas.shape).astype(float)
        self.prob_parameters = self.scores/self.scores.sum()
    def __eq__(self, other):
        """Test equality with other model"""
        return np.all(self.alphas == other.alphas) \
            and np.all(self.betas == other.betas) \
            and np.all(self.prob_parameters == other.prob_parameters)
    def __ne__(self, other):
        """Test not equality with other model"""
        return not self.__eq__(other)
    def logliks(self, x):
        """Calculate log-likelihood of a feature x for each model
        Converts values that are exactly 0 or exactly 1 to the nearest
        representable neighbors, because they lie outside the beta
        distribution's open support.
        Parameters
        ----------
        x : numpy.array-like
            A single vector to estimate the log-likelihood of the models on
        Returns
        -------
        logliks : numpy.array
            Log-likelihood of these data in each member of the model's family
        """
        x = x.copy()
        # Replace exactly 0 and exactly 1 values with a very small number
        # (machine epsilon, the smallest number that this computer is capable
        # of storing) because 0 and 1 are not in the Beta distribution.
        x[x == 0] = VERY_SMALL_NUMBER
        x[x == 1] = 1 - VERY_SMALL_NUMBER
        # Weight each parameterization's likelihood by its prior probability
        return np.array([np.log(prob) + rv.logpdf(x[np.isfinite(x)]).sum()
                         for prob, rv in
                         zip(self.prob_parameters, self.rvs)])
    def single_feature_logliks(self, feature):
        # Rows: (log-likelihood, alpha, beta) per parameterization
        data = zip(self.logliks(feature), self.alphas, self.betas)
        return pd.DataFrame(data, columns=SINGLE_FEATURE_COLUMNS)
    def logsumexp_logliks(self, x):
        """Calculate how well this model fits these data
        Parameters
        ----------
        x : numpy.array-like
            A single vector to estimate the log-likelihood of the models on
        Returns
        -------
        logsumexp_logliks : float
            Total log-likelihood of this model given this data
        """
        return logsumexp(self.logliks(x))
@staticmethod
def nice_number_string(number, decimal_places=2):
"""Convert floats to either integers or a nice looking fraction"""
if number == np.round(number):
return str(int(number))
elif number < 1 and number > 0:
inverse = 1 / number
if int(inverse) == np.round(inverse):
return r'\frac{{1}}{{{}}}'.format(int(inverse))
else:
template = '{{:.{0}}}'.format(decimal_places)
return template.format(number)
|
YeoLab/anchor | anchor/bayesian.py | BayesianModalities._single_feature_logliks_one_step | python | def _single_feature_logliks_one_step(self, feature, models):
x_non_na = feature[~feature.isnull()]
if x_non_na.empty:
return pd.DataFrame()
else:
dfs = []
for name, model in models.items():
df = model.single_feature_logliks(feature)
df['Modality'] = name
dfs.append(df)
return pd.concat(dfs, ignore_index=True) | Get log-likelihood of models at each parameterization for given data
Parameters
----------
feature : pandas.Series
Percent-based values of a single feature. May contain NAs, but only
non-NA values are used.
Returns
-------
logliks : pandas.DataFrame | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/bayesian.py#L54-L77 | null | class BayesianModalities(object):
"""Use Bayesian methods to estimate modalities of splicing events"""
score_name = '$\log_2 K$'
def __init__(self, one_parameter_models=ONE_PARAMETER_MODELS,
two_parameter_models=TWO_PARAMETER_MODELS,
logbf_thresh=10):
"""Initialize an object with models to estimate splicing modality
Parameters
----------
step : float
Distance between parameter values
vmax : float
Maximum parameter value
logbf_thresh : float
Minimum threshold at which the bayes factor difference is defined
to be significant
"""
self.logbf_thresh = logbf_thresh
# self.modality_to_cmap = modality_to_cmap
self.one_param_models = {k: ModalityModel(**v)
for k, v in one_parameter_models.items()}
self.two_param_models = {k: ModalityModel(**v)
for k, v in two_parameter_models.items()}
self.models = self.one_param_models.copy()
self.models.update(self.two_param_models)
@staticmethod
def assert_non_negative(x):
"""Ensure all values are greater than zero
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` is less than 0
"""
assert np.all(x[np.isfinite(x)] >= 0)
@staticmethod
def assert_less_than_or_equal_1(x):
"""Ensure all values are less than 1
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` are greater than 1
"""
assert np.all(x[np.isfinite(x)] <= 1)
def fit(self, data):
"""Get the modality assignments of each splicing event in the data
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_events) dataframe of splicing events' PSI scores.
Must be psi scores which range from 0 to 1
Returns
-------
log2_bayes_factors : pandas.DataFrame
A (n_modalities, n_events) dataframe of the estimated log2
bayes factor for each splicing event, for each modality
Raises
------
AssertionError
If any value in ``data`` does not fall only between 0 and 1.
"""
self.assert_less_than_or_equal_1(data.values.flat)
self.assert_non_negative(data.values.flat)
if isinstance(data, pd.DataFrame):
log2_bayes_factors = data.apply(self.single_feature_fit)
elif isinstance(data, pd.Series):
log2_bayes_factors = self.single_feature_fit(data)
log2_bayes_factors.name = self.score_name
return log2_bayes_factors
def predict(self, log2_bayes_factors, reset_index=False):
"""Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalilites
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event
"""
if reset_index:
x = log2_bayes_factors.reset_index(level=0, drop=True)
else:
x = log2_bayes_factors
if isinstance(x, pd.DataFrame):
not_na = (x.notnull() > 0).any()
not_na_columns = not_na[not_na].index
x.ix[NULL_MODEL, not_na_columns] = self.logbf_thresh
elif isinstance(x, pd.Series):
x[NULL_MODEL] = self.logbf_thresh
return x.idxmax()
def fit_predict(self, data):
"""Convenience function to assign modalities directly from data"""
return self.predict(self.fit(data))
def single_feature_logliks(self, feature):
"""Calculate log-likelihoods of each modality's parameterization
Used for plotting the estimates of a single feature
Parameters
----------
featre : pandas.Series
A single feature's values. All values must range from 0 to 1.
Returns
-------
logliks : pandas.DataFrame
The log-likelihood the data, for each model, for each
parameterization
Raises
------
AssertionError
If any value in ``x`` does not fall only between 0 and 1.
"""
self.assert_less_than_or_equal_1(feature.values)
self.assert_non_negative(feature.values)
logliks = self._single_feature_logliks_one_step(
feature, self.one_param_models)
logsumexps = self.logliks_to_logsumexp(logliks)
# If none of the one-parameter models passed, try the two-param models
if (logsumexps <= self.logbf_thresh).all():
logliks_two_params = self._single_feature_logliks_one_step(
feature, self.two_param_models)
logliks = pd.concat([logliks, logliks_two_params])
return logliks
@staticmethod
def logliks_to_logsumexp(logliks):
return logliks.groupby('Modality')[r'$\log$ Likelihood'].apply(
logsumexp)
def single_feature_fit(self, feature):
"""Get the log2 bayes factor of the fit for each modality"""
if np.isfinite(feature).sum() == 0:
series = pd.Series(index=MODALITY_ORDER)
else:
logbf_one_param = pd.Series(
{k: v.logsumexp_logliks(feature) for
k, v in self.one_param_models.items()})
# Check if none of the previous features fit
if (logbf_one_param <= self.logbf_thresh).all():
logbf_two_param = pd.Series(
{k: v.logsumexp_logliks(feature)
for k, v in self.two_param_models.items()})
series = pd.concat([logbf_one_param, logbf_two_param])
series[NULL_MODEL] = self.logbf_thresh
else:
series = logbf_one_param
series.index.name = 'Modality'
series.name = self.score_name
return series
def plot_single_feature_calculation(self, feature, renamed=''):
if np.isfinite(feature).sum() == 0:
raise ValueError('The feature has no finite values')
logliks = self.single_feature_logliks(feature)
logsumexps = self.logliks_to_logsumexp(logliks)
logsumexps[NULL_MODEL] = self.logbf_thresh
plotter = _ModelLoglikPlotter()
return plotter.plot(feature, logliks, logsumexps, self.logbf_thresh,
renamed=renamed)
def violinplot(self, n=1000, figsize=None, **kwargs):
r"""Visualize all modality family members with parameters
Use violinplots to visualize distributions of modality family members
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
fig : matplotlib.Figure object
Figure object with violins plotted
"""
if figsize is None:
nrows = len(self.models)
width = max(len(m.rvs) for name, m in self.models.items())*0.625
height = nrows*2.5
figsize = width, height
fig, axes = plt.subplots(nrows=nrows, figsize=figsize)
for ax, model_name in zip(axes, MODALITY_ORDER):
try:
model = self.models[model_name]
cmap = MODALITY_TO_CMAP[model_name]
palette = cmap(np.linspace(0, 1, len(model.rvs)))
model.violinplot(n=n, ax=ax, palette=palette, **kwargs)
ax.set(title=model_name, xlabel='')
except KeyError:
continue
fig.tight_layout()
|
YeoLab/anchor | anchor/bayesian.py | BayesianModalities.fit | python | def fit(self, data):
self.assert_less_than_or_equal_1(data.values.flat)
self.assert_non_negative(data.values.flat)
if isinstance(data, pd.DataFrame):
log2_bayes_factors = data.apply(self.single_feature_fit)
elif isinstance(data, pd.Series):
log2_bayes_factors = self.single_feature_fit(data)
log2_bayes_factors.name = self.score_name
return log2_bayes_factors | Get the modality assignments of each splicing event in the data
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_events) dataframe of splicing events' PSI scores.
Must be psi scores which range from 0 to 1
Returns
-------
log2_bayes_factors : pandas.DataFrame
A (n_modalities, n_events) dataframe of the estimated log2
bayes factor for each splicing event, for each modality
Raises
------
AssertionError
If any value in ``data`` does not fall only between 0 and 1. | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/bayesian.py#L111-L139 | [
"def assert_non_negative(x):\n \"\"\"Ensure all values are greater than zero\n\n Parameters\n ----------\n x : array_like\n A numpy array\n\n Raises\n ------\n AssertionError\n If any value in ``x`` is less than 0\n \"\"\"\n assert np.all(x[np.isfinite(x)] >= 0)\n",
"def a... | class BayesianModalities(object):
"""Use Bayesian methods to estimate modalities of splicing events"""
score_name = '$\log_2 K$'
def __init__(self, one_parameter_models=ONE_PARAMETER_MODELS,
two_parameter_models=TWO_PARAMETER_MODELS,
logbf_thresh=10):
"""Initialize an object with models to estimate splicing modality
Parameters
----------
step : float
Distance between parameter values
vmax : float
Maximum parameter value
logbf_thresh : float
Minimum threshold at which the bayes factor difference is defined
to be significant
"""
self.logbf_thresh = logbf_thresh
# self.modality_to_cmap = modality_to_cmap
self.one_param_models = {k: ModalityModel(**v)
for k, v in one_parameter_models.items()}
self.two_param_models = {k: ModalityModel(**v)
for k, v in two_parameter_models.items()}
self.models = self.one_param_models.copy()
self.models.update(self.two_param_models)
def _single_feature_logliks_one_step(self, feature, models):
"""Get log-likelihood of models at each parameterization for given data
Parameters
----------
feature : pandas.Series
Percent-based values of a single feature. May contain NAs, but only
non-NA values are used.
Returns
-------
logliks : pandas.DataFrame
"""
x_non_na = feature[~feature.isnull()]
if x_non_na.empty:
return pd.DataFrame()
else:
dfs = []
for name, model in models.items():
df = model.single_feature_logliks(feature)
df['Modality'] = name
dfs.append(df)
return pd.concat(dfs, ignore_index=True)
@staticmethod
def assert_non_negative(x):
"""Ensure all values are greater than zero
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` is less than 0
"""
assert np.all(x[np.isfinite(x)] >= 0)
@staticmethod
def assert_less_than_or_equal_1(x):
"""Ensure all values are less than 1
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` are greater than 1
"""
assert np.all(x[np.isfinite(x)] <= 1)
def predict(self, log2_bayes_factors, reset_index=False):
"""Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalilites
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event
"""
if reset_index:
x = log2_bayes_factors.reset_index(level=0, drop=True)
else:
x = log2_bayes_factors
if isinstance(x, pd.DataFrame):
not_na = (x.notnull() > 0).any()
not_na_columns = not_na[not_na].index
x.ix[NULL_MODEL, not_na_columns] = self.logbf_thresh
elif isinstance(x, pd.Series):
x[NULL_MODEL] = self.logbf_thresh
return x.idxmax()
def fit_predict(self, data):
"""Convenience function to assign modalities directly from data"""
return self.predict(self.fit(data))
def single_feature_logliks(self, feature):
"""Calculate log-likelihoods of each modality's parameterization
Used for plotting the estimates of a single feature
Parameters
----------
featre : pandas.Series
A single feature's values. All values must range from 0 to 1.
Returns
-------
logliks : pandas.DataFrame
The log-likelihood the data, for each model, for each
parameterization
Raises
------
AssertionError
If any value in ``x`` does not fall only between 0 and 1.
"""
self.assert_less_than_or_equal_1(feature.values)
self.assert_non_negative(feature.values)
logliks = self._single_feature_logliks_one_step(
feature, self.one_param_models)
logsumexps = self.logliks_to_logsumexp(logliks)
# If none of the one-parameter models passed, try the two-param models
if (logsumexps <= self.logbf_thresh).all():
logliks_two_params = self._single_feature_logliks_one_step(
feature, self.two_param_models)
logliks = pd.concat([logliks, logliks_two_params])
return logliks
@staticmethod
def logliks_to_logsumexp(logliks):
return logliks.groupby('Modality')[r'$\log$ Likelihood'].apply(
logsumexp)
def single_feature_fit(self, feature):
"""Get the log2 bayes factor of the fit for each modality"""
if np.isfinite(feature).sum() == 0:
series = pd.Series(index=MODALITY_ORDER)
else:
logbf_one_param = pd.Series(
{k: v.logsumexp_logliks(feature) for
k, v in self.one_param_models.items()})
# Check if none of the previous features fit
if (logbf_one_param <= self.logbf_thresh).all():
logbf_two_param = pd.Series(
{k: v.logsumexp_logliks(feature)
for k, v in self.two_param_models.items()})
series = pd.concat([logbf_one_param, logbf_two_param])
series[NULL_MODEL] = self.logbf_thresh
else:
series = logbf_one_param
series.index.name = 'Modality'
series.name = self.score_name
return series
def plot_single_feature_calculation(self, feature, renamed=''):
if np.isfinite(feature).sum() == 0:
raise ValueError('The feature has no finite values')
logliks = self.single_feature_logliks(feature)
logsumexps = self.logliks_to_logsumexp(logliks)
logsumexps[NULL_MODEL] = self.logbf_thresh
plotter = _ModelLoglikPlotter()
return plotter.plot(feature, logliks, logsumexps, self.logbf_thresh,
renamed=renamed)
def violinplot(self, n=1000, figsize=None, **kwargs):
r"""Visualize all modality family members with parameters
Use violinplots to visualize distributions of modality family members
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
fig : matplotlib.Figure object
Figure object with violins plotted
"""
if figsize is None:
nrows = len(self.models)
width = max(len(m.rvs) for name, m in self.models.items())*0.625
height = nrows*2.5
figsize = width, height
fig, axes = plt.subplots(nrows=nrows, figsize=figsize)
for ax, model_name in zip(axes, MODALITY_ORDER):
try:
model = self.models[model_name]
cmap = MODALITY_TO_CMAP[model_name]
palette = cmap(np.linspace(0, 1, len(model.rvs)))
model.violinplot(n=n, ax=ax, palette=palette, **kwargs)
ax.set(title=model_name, xlabel='')
except KeyError:
continue
fig.tight_layout()
|
YeoLab/anchor | anchor/bayesian.py | BayesianModalities.predict | python | def predict(self, log2_bayes_factors, reset_index=False):
if reset_index:
x = log2_bayes_factors.reset_index(level=0, drop=True)
else:
x = log2_bayes_factors
if isinstance(x, pd.DataFrame):
not_na = (x.notnull() > 0).any()
not_na_columns = not_na[not_na].index
x.ix[NULL_MODEL, not_na_columns] = self.logbf_thresh
elif isinstance(x, pd.Series):
x[NULL_MODEL] = self.logbf_thresh
return x.idxmax() | Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalilites
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/bayesian.py#L141-L178 | null | class BayesianModalities(object):
"""Use Bayesian methods to estimate modalities of splicing events"""
score_name = '$\log_2 K$'
def __init__(self, one_parameter_models=ONE_PARAMETER_MODELS,
two_parameter_models=TWO_PARAMETER_MODELS,
logbf_thresh=10):
"""Initialize an object with models to estimate splicing modality
Parameters
----------
step : float
Distance between parameter values
vmax : float
Maximum parameter value
logbf_thresh : float
Minimum threshold at which the bayes factor difference is defined
to be significant
"""
self.logbf_thresh = logbf_thresh
# self.modality_to_cmap = modality_to_cmap
self.one_param_models = {k: ModalityModel(**v)
for k, v in one_parameter_models.items()}
self.two_param_models = {k: ModalityModel(**v)
for k, v in two_parameter_models.items()}
self.models = self.one_param_models.copy()
self.models.update(self.two_param_models)
def _single_feature_logliks_one_step(self, feature, models):
"""Get log-likelihood of models at each parameterization for given data
Parameters
----------
feature : pandas.Series
Percent-based values of a single feature. May contain NAs, but only
non-NA values are used.
Returns
-------
logliks : pandas.DataFrame
"""
x_non_na = feature[~feature.isnull()]
if x_non_na.empty:
return pd.DataFrame()
else:
dfs = []
for name, model in models.items():
df = model.single_feature_logliks(feature)
df['Modality'] = name
dfs.append(df)
return pd.concat(dfs, ignore_index=True)
@staticmethod
def assert_non_negative(x):
"""Ensure all values are greater than zero
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` is less than 0
"""
assert np.all(x[np.isfinite(x)] >= 0)
@staticmethod
def assert_less_than_or_equal_1(x):
"""Ensure all values are less than 1
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` are greater than 1
"""
assert np.all(x[np.isfinite(x)] <= 1)
def fit(self, data):
"""Get the modality assignments of each splicing event in the data
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_events) dataframe of splicing events' PSI scores.
Must be psi scores which range from 0 to 1
Returns
-------
log2_bayes_factors : pandas.DataFrame
A (n_modalities, n_events) dataframe of the estimated log2
bayes factor for each splicing event, for each modality
Raises
------
AssertionError
If any value in ``data`` does not fall only between 0 and 1.
"""
self.assert_less_than_or_equal_1(data.values.flat)
self.assert_non_negative(data.values.flat)
if isinstance(data, pd.DataFrame):
log2_bayes_factors = data.apply(self.single_feature_fit)
elif isinstance(data, pd.Series):
log2_bayes_factors = self.single_feature_fit(data)
log2_bayes_factors.name = self.score_name
return log2_bayes_factors
def fit_predict(self, data):
"""Convenience function to assign modalities directly from data"""
return self.predict(self.fit(data))
def single_feature_logliks(self, feature):
"""Calculate log-likelihoods of each modality's parameterization
Used for plotting the estimates of a single feature
Parameters
----------
featre : pandas.Series
A single feature's values. All values must range from 0 to 1.
Returns
-------
logliks : pandas.DataFrame
The log-likelihood the data, for each model, for each
parameterization
Raises
------
AssertionError
If any value in ``x`` does not fall only between 0 and 1.
"""
self.assert_less_than_or_equal_1(feature.values)
self.assert_non_negative(feature.values)
logliks = self._single_feature_logliks_one_step(
feature, self.one_param_models)
logsumexps = self.logliks_to_logsumexp(logliks)
# If none of the one-parameter models passed, try the two-param models
if (logsumexps <= self.logbf_thresh).all():
logliks_two_params = self._single_feature_logliks_one_step(
feature, self.two_param_models)
logliks = pd.concat([logliks, logliks_two_params])
return logliks
@staticmethod
def logliks_to_logsumexp(logliks):
return logliks.groupby('Modality')[r'$\log$ Likelihood'].apply(
logsumexp)
def single_feature_fit(self, feature):
"""Get the log2 bayes factor of the fit for each modality"""
if np.isfinite(feature).sum() == 0:
series = pd.Series(index=MODALITY_ORDER)
else:
logbf_one_param = pd.Series(
{k: v.logsumexp_logliks(feature) for
k, v in self.one_param_models.items()})
# Check if none of the previous features fit
if (logbf_one_param <= self.logbf_thresh).all():
logbf_two_param = pd.Series(
{k: v.logsumexp_logliks(feature)
for k, v in self.two_param_models.items()})
series = pd.concat([logbf_one_param, logbf_two_param])
series[NULL_MODEL] = self.logbf_thresh
else:
series = logbf_one_param
series.index.name = 'Modality'
series.name = self.score_name
return series
def plot_single_feature_calculation(self, feature, renamed=''):
if np.isfinite(feature).sum() == 0:
raise ValueError('The feature has no finite values')
logliks = self.single_feature_logliks(feature)
logsumexps = self.logliks_to_logsumexp(logliks)
logsumexps[NULL_MODEL] = self.logbf_thresh
plotter = _ModelLoglikPlotter()
return plotter.plot(feature, logliks, logsumexps, self.logbf_thresh,
renamed=renamed)
def violinplot(self, n=1000, figsize=None, **kwargs):
r"""Visualize all modality family members with parameters
Use violinplots to visualize distributions of modality family members
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
fig : matplotlib.Figure object
Figure object with violins plotted
"""
if figsize is None:
nrows = len(self.models)
width = max(len(m.rvs) for name, m in self.models.items())*0.625
height = nrows*2.5
figsize = width, height
fig, axes = plt.subplots(nrows=nrows, figsize=figsize)
for ax, model_name in zip(axes, MODALITY_ORDER):
try:
model = self.models[model_name]
cmap = MODALITY_TO_CMAP[model_name]
palette = cmap(np.linspace(0, 1, len(model.rvs)))
model.violinplot(n=n, ax=ax, palette=palette, **kwargs)
ax.set(title=model_name, xlabel='')
except KeyError:
continue
fig.tight_layout()
|
YeoLab/anchor | anchor/bayesian.py | BayesianModalities.single_feature_logliks | python | def single_feature_logliks(self, feature):
self.assert_less_than_or_equal_1(feature.values)
self.assert_non_negative(feature.values)
logliks = self._single_feature_logliks_one_step(
feature, self.one_param_models)
logsumexps = self.logliks_to_logsumexp(logliks)
# If none of the one-parameter models passed, try the two-param models
if (logsumexps <= self.logbf_thresh).all():
logliks_two_params = self._single_feature_logliks_one_step(
feature, self.two_param_models)
logliks = pd.concat([logliks, logliks_two_params])
return logliks | Calculate log-likelihoods of each modality's parameterization
Used for plotting the estimates of a single feature
Parameters
----------
featre : pandas.Series
A single feature's values. All values must range from 0 to 1.
Returns
-------
logliks : pandas.DataFrame
The log-likelihood the data, for each model, for each
parameterization
Raises
------
AssertionError
If any value in ``x`` does not fall only between 0 and 1. | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/bayesian.py#L184-L218 | [
"def _single_feature_logliks_one_step(self, feature, models):\n \"\"\"Get log-likelihood of models at each parameterization for given data\n\n Parameters\n ----------\n feature : pandas.Series\n Percent-based values of a single feature. May contain NAs, but only\n non-NA values are used.\n... | class BayesianModalities(object):
"""Use Bayesian methods to estimate modalities of splicing events"""
score_name = '$\log_2 K$'
def __init__(self, one_parameter_models=ONE_PARAMETER_MODELS,
two_parameter_models=TWO_PARAMETER_MODELS,
logbf_thresh=10):
"""Initialize an object with models to estimate splicing modality
Parameters
----------
step : float
Distance between parameter values
vmax : float
Maximum parameter value
logbf_thresh : float
Minimum threshold at which the bayes factor difference is defined
to be significant
"""
self.logbf_thresh = logbf_thresh
# self.modality_to_cmap = modality_to_cmap
self.one_param_models = {k: ModalityModel(**v)
for k, v in one_parameter_models.items()}
self.two_param_models = {k: ModalityModel(**v)
for k, v in two_parameter_models.items()}
self.models = self.one_param_models.copy()
self.models.update(self.two_param_models)
def _single_feature_logliks_one_step(self, feature, models):
"""Get log-likelihood of models at each parameterization for given data
Parameters
----------
feature : pandas.Series
Percent-based values of a single feature. May contain NAs, but only
non-NA values are used.
Returns
-------
logliks : pandas.DataFrame
"""
x_non_na = feature[~feature.isnull()]
if x_non_na.empty:
return pd.DataFrame()
else:
dfs = []
for name, model in models.items():
df = model.single_feature_logliks(feature)
df['Modality'] = name
dfs.append(df)
return pd.concat(dfs, ignore_index=True)
@staticmethod
def assert_non_negative(x):
"""Ensure all values are greater than zero
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` is less than 0
"""
assert np.all(x[np.isfinite(x)] >= 0)
@staticmethod
def assert_less_than_or_equal_1(x):
"""Ensure all values are less than 1
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` are greater than 1
"""
assert np.all(x[np.isfinite(x)] <= 1)
def fit(self, data):
"""Get the modality assignments of each splicing event in the data
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_events) dataframe of splicing events' PSI scores.
Must be psi scores which range from 0 to 1
Returns
-------
log2_bayes_factors : pandas.DataFrame
A (n_modalities, n_events) dataframe of the estimated log2
bayes factor for each splicing event, for each modality
Raises
------
AssertionError
If any value in ``data`` does not fall only between 0 and 1.
"""
self.assert_less_than_or_equal_1(data.values.flat)
self.assert_non_negative(data.values.flat)
if isinstance(data, pd.DataFrame):
log2_bayes_factors = data.apply(self.single_feature_fit)
elif isinstance(data, pd.Series):
log2_bayes_factors = self.single_feature_fit(data)
log2_bayes_factors.name = self.score_name
return log2_bayes_factors
def predict(self, log2_bayes_factors, reset_index=False):
"""Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalilites
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event
"""
if reset_index:
x = log2_bayes_factors.reset_index(level=0, drop=True)
else:
x = log2_bayes_factors
if isinstance(x, pd.DataFrame):
not_na = (x.notnull() > 0).any()
not_na_columns = not_na[not_na].index
x.ix[NULL_MODEL, not_na_columns] = self.logbf_thresh
elif isinstance(x, pd.Series):
x[NULL_MODEL] = self.logbf_thresh
return x.idxmax()
def fit_predict(self, data):
"""Convenience function to assign modalities directly from data"""
return self.predict(self.fit(data))
@staticmethod
def logliks_to_logsumexp(logliks):
return logliks.groupby('Modality')[r'$\log$ Likelihood'].apply(
logsumexp)
def single_feature_fit(self, feature):
"""Get the log2 bayes factor of the fit for each modality"""
if np.isfinite(feature).sum() == 0:
series = pd.Series(index=MODALITY_ORDER)
else:
logbf_one_param = pd.Series(
{k: v.logsumexp_logliks(feature) for
k, v in self.one_param_models.items()})
# Check if none of the previous features fit
if (logbf_one_param <= self.logbf_thresh).all():
logbf_two_param = pd.Series(
{k: v.logsumexp_logliks(feature)
for k, v in self.two_param_models.items()})
series = pd.concat([logbf_one_param, logbf_two_param])
series[NULL_MODEL] = self.logbf_thresh
else:
series = logbf_one_param
series.index.name = 'Modality'
series.name = self.score_name
return series
def plot_single_feature_calculation(self, feature, renamed=''):
if np.isfinite(feature).sum() == 0:
raise ValueError('The feature has no finite values')
logliks = self.single_feature_logliks(feature)
logsumexps = self.logliks_to_logsumexp(logliks)
logsumexps[NULL_MODEL] = self.logbf_thresh
plotter = _ModelLoglikPlotter()
return plotter.plot(feature, logliks, logsumexps, self.logbf_thresh,
renamed=renamed)
def violinplot(self, n=1000, figsize=None, **kwargs):
r"""Visualize all modality family members with parameters
Use violinplots to visualize distributions of modality family members
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
fig : matplotlib.Figure object
Figure object with violins plotted
"""
if figsize is None:
nrows = len(self.models)
width = max(len(m.rvs) for name, m in self.models.items())*0.625
height = nrows*2.5
figsize = width, height
fig, axes = plt.subplots(nrows=nrows, figsize=figsize)
for ax, model_name in zip(axes, MODALITY_ORDER):
try:
model = self.models[model_name]
cmap = MODALITY_TO_CMAP[model_name]
palette = cmap(np.linspace(0, 1, len(model.rvs)))
model.violinplot(n=n, ax=ax, palette=palette, **kwargs)
ax.set(title=model_name, xlabel='')
except KeyError:
continue
fig.tight_layout()
|
YeoLab/anchor | anchor/bayesian.py | BayesianModalities.single_feature_fit | python | def single_feature_fit(self, feature):
if np.isfinite(feature).sum() == 0:
series = pd.Series(index=MODALITY_ORDER)
else:
logbf_one_param = pd.Series(
{k: v.logsumexp_logliks(feature) for
k, v in self.one_param_models.items()})
# Check if none of the previous features fit
if (logbf_one_param <= self.logbf_thresh).all():
logbf_two_param = pd.Series(
{k: v.logsumexp_logliks(feature)
for k, v in self.two_param_models.items()})
series = pd.concat([logbf_one_param, logbf_two_param])
series[NULL_MODEL] = self.logbf_thresh
else:
series = logbf_one_param
series.index.name = 'Modality'
series.name = self.score_name
return series | Get the log2 bayes factor of the fit for each modality | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/bayesian.py#L225-L245 | null | class BayesianModalities(object):
"""Use Bayesian methods to estimate modalities of splicing events"""
score_name = '$\log_2 K$'
def __init__(self, one_parameter_models=ONE_PARAMETER_MODELS,
two_parameter_models=TWO_PARAMETER_MODELS,
logbf_thresh=10):
"""Initialize an object with models to estimate splicing modality
Parameters
----------
step : float
Distance between parameter values
vmax : float
Maximum parameter value
logbf_thresh : float
Minimum threshold at which the bayes factor difference is defined
to be significant
"""
self.logbf_thresh = logbf_thresh
# self.modality_to_cmap = modality_to_cmap
self.one_param_models = {k: ModalityModel(**v)
for k, v in one_parameter_models.items()}
self.two_param_models = {k: ModalityModel(**v)
for k, v in two_parameter_models.items()}
self.models = self.one_param_models.copy()
self.models.update(self.two_param_models)
def _single_feature_logliks_one_step(self, feature, models):
"""Get log-likelihood of models at each parameterization for given data
Parameters
----------
feature : pandas.Series
Percent-based values of a single feature. May contain NAs, but only
non-NA values are used.
Returns
-------
logliks : pandas.DataFrame
"""
x_non_na = feature[~feature.isnull()]
if x_non_na.empty:
return pd.DataFrame()
else:
dfs = []
for name, model in models.items():
df = model.single_feature_logliks(feature)
df['Modality'] = name
dfs.append(df)
return pd.concat(dfs, ignore_index=True)
@staticmethod
def assert_non_negative(x):
"""Ensure all values are greater than zero
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` is less than 0
"""
assert np.all(x[np.isfinite(x)] >= 0)
@staticmethod
def assert_less_than_or_equal_1(x):
"""Ensure all values are less than 1
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` are greater than 1
"""
assert np.all(x[np.isfinite(x)] <= 1)
def fit(self, data):
"""Get the modality assignments of each splicing event in the data
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_events) dataframe of splicing events' PSI scores.
Must be psi scores which range from 0 to 1
Returns
-------
log2_bayes_factors : pandas.DataFrame
A (n_modalities, n_events) dataframe of the estimated log2
bayes factor for each splicing event, for each modality
Raises
------
AssertionError
If any value in ``data`` does not fall only between 0 and 1.
"""
self.assert_less_than_or_equal_1(data.values.flat)
self.assert_non_negative(data.values.flat)
if isinstance(data, pd.DataFrame):
log2_bayes_factors = data.apply(self.single_feature_fit)
elif isinstance(data, pd.Series):
log2_bayes_factors = self.single_feature_fit(data)
log2_bayes_factors.name = self.score_name
return log2_bayes_factors
def predict(self, log2_bayes_factors, reset_index=False):
"""Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalilites
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event
"""
if reset_index:
x = log2_bayes_factors.reset_index(level=0, drop=True)
else:
x = log2_bayes_factors
if isinstance(x, pd.DataFrame):
not_na = (x.notnull() > 0).any()
not_na_columns = not_na[not_na].index
x.ix[NULL_MODEL, not_na_columns] = self.logbf_thresh
elif isinstance(x, pd.Series):
x[NULL_MODEL] = self.logbf_thresh
return x.idxmax()
def fit_predict(self, data):
"""Convenience function to assign modalities directly from data"""
return self.predict(self.fit(data))
def single_feature_logliks(self, feature):
"""Calculate log-likelihoods of each modality's parameterization
Used for plotting the estimates of a single feature
Parameters
----------
featre : pandas.Series
A single feature's values. All values must range from 0 to 1.
Returns
-------
logliks : pandas.DataFrame
The log-likelihood the data, for each model, for each
parameterization
Raises
------
AssertionError
If any value in ``x`` does not fall only between 0 and 1.
"""
self.assert_less_than_or_equal_1(feature.values)
self.assert_non_negative(feature.values)
logliks = self._single_feature_logliks_one_step(
feature, self.one_param_models)
logsumexps = self.logliks_to_logsumexp(logliks)
# If none of the one-parameter models passed, try the two-param models
if (logsumexps <= self.logbf_thresh).all():
logliks_two_params = self._single_feature_logliks_one_step(
feature, self.two_param_models)
logliks = pd.concat([logliks, logliks_two_params])
return logliks
@staticmethod
def logliks_to_logsumexp(logliks):
return logliks.groupby('Modality')[r'$\log$ Likelihood'].apply(
logsumexp)
def plot_single_feature_calculation(self, feature, renamed=''):
if np.isfinite(feature).sum() == 0:
raise ValueError('The feature has no finite values')
logliks = self.single_feature_logliks(feature)
logsumexps = self.logliks_to_logsumexp(logliks)
logsumexps[NULL_MODEL] = self.logbf_thresh
plotter = _ModelLoglikPlotter()
return plotter.plot(feature, logliks, logsumexps, self.logbf_thresh,
renamed=renamed)
def violinplot(self, n=1000, figsize=None, **kwargs):
r"""Visualize all modality family members with parameters
Use violinplots to visualize distributions of modality family members
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
fig : matplotlib.Figure object
Figure object with violins plotted
"""
if figsize is None:
nrows = len(self.models)
width = max(len(m.rvs) for name, m in self.models.items())*0.625
height = nrows*2.5
figsize = width, height
fig, axes = plt.subplots(nrows=nrows, figsize=figsize)
for ax, model_name in zip(axes, MODALITY_ORDER):
try:
model = self.models[model_name]
cmap = MODALITY_TO_CMAP[model_name]
palette = cmap(np.linspace(0, 1, len(model.rvs)))
model.violinplot(n=n, ax=ax, palette=palette, **kwargs)
ax.set(title=model_name, xlabel='')
except KeyError:
continue
fig.tight_layout()
|
YeoLab/anchor | anchor/bayesian.py | BayesianModalities.violinplot | python | def violinplot(self, n=1000, figsize=None, **kwargs):
r"""Visualize all modality family members with parameters
Use violinplots to visualize distributions of modality family members
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
fig : matplotlib.Figure object
Figure object with violins plotted
"""
if figsize is None:
nrows = len(self.models)
width = max(len(m.rvs) for name, m in self.models.items())*0.625
height = nrows*2.5
figsize = width, height
fig, axes = plt.subplots(nrows=nrows, figsize=figsize)
for ax, model_name in zip(axes, MODALITY_ORDER):
try:
model = self.models[model_name]
cmap = MODALITY_TO_CMAP[model_name]
palette = cmap(np.linspace(0, 1, len(model.rvs)))
model.violinplot(n=n, ax=ax, palette=palette, **kwargs)
ax.set(title=model_name, xlabel='')
except KeyError:
continue
fig.tight_layout() | r"""Visualize all modality family members with parameters
Use violinplots to visualize distributions of modality family members
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
fig : matplotlib.Figure object
Figure object with violins plotted | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/bayesian.py#L258-L291 | null | class BayesianModalities(object):
"""Use Bayesian methods to estimate modalities of splicing events"""
score_name = '$\log_2 K$'
def __init__(self, one_parameter_models=ONE_PARAMETER_MODELS,
two_parameter_models=TWO_PARAMETER_MODELS,
logbf_thresh=10):
"""Initialize an object with models to estimate splicing modality
Parameters
----------
step : float
Distance between parameter values
vmax : float
Maximum parameter value
logbf_thresh : float
Minimum threshold at which the bayes factor difference is defined
to be significant
"""
self.logbf_thresh = logbf_thresh
# self.modality_to_cmap = modality_to_cmap
self.one_param_models = {k: ModalityModel(**v)
for k, v in one_parameter_models.items()}
self.two_param_models = {k: ModalityModel(**v)
for k, v in two_parameter_models.items()}
self.models = self.one_param_models.copy()
self.models.update(self.two_param_models)
def _single_feature_logliks_one_step(self, feature, models):
"""Get log-likelihood of models at each parameterization for given data
Parameters
----------
feature : pandas.Series
Percent-based values of a single feature. May contain NAs, but only
non-NA values are used.
Returns
-------
logliks : pandas.DataFrame
"""
x_non_na = feature[~feature.isnull()]
if x_non_na.empty:
return pd.DataFrame()
else:
dfs = []
for name, model in models.items():
df = model.single_feature_logliks(feature)
df['Modality'] = name
dfs.append(df)
return pd.concat(dfs, ignore_index=True)
@staticmethod
def assert_non_negative(x):
"""Ensure all values are greater than zero
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` is less than 0
"""
assert np.all(x[np.isfinite(x)] >= 0)
@staticmethod
def assert_less_than_or_equal_1(x):
"""Ensure all values are less than 1
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` are greater than 1
"""
assert np.all(x[np.isfinite(x)] <= 1)
def fit(self, data):
"""Get the modality assignments of each splicing event in the data
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_events) dataframe of splicing events' PSI scores.
Must be psi scores which range from 0 to 1
Returns
-------
log2_bayes_factors : pandas.DataFrame
A (n_modalities, n_events) dataframe of the estimated log2
bayes factor for each splicing event, for each modality
Raises
------
AssertionError
If any value in ``data`` does not fall only between 0 and 1.
"""
self.assert_less_than_or_equal_1(data.values.flat)
self.assert_non_negative(data.values.flat)
if isinstance(data, pd.DataFrame):
log2_bayes_factors = data.apply(self.single_feature_fit)
elif isinstance(data, pd.Series):
log2_bayes_factors = self.single_feature_fit(data)
log2_bayes_factors.name = self.score_name
return log2_bayes_factors
def predict(self, log2_bayes_factors, reset_index=False):
"""Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalilites
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event
"""
if reset_index:
x = log2_bayes_factors.reset_index(level=0, drop=True)
else:
x = log2_bayes_factors
if isinstance(x, pd.DataFrame):
not_na = (x.notnull() > 0).any()
not_na_columns = not_na[not_na].index
x.ix[NULL_MODEL, not_na_columns] = self.logbf_thresh
elif isinstance(x, pd.Series):
x[NULL_MODEL] = self.logbf_thresh
return x.idxmax()
def fit_predict(self, data):
"""Convenience function to assign modalities directly from data"""
return self.predict(self.fit(data))
def single_feature_logliks(self, feature):
"""Calculate log-likelihoods of each modality's parameterization
Used for plotting the estimates of a single feature
Parameters
----------
featre : pandas.Series
A single feature's values. All values must range from 0 to 1.
Returns
-------
logliks : pandas.DataFrame
The log-likelihood the data, for each model, for each
parameterization
Raises
------
AssertionError
If any value in ``x`` does not fall only between 0 and 1.
"""
self.assert_less_than_or_equal_1(feature.values)
self.assert_non_negative(feature.values)
logliks = self._single_feature_logliks_one_step(
feature, self.one_param_models)
logsumexps = self.logliks_to_logsumexp(logliks)
# If none of the one-parameter models passed, try the two-param models
if (logsumexps <= self.logbf_thresh).all():
logliks_two_params = self._single_feature_logliks_one_step(
feature, self.two_param_models)
logliks = pd.concat([logliks, logliks_two_params])
return logliks
@staticmethod
def logliks_to_logsumexp(logliks):
return logliks.groupby('Modality')[r'$\log$ Likelihood'].apply(
logsumexp)
def single_feature_fit(self, feature):
"""Get the log2 bayes factor of the fit for each modality"""
if np.isfinite(feature).sum() == 0:
series = pd.Series(index=MODALITY_ORDER)
else:
logbf_one_param = pd.Series(
{k: v.logsumexp_logliks(feature) for
k, v in self.one_param_models.items()})
# Check if none of the previous features fit
if (logbf_one_param <= self.logbf_thresh).all():
logbf_two_param = pd.Series(
{k: v.logsumexp_logliks(feature)
for k, v in self.two_param_models.items()})
series = pd.concat([logbf_one_param, logbf_two_param])
series[NULL_MODEL] = self.logbf_thresh
else:
series = logbf_one_param
series.index.name = 'Modality'
series.name = self.score_name
return series
def plot_single_feature_calculation(self, feature, renamed=''):
if np.isfinite(feature).sum() == 0:
raise ValueError('The feature has no finite values')
logliks = self.single_feature_logliks(feature)
logsumexps = self.logliks_to_logsumexp(logliks)
logsumexps[NULL_MODEL] = self.logbf_thresh
plotter = _ModelLoglikPlotter()
return plotter.plot(feature, logliks, logsumexps, self.logbf_thresh,
renamed=renamed)
|
YeoLab/anchor | anchor/infotheory.py | bin_range_strings | python | def bin_range_strings(bins, fmt=':g'):
return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
for i, j in zip(bins, bins[1:])] | Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
bins : list_like
List of anything, usually values of bin edges
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1'] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L12-L29 | null | """
Information-theoretic calculations
"""
import numpy as np
import pandas as pd
from sklearn import cross_validation
EPSILON = 100 * np.finfo(float).eps
def _check_prob_dist(x):
if np.any(x < 0):
raise ValueError('Each column of the input dataframes must be '
'**non-negative** probability distributions')
try:
if np.any(np.abs(x.sum() - np.ones(x.shape[1])) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
except IndexError:
if np.any(np.abs(x.sum() - 1) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
def binify(data, bins):
"""Makes a histogram of each column the provided binsize
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows
"""
if bins is None:
raise ValueError('Must specify "bins"')
if isinstance(data, pd.DataFrame):
binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,
range=(0, 1))[0]))
elif isinstance(data, pd.Series):
binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
else:
raise ValueError('`data` must be either a 1d vector or 2d matrix')
binned.index = bin_range_strings(bins)
# Normalize so each column sums to 1
binned = binned / binned.sum().astype(float)
return binned
def kld(p, q):
"""Kullback-Leiber divergence of two probability distributions pandas
dataframes, p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
Kullback-Lieber divergence of the common columns between the
dataframe. E.g. between 1st column in p and 1st column in q, and 2nd
column in p and 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense.
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
# If one of them is zero, then the other should be considered to be 0.
# In this problem formulation, log0 = 0
p = p.replace(0, np.nan)
q = q.replace(0, np.nan)
return (np.log2(p / q) * p).sum(axis=0)
def jsd(p, q):
"""Finds the per-column JSD between dataframes p and q
Jensen-Shannon divergence of two probability distrubutions pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
weight = 0.5
m = weight * (p + q)
result = weight * kld(p, m) + (1 - weight) * kld(q, m)
return result
def entropy(binned, base=2):
"""Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
entropy : pandas.Seires
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(binned)
except ValueError:
np.nan
return -((np.log(binned) / np.log(base)) * binned).sum(axis=0)
def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
"""
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series
def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
n_bins : int
Number of bins to binify the singles data on
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
"""
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, None, bins))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, pair, bins)
jsds.append(series)
return pd.concat(jsds, axis=1)
def jsd_df_to_2d(jsd_df):
"""Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes
"""
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
values='jsd')
return jsd_2d + np.tril(jsd_2d.T, -1)
|
YeoLab/anchor | anchor/infotheory.py | binify | python | def binify(data, bins):
if bins is None:
raise ValueError('Must specify "bins"')
if isinstance(data, pd.DataFrame):
binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,
range=(0, 1))[0]))
elif isinstance(data, pd.Series):
binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
else:
raise ValueError('`data` must be either a 1d vector or 2d matrix')
binned.index = bin_range_strings(bins)
# Normalize so each column sums to 1
binned = binned / binned.sum().astype(float)
return binned | Makes a histogram of each column the provided binsize
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L46-L77 | [
"def bin_range_strings(bins, fmt=':g'):\n \"\"\"Given a list of bins, make a list of strings of those bin ranges\n\n Parameters\n ----------\n bins : list_like\n List of anything, usually values of bin edges\n\n Returns\n -------\n bin_ranges : list\n List of bin ranges\n\n >>>... | """
Information-theoretic calculations
"""
import numpy as np
import pandas as pd
from sklearn import cross_validation
EPSILON = 100 * np.finfo(float).eps
def bin_range_strings(bins, fmt=':g'):
"""Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
bins : list_like
List of anything, usually values of bin edges
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1']
"""
return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
for i, j in zip(bins, bins[1:])]
def _check_prob_dist(x):
if np.any(x < 0):
raise ValueError('Each column of the input dataframes must be '
'**non-negative** probability distributions')
try:
if np.any(np.abs(x.sum() - np.ones(x.shape[1])) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
except IndexError:
if np.any(np.abs(x.sum() - 1) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
def kld(p, q):
"""Kullback-Leiber divergence of two probability distributions pandas
dataframes, p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
Kullback-Lieber divergence of the common columns between the
dataframe. E.g. between 1st column in p and 1st column in q, and 2nd
column in p and 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense.
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
# If one of them is zero, then the other should be considered to be 0.
# In this problem formulation, log0 = 0
p = p.replace(0, np.nan)
q = q.replace(0, np.nan)
return (np.log2(p / q) * p).sum(axis=0)
def jsd(p, q):
"""Finds the per-column JSD between dataframes p and q
Jensen-Shannon divergence of two probability distrubutions pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
weight = 0.5
m = weight * (p + q)
result = weight * kld(p, m) + (1 - weight) * kld(q, m)
return result
def entropy(binned, base=2):
"""Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
entropy : pandas.Seires
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(binned)
except ValueError:
np.nan
return -((np.log(binned) / np.log(base)) * binned).sum(axis=0)
def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
"""
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series
def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
n_bins : int
Number of bins to binify the singles data on
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
"""
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, None, bins))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, pair, bins)
jsds.append(series)
return pd.concat(jsds, axis=1)
def jsd_df_to_2d(jsd_df):
"""Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes
"""
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
values='jsd')
return jsd_2d + np.tril(jsd_2d.T, -1)
|
YeoLab/anchor | anchor/infotheory.py | kld | python | def kld(p, q):
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
# If one of them is zero, then the other should be considered to be 0.
# In this problem formulation, log0 = 0
p = p.replace(0, np.nan)
q = q.replace(0, np.nan)
return (np.log2(p / q) * p).sum(axis=0) | Kullback-Leiber divergence of two probability distributions pandas
dataframes, p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
Kullback-Lieber divergence of the common columns between the
dataframe. E.g. between 1st column in p and 1st column in q, and 2nd
column in p and 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense. | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L80-L119 | [
"def _check_prob_dist(x):\n if np.any(x < 0):\n raise ValueError('Each column of the input dataframes must be '\n '**non-negative** probability distributions')\n try:\n if np.any(np.abs(x.sum() - np.ones(x.shape[1])) > EPSILON):\n raise ValueError('Each column ... | """
Information-theoretic calculations
"""
import numpy as np
import pandas as pd
from sklearn import cross_validation
EPSILON = 100 * np.finfo(float).eps
def bin_range_strings(bins, fmt=':g'):
"""Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
bins : list_like
List of anything, usually values of bin edges
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1']
"""
return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
for i, j in zip(bins, bins[1:])]
def _check_prob_dist(x):
if np.any(x < 0):
raise ValueError('Each column of the input dataframes must be '
'**non-negative** probability distributions')
try:
if np.any(np.abs(x.sum() - np.ones(x.shape[1])) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
except IndexError:
if np.any(np.abs(x.sum() - 1) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
def binify(data, bins):
"""Makes a histogram of each column the provided binsize
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows
"""
if bins is None:
raise ValueError('Must specify "bins"')
if isinstance(data, pd.DataFrame):
binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,
range=(0, 1))[0]))
elif isinstance(data, pd.Series):
binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
else:
raise ValueError('`data` must be either a 1d vector or 2d matrix')
binned.index = bin_range_strings(bins)
# Normalize so each column sums to 1
binned = binned / binned.sum().astype(float)
return binned
def jsd(p, q):
"""Finds the per-column JSD between dataframes p and q
Jensen-Shannon divergence of two probability distrubutions pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
weight = 0.5
m = weight * (p + q)
result = weight * kld(p, m) + (1 - weight) * kld(q, m)
return result
def entropy(binned, base=2):
"""Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
entropy : pandas.Seires
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(binned)
except ValueError:
np.nan
return -((np.log(binned) / np.log(base)) * binned).sum(axis=0)
def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
"""
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series
def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
n_bins : int
Number of bins to binify the singles data on
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
"""
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, None, bins))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, pair, bins)
jsds.append(series)
return pd.concat(jsds, axis=1)
def jsd_df_to_2d(jsd_df):
"""Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes
"""
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
values='jsd')
return jsd_2d + np.tril(jsd_2d.T, -1)
|
YeoLab/anchor | anchor/infotheory.py | jsd | python | def jsd(p, q):
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
weight = 0.5
m = weight * (p + q)
result = weight * kld(p, m) + (1 - weight) * kld(q, m)
return result | Finds the per-column JSD between dataframes p and q
Jensen-Shannon divergence of two probability distrubutions pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L122-L157 | [
"def _check_prob_dist(x):\n if np.any(x < 0):\n raise ValueError('Each column of the input dataframes must be '\n '**non-negative** probability distributions')\n try:\n if np.any(np.abs(x.sum() - np.ones(x.shape[1])) > EPSILON):\n raise ValueError('Each column ... | """
Information-theoretic calculations
"""
import numpy as np
import pandas as pd
from sklearn import cross_validation
EPSILON = 100 * np.finfo(float).eps
def bin_range_strings(bins, fmt=':g'):
"""Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
bins : list_like
List of anything, usually values of bin edges
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1']
"""
return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
for i, j in zip(bins, bins[1:])]
def _check_prob_dist(x):
if np.any(x < 0):
raise ValueError('Each column of the input dataframes must be '
'**non-negative** probability distributions')
try:
if np.any(np.abs(x.sum() - np.ones(x.shape[1])) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
except IndexError:
if np.any(np.abs(x.sum() - 1) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
def binify(data, bins):
"""Makes a histogram of each column the provided binsize
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows
"""
if bins is None:
raise ValueError('Must specify "bins"')
if isinstance(data, pd.DataFrame):
binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,
range=(0, 1))[0]))
elif isinstance(data, pd.Series):
binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
else:
raise ValueError('`data` must be either a 1d vector or 2d matrix')
binned.index = bin_range_strings(bins)
# Normalize so each column sums to 1
binned = binned / binned.sum().astype(float)
return binned
def kld(p, q):
"""Kullback-Leiber divergence of two probability distributions pandas
dataframes, p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
Kullback-Lieber divergence of the common columns between the
dataframe. E.g. between 1st column in p and 1st column in q, and 2nd
column in p and 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense.
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
# If one of them is zero, then the other should be considered to be 0.
# In this problem formulation, log0 = 0
p = p.replace(0, np.nan)
q = q.replace(0, np.nan)
return (np.log2(p / q) * p).sum(axis=0)
def entropy(binned, base=2):
"""Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
entropy : pandas.Seires
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(binned)
except ValueError:
np.nan
return -((np.log(binned) / np.log(base)) * binned).sum(axis=0)
def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
"""
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series
def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
n_bins : int
Number of bins to binify the singles data on
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
"""
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, None, bins))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, pair, bins)
jsds.append(series)
return pd.concat(jsds, axis=1)
def jsd_df_to_2d(jsd_df):
"""Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes
"""
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
values='jsd')
return jsd_2d + np.tril(jsd_2d.T, -1)
|
YeoLab/anchor | anchor/infotheory.py | entropy | python | def entropy(binned, base=2):
try:
_check_prob_dist(binned)
except ValueError:
np.nan
return -((np.log(binned) / np.log(base)) * binned).sum(axis=0) | Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
entropy : pandas.Seires
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L160-L187 | [
"def _check_prob_dist(x):\n if np.any(x < 0):\n raise ValueError('Each column of the input dataframes must be '\n '**non-negative** probability distributions')\n try:\n if np.any(np.abs(x.sum() - np.ones(x.shape[1])) > EPSILON):\n raise ValueError('Each column ... | """
Information-theoretic calculations
"""
import numpy as np
import pandas as pd
from sklearn import cross_validation
EPSILON = 100 * np.finfo(float).eps
def bin_range_strings(bins, fmt=':g'):
"""Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
bins : list_like
List of anything, usually values of bin edges
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1']
"""
return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
for i, j in zip(bins, bins[1:])]
def _check_prob_dist(x):
if np.any(x < 0):
raise ValueError('Each column of the input dataframes must be '
'**non-negative** probability distributions')
try:
if np.any(np.abs(x.sum() - np.ones(x.shape[1])) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
except IndexError:
if np.any(np.abs(x.sum() - 1) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
def binify(data, bins):
"""Makes a histogram of each column the provided binsize
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows
"""
if bins is None:
raise ValueError('Must specify "bins"')
if isinstance(data, pd.DataFrame):
binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,
range=(0, 1))[0]))
elif isinstance(data, pd.Series):
binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
else:
raise ValueError('`data` must be either a 1d vector or 2d matrix')
binned.index = bin_range_strings(bins)
# Normalize so each column sums to 1
binned = binned / binned.sum().astype(float)
return binned
def kld(p, q):
"""Kullback-Leiber divergence of two probability distributions pandas
dataframes, p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
Kullback-Lieber divergence of the common columns between the
dataframe. E.g. between 1st column in p and 1st column in q, and 2nd
column in p and 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense.
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
# If one of them is zero, then the other should be considered to be 0.
# In this problem formulation, log0 = 0
p = p.replace(0, np.nan)
q = q.replace(0, np.nan)
return (np.log2(p / q) * p).sum(axis=0)
def jsd(p, q):
"""Finds the per-column JSD between dataframes p and q
Jensen-Shannon divergence of two probability distrubutions pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
weight = 0.5
m = weight * (p + q)
result = weight * kld(p, m) + (1 - weight) * kld(q, m)
return result
def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
"""
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series
def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
n_bins : int
Number of bins to binify the singles data on
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
"""
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, None, bins))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, pair, bins)
jsds.append(series)
return pd.concat(jsds, axis=1)
def jsd_df_to_2d(jsd_df):
"""Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes
"""
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
values='jsd')
return jsd_2d + np.tril(jsd_2d.T, -1)
|
YeoLab/anchor | anchor/infotheory.py | binify_and_jsd | python | def binify_and_jsd(df1, df2, bins, pair=None):
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series | Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2 | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L190-L215 | [
"def binify(data, bins):\n \"\"\"Makes a histogram of each column the provided binsize\n\n Parameters\n ----------\n data : pandas.DataFrame\n A samples x features dataframe. Each feature (column) will be binned\n into the provided bins\n bins : iterable\n Bins you would like to ... | """
Information-theoretic calculations
"""
import numpy as np
import pandas as pd
from sklearn import cross_validation
EPSILON = 100 * np.finfo(float).eps
def bin_range_strings(bins, fmt=':g'):
"""Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
bins : list_like
List of anything, usually values of bin edges
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1']
"""
return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
for i, j in zip(bins, bins[1:])]
def _check_prob_dist(x):
if np.any(x < 0):
raise ValueError('Each column of the input dataframes must be '
'**non-negative** probability distributions')
try:
if np.any(np.abs(x.sum() - np.ones(x.shape[1])) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
except IndexError:
if np.any(np.abs(x.sum() - 1) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
def binify(data, bins):
"""Makes a histogram of each column the provided binsize
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows
"""
if bins is None:
raise ValueError('Must specify "bins"')
if isinstance(data, pd.DataFrame):
binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,
range=(0, 1))[0]))
elif isinstance(data, pd.Series):
binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
else:
raise ValueError('`data` must be either a 1d vector or 2d matrix')
binned.index = bin_range_strings(bins)
# Normalize so each column sums to 1
binned = binned / binned.sum().astype(float)
return binned
def kld(p, q):
"""Kullback-Leiber divergence of two probability distributions pandas
dataframes, p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
Kullback-Lieber divergence of the common columns between the
dataframe. E.g. between 1st column in p and 1st column in q, and 2nd
column in p and 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense.
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
# If one of them is zero, then the other should be considered to be 0.
# In this problem formulation, log0 = 0
p = p.replace(0, np.nan)
q = q.replace(0, np.nan)
return (np.log2(p / q) * p).sum(axis=0)
def jsd(p, q):
"""Finds the per-column JSD between dataframes p and q
Jensen-Shannon divergence of two probability distrubutions pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
weight = 0.5
m = weight * (p + q)
result = weight * kld(p, m) + (1 - weight) * kld(q, m)
return result
def entropy(binned, base=2):
"""Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
entropy : pandas.Seires
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(binned)
except ValueError:
np.nan
return -((np.log(binned) / np.log(base)) * binned).sum(axis=0)
def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
n_bins : int
Number of bins to binify the singles data on
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
"""
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, None, bins))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, pair, bins)
jsds.append(series)
return pd.concat(jsds, axis=1)
def jsd_df_to_2d(jsd_df):
"""Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes
"""
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
values='jsd')
return jsd_2d + np.tril(jsd_2d.T, -1)
|
YeoLab/anchor | anchor/infotheory.py | cross_phenotype_jsd | python | def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, None, bins))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, pair, bins)
jsds.append(series)
return pd.concat(jsds, axis=1) | Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
n_bins : int
Number of bins to binify the singles data on
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L218-L266 | [
"def binify_and_jsd(df1, df2, bins, pair=None):\n \"\"\"Binify and calculate jensen-shannon divergence between two dataframes\n\n Parameters\n ----------\n df1, df2 : pandas.DataFrames\n Dataframes to calculate JSD between columns of. Must have overlapping\n column names\n bins : array-... | """
Information-theoretic calculations
"""
import numpy as np
import pandas as pd
from sklearn import cross_validation
EPSILON = 100 * np.finfo(float).eps
def bin_range_strings(bins, fmt=':g'):
"""Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
bins : list_like
List of anything, usually values of bin edges
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1']
"""
return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
for i, j in zip(bins, bins[1:])]
def _check_prob_dist(x):
if np.any(x < 0):
raise ValueError('Each column of the input dataframes must be '
'**non-negative** probability distributions')
try:
if np.any(np.abs(x.sum() - np.ones(x.shape[1])) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
except IndexError:
if np.any(np.abs(x.sum() - 1) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
def binify(data, bins):
"""Makes a histogram of each column the provided binsize
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows
"""
if bins is None:
raise ValueError('Must specify "bins"')
if isinstance(data, pd.DataFrame):
binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,
range=(0, 1))[0]))
elif isinstance(data, pd.Series):
binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
else:
raise ValueError('`data` must be either a 1d vector or 2d matrix')
binned.index = bin_range_strings(bins)
# Normalize so each column sums to 1
binned = binned / binned.sum().astype(float)
return binned
def kld(p, q):
"""Kullback-Leiber divergence of two probability distributions pandas
dataframes, p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
Kullback-Lieber divergence of the common columns between the
dataframe. E.g. between 1st column in p and 1st column in q, and 2nd
column in p and 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense.
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
# If one of them is zero, then the other should be considered to be 0.
# In this problem formulation, log0 = 0
p = p.replace(0, np.nan)
q = q.replace(0, np.nan)
return (np.log2(p / q) * p).sum(axis=0)
def jsd(p, q):
"""Finds the per-column JSD between dataframes p and q
Jensen-Shannon divergence of two probability distrubutions pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
weight = 0.5
m = weight * (p + q)
result = weight * kld(p, m) + (1 - weight) * kld(q, m)
return result
def entropy(binned, base=2):
"""Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
entropy : pandas.Seires
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(binned)
except ValueError:
np.nan
return -((np.log(binned) / np.log(base)) * binned).sum(axis=0)
def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
"""
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series
def jsd_df_to_2d(jsd_df):
"""Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes
"""
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
values='jsd')
return jsd_2d + np.tril(jsd_2d.T, -1)
|
YeoLab/anchor | anchor/infotheory.py | jsd_df_to_2d | python | def jsd_df_to_2d(jsd_df):
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
values='jsd')
return jsd_2d + np.tril(jsd_2d.T, -1) | Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L269-L289 | null | """
Information-theoretic calculations
"""
import numpy as np
import pandas as pd
from sklearn import cross_validation
EPSILON = 100 * np.finfo(float).eps
def bin_range_strings(bins, fmt=':g'):
"""Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
bins : list_like
List of anything, usually values of bin edges
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1']
"""
return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
for i, j in zip(bins, bins[1:])]
def _check_prob_dist(x):
if np.any(x < 0):
raise ValueError('Each column of the input dataframes must be '
'**non-negative** probability distributions')
try:
if np.any(np.abs(x.sum() - np.ones(x.shape[1])) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
except IndexError:
if np.any(np.abs(x.sum() - 1) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
def binify(data, bins):
"""Makes a histogram of each column the provided binsize
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows
"""
if bins is None:
raise ValueError('Must specify "bins"')
if isinstance(data, pd.DataFrame):
binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,
range=(0, 1))[0]))
elif isinstance(data, pd.Series):
binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
else:
raise ValueError('`data` must be either a 1d vector or 2d matrix')
binned.index = bin_range_strings(bins)
# Normalize so each column sums to 1
binned = binned / binned.sum().astype(float)
return binned
def kld(p, q):
"""Kullback-Leiber divergence of two probability distributions pandas
dataframes, p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
Kullback-Lieber divergence of the common columns between the
dataframe. E.g. between 1st column in p and 1st column in q, and 2nd
column in p and 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense.
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
# If one of them is zero, then the other should be considered to be 0.
# In this problem formulation, log0 = 0
p = p.replace(0, np.nan)
q = q.replace(0, np.nan)
return (np.log2(p / q) * p).sum(axis=0)
def jsd(p, q):
"""Finds the per-column JSD between dataframes p and q
Jensen-Shannon divergence of two probability distrubutions pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
weight = 0.5
m = weight * (p + q)
result = weight * kld(p, m) + (1 - weight) * kld(q, m)
return result
def entropy(binned, base=2):
"""Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
entropy : pandas.Seires
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(binned)
except ValueError:
np.nan
return -((np.log(binned) / np.log(base)) * binned).sum(axis=0)
def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
"""
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series
def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
n_bins : int
Number of bins to binify the singles data on
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
"""
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, None, bins))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, pair, bins)
jsds.append(series)
return pd.concat(jsds, axis=1)
|
cuducos/getgist | getgist/__main__.py | run_getgist | python | def run_getgist(filename, user, **kwargs):
assume_yes = kwargs.get("yes_to_all")
getgist = GetGist(user=user, filename=filename, assume_yes=assume_yes)
getgist.get() | Passes user inputs to GetGist() and calls get() | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__main__.py#L107-L111 | [
"def get(self):\n \"\"\"Reads the remote file from Gist and save it locally\"\"\"\n if self.gist:\n content = self.github.read_gist_file(self.gist)\n self.local.save(content)\n"
] | from os import getenv
from click import argument, command, option
from getgist.github import GitHubTools
from getgist.local import LocalTools
GETGIST_DESC = """
GetGist downloads any file from a GitHub Gist, with one single command.
Usage: `getgist <GitHub username> <file name from any file inside a gist>`.
If you set GETGIST_USER envvar with your GitHub username, you can use the
shortcut `geymy <file name>` (see `getmy --help` for details).
If you set GETGIST_TOKEN envvar with your personal access token (see
https://github.com/settings/tokens for details) you can get get priavte
gists from your account and you can upload local changes to your gist repo
(see `putmy --help` for details).
"""
GETMY_DESC = """
Call `getgist` assuming the user is set in an envvar called GETGIST_USER.
See `getgist --help` for more details.
"""
PUTGIST_DESC = """
PutGist uploads any file to a GitHub Gist, with one single command.
Usage: `putgist <GitHub username> <file name>`.
You have to set the GETGIST_TOKEN envvar with your personal access token
(see https://github.com/settings/tokens for details).
If you set GETGIST_USER envvar with your GitHub username, you can use the
shortcut `putmy <file name>` (see `getmy --help` for details).
"""
PUTMY_DESC = """
Call `putgist` assuming the user is set in an envvar called GETGIST_USER.
See `putgist --help` for more details.
"""
class GetGist(object):
"""
Main GetGist objects linking inputs from the CLI to the helpers from
GitHubTools (to deal with the API) and LocalTools (to deal with the local
file system.
"""
def __init__(self, **kwargs):
"""
Instantiate GitHubTools & LocalTools, and set the variables required
to get, create or update gists (filename and public/private flag)
:param user: (str) GitHub username
:param filename: (str) name of file from any Gist or local file system
:param allow_none: (bool) flag to use GitHubTools.select_gist
differently with `getgist` and `putgist` commands (if no gist/filename
is found it raises an error for `getgist`, or sets `putgist` to create
a new gist).
:param create_private: (bool) create a new gist as private
:param assume_yes: (bool) assume yes (or first option) for all prompts
:return: (None)
"""
# get arguments
user = kwargs.get("user")
allow_none = kwargs.get("allow_none", False)
assume_yes = kwargs.get("assume_yes", False)
filename = kwargs.get("filename")
self.public = not kwargs.get("create_private", False)
# instantiate local tools & check for user
self.local = LocalTools(filename, assume_yes)
if not user:
message = """
No default user set yet. To avoid this prompt set an
environmental variable called `GETGIST_USER`.'
"""
self.local.oops(message)
# instantiate filename, guthub tools and fetch gist
self.github = GitHubTools(user, filename, assume_yes)
self.gist = self.github.select_gist(allow_none)
def get(self):
"""Reads the remote file from Gist and save it locally"""
if self.gist:
content = self.github.read_gist_file(self.gist)
self.local.save(content)
def put(self):
""" Reads local file & update the remote gist (or create a new one)"""
content = self.local.read()
if self.gist:
self.github.update(self.gist, content)
else:
self.github.create(content, public=self.public)
@command(help=GETGIST_DESC)
@option("--yes-to-all", "-y", is_flag=True, help="Assume yes to all prompts.")
@argument("user")
@argument("filename")
@command(help=GETMY_DESC)
@option("--yes-to-all", "-y", is_flag=True, help="Assume yes to all prompts.")
@argument("filename")
def run_getmy(filename, **kwargs):
"""Shortcut for run_getgist() reading username from env var"""
assume_yes = kwargs.get("yes_to_all")
user = getenv("GETGIST_USER")
getgist = GetGist(user=user, filename=filename, assume_yes=assume_yes)
getgist.get()
@command(help=PUTGIST_DESC)
@option("--yes-to-all", "-y", is_flag=True, help="Assume yes to all prompts.")
@option("--private", "-p", is_flag=True, help="Crete new gist as private")
@argument("user")
@argument("filename")
def run_putgist(filename, user, **kwargs):
"""Passes user inputs to GetGist() and calls put()"""
assume_yes = kwargs.get("yes_to_all")
private = kwargs.get("private")
getgist = GetGist(
user=user,
filename=filename,
assume_yes=assume_yes,
create_private=private,
allow_none=True,
)
getgist.put()
@command(help=PUTMY_DESC)
@option("--yes-to-all", "-y", is_flag=True, help="Assume yes to all prompts.")
@option("--private", "-p", is_flag=True, help="Crete new gist as private")
@argument("filename")
def run_putmy(filename, **kwargs):
"""Shortcut for run_putgist() reading username from env var"""
assume_yes = kwargs.get("yes_to_all")
private = kwargs.get("private")
user = getenv("GETGIST_USER")
getgist = GetGist(
user=user,
filename=filename,
assume_yes=assume_yes,
create_private=private,
allow_none=True,
)
getgist.put()
|
cuducos/getgist | getgist/__main__.py | run_getmy | python | def run_getmy(filename, **kwargs):
assume_yes = kwargs.get("yes_to_all")
user = getenv("GETGIST_USER")
getgist = GetGist(user=user, filename=filename, assume_yes=assume_yes)
getgist.get() | Shortcut for run_getgist() reading username from env var | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__main__.py#L117-L122 | [
"def get(self):\n \"\"\"Reads the remote file from Gist and save it locally\"\"\"\n if self.gist:\n content = self.github.read_gist_file(self.gist)\n self.local.save(content)\n"
] | from os import getenv
from click import argument, command, option
from getgist.github import GitHubTools
from getgist.local import LocalTools
GETGIST_DESC = """
GetGist downloads any file from a GitHub Gist, with one single command.
Usage: `getgist <GitHub username> <file name from any file inside a gist>`.
If you set GETGIST_USER envvar with your GitHub username, you can use the
shortcut `geymy <file name>` (see `getmy --help` for details).
If you set GETGIST_TOKEN envvar with your personal access token (see
https://github.com/settings/tokens for details) you can get get priavte
gists from your account and you can upload local changes to your gist repo
(see `putmy --help` for details).
"""
GETMY_DESC = """
Call `getgist` assuming the user is set in an envvar called GETGIST_USER.
See `getgist --help` for more details.
"""
PUTGIST_DESC = """
PutGist uploads any file to a GitHub Gist, with one single command.
Usage: `putgist <GitHub username> <file name>`.
You have to set the GETGIST_TOKEN envvar with your personal access token
(see https://github.com/settings/tokens for details).
If you set GETGIST_USER envvar with your GitHub username, you can use the
shortcut `putmy <file name>` (see `getmy --help` for details).
"""
PUTMY_DESC = """
Call `putgist` assuming the user is set in an envvar called GETGIST_USER.
See `putgist --help` for more details.
"""
class GetGist(object):
"""
Main GetGist objects linking inputs from the CLI to the helpers from
GitHubTools (to deal with the API) and LocalTools (to deal with the local
file system.
"""
def __init__(self, **kwargs):
"""
Instantiate GitHubTools & LocalTools, and set the variables required
to get, create or update gists (filename and public/private flag)
:param user: (str) GitHub username
:param filename: (str) name of file from any Gist or local file system
:param allow_none: (bool) flag to use GitHubTools.select_gist
differently with `getgist` and `putgist` commands (if no gist/filename
is found it raises an error for `getgist`, or sets `putgist` to create
a new gist).
:param create_private: (bool) create a new gist as private
:param assume_yes: (bool) assume yes (or first option) for all prompts
:return: (None)
"""
# get arguments
user = kwargs.get("user")
allow_none = kwargs.get("allow_none", False)
assume_yes = kwargs.get("assume_yes", False)
filename = kwargs.get("filename")
self.public = not kwargs.get("create_private", False)
# instantiate local tools & check for user
self.local = LocalTools(filename, assume_yes)
if not user:
message = """
No default user set yet. To avoid this prompt set an
environmental variable called `GETGIST_USER`.'
"""
self.local.oops(message)
# instantiate filename, guthub tools and fetch gist
self.github = GitHubTools(user, filename, assume_yes)
self.gist = self.github.select_gist(allow_none)
def get(self):
"""Reads the remote file from Gist and save it locally"""
if self.gist:
content = self.github.read_gist_file(self.gist)
self.local.save(content)
def put(self):
""" Reads local file & update the remote gist (or create a new one)"""
content = self.local.read()
if self.gist:
self.github.update(self.gist, content)
else:
self.github.create(content, public=self.public)
@command(help=GETGIST_DESC)
@option("--yes-to-all", "-y", is_flag=True, help="Assume yes to all prompts.")
@argument("user")
@argument("filename")
def run_getgist(filename, user, **kwargs):
"""Passes user inputs to GetGist() and calls get()"""
assume_yes = kwargs.get("yes_to_all")
getgist = GetGist(user=user, filename=filename, assume_yes=assume_yes)
getgist.get()
@command(help=GETMY_DESC)
@option("--yes-to-all", "-y", is_flag=True, help="Assume yes to all prompts.")
@argument("filename")
@command(help=PUTGIST_DESC)
@option("--yes-to-all", "-y", is_flag=True, help="Assume yes to all prompts.")
@option("--private", "-p", is_flag=True, help="Crete new gist as private")
@argument("user")
@argument("filename")
def run_putgist(filename, user, **kwargs):
"""Passes user inputs to GetGist() and calls put()"""
assume_yes = kwargs.get("yes_to_all")
private = kwargs.get("private")
getgist = GetGist(
user=user,
filename=filename,
assume_yes=assume_yes,
create_private=private,
allow_none=True,
)
getgist.put()
@command(help=PUTMY_DESC)
@option("--yes-to-all", "-y", is_flag=True, help="Assume yes to all prompts.")
@option("--private", "-p", is_flag=True, help="Crete new gist as private")
@argument("filename")
def run_putmy(filename, **kwargs):
"""Shortcut for run_putgist() reading username from env var"""
assume_yes = kwargs.get("yes_to_all")
private = kwargs.get("private")
user = getenv("GETGIST_USER")
getgist = GetGist(
user=user,
filename=filename,
assume_yes=assume_yes,
create_private=private,
allow_none=True,
)
getgist.put()
|
cuducos/getgist | getgist/__main__.py | run_putgist | python | def run_putgist(filename, user, **kwargs):
assume_yes = kwargs.get("yes_to_all")
private = kwargs.get("private")
getgist = GetGist(
user=user,
filename=filename,
assume_yes=assume_yes,
create_private=private,
allow_none=True,
)
getgist.put() | Passes user inputs to GetGist() and calls put() | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__main__.py#L130-L141 | [
"def put(self):\n \"\"\" Reads local file & update the remote gist (or create a new one)\"\"\"\n content = self.local.read()\n if self.gist:\n self.github.update(self.gist, content)\n else:\n self.github.create(content, public=self.public)\n"
] | from os import getenv
from click import argument, command, option
from getgist.github import GitHubTools
from getgist.local import LocalTools
GETGIST_DESC = """
GetGist downloads any file from a GitHub Gist, with one single command.
Usage: `getgist <GitHub username> <file name from any file inside a gist>`.
If you set GETGIST_USER envvar with your GitHub username, you can use the
shortcut `geymy <file name>` (see `getmy --help` for details).
If you set GETGIST_TOKEN envvar with your personal access token (see
https://github.com/settings/tokens for details) you can get get priavte
gists from your account and you can upload local changes to your gist repo
(see `putmy --help` for details).
"""
GETMY_DESC = """
Call `getgist` assuming the user is set in an envvar called GETGIST_USER.
See `getgist --help` for more details.
"""
PUTGIST_DESC = """
PutGist uploads any file to a GitHub Gist, with one single command.
Usage: `putgist <GitHub username> <file name>`.
You have to set the GETGIST_TOKEN envvar with your personal access token
(see https://github.com/settings/tokens for details).
If you set GETGIST_USER envvar with your GitHub username, you can use the
shortcut `putmy <file name>` (see `getmy --help` for details).
"""
PUTMY_DESC = """
Call `putgist` assuming the user is set in an envvar called GETGIST_USER.
See `putgist --help` for more details.
"""
class GetGist(object):
"""
Main GetGist objects linking inputs from the CLI to the helpers from
GitHubTools (to deal with the API) and LocalTools (to deal with the local
file system.
"""
def __init__(self, **kwargs):
"""
Instantiate GitHubTools & LocalTools, and set the variables required
to get, create or update gists (filename and public/private flag)
:param user: (str) GitHub username
:param filename: (str) name of file from any Gist or local file system
:param allow_none: (bool) flag to use GitHubTools.select_gist
differently with `getgist` and `putgist` commands (if no gist/filename
is found it raises an error for `getgist`, or sets `putgist` to create
a new gist).
:param create_private: (bool) create a new gist as private
:param assume_yes: (bool) assume yes (or first option) for all prompts
:return: (None)
"""
# get arguments
user = kwargs.get("user")
allow_none = kwargs.get("allow_none", False)
assume_yes = kwargs.get("assume_yes", False)
filename = kwargs.get("filename")
self.public = not kwargs.get("create_private", False)
# instantiate local tools & check for user
self.local = LocalTools(filename, assume_yes)
if not user:
message = """
No default user set yet. To avoid this prompt set an
environmental variable called `GETGIST_USER`.'
"""
self.local.oops(message)
# instantiate filename, guthub tools and fetch gist
self.github = GitHubTools(user, filename, assume_yes)
self.gist = self.github.select_gist(allow_none)
def get(self):
"""Reads the remote file from Gist and save it locally"""
if self.gist:
content = self.github.read_gist_file(self.gist)
self.local.save(content)
def put(self):
""" Reads local file & update the remote gist (or create a new one)"""
content = self.local.read()
if self.gist:
self.github.update(self.gist, content)
else:
self.github.create(content, public=self.public)
@command(help=GETGIST_DESC)
@option("--yes-to-all", "-y", is_flag=True, help="Assume yes to all prompts.")
@argument("user")
@argument("filename")
def run_getgist(filename, user, **kwargs):
"""Passes user inputs to GetGist() and calls get()"""
assume_yes = kwargs.get("yes_to_all")
getgist = GetGist(user=user, filename=filename, assume_yes=assume_yes)
getgist.get()
@command(help=GETMY_DESC)
@option("--yes-to-all", "-y", is_flag=True, help="Assume yes to all prompts.")
@argument("filename")
def run_getmy(filename, **kwargs):
"""Shortcut for run_getgist() reading username from env var"""
assume_yes = kwargs.get("yes_to_all")
user = getenv("GETGIST_USER")
getgist = GetGist(user=user, filename=filename, assume_yes=assume_yes)
getgist.get()
@command(help=PUTGIST_DESC)
@option("--yes-to-all", "-y", is_flag=True, help="Assume yes to all prompts.")
@option("--private", "-p", is_flag=True, help="Crete new gist as private")
@argument("user")
@argument("filename")
@command(help=PUTMY_DESC)
@option("--yes-to-all", "-y", is_flag=True, help="Assume yes to all prompts.")
@option("--private", "-p", is_flag=True, help="Crete new gist as private")
@argument("filename")
def run_putmy(filename, **kwargs):
"""Shortcut for run_putgist() reading username from env var"""
assume_yes = kwargs.get("yes_to_all")
private = kwargs.get("private")
user = getenv("GETGIST_USER")
getgist = GetGist(
user=user,
filename=filename,
assume_yes=assume_yes,
create_private=private,
allow_none=True,
)
getgist.put()
|
cuducos/getgist | getgist/__main__.py | GetGist.get | python | def get(self):
if self.gist:
content = self.github.read_gist_file(self.gist)
self.local.save(content) | Reads the remote file from Gist and save it locally | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__main__.py#L88-L92 | [
"def read_gist_file(self, gist):\n \"\"\"\n Returns the contents of file hosted inside a gist at GitHub.\n :param gist: (dict) gist parsed by GitHubTools._parse()\n :return: (bytes) content of a gist loaded from GitHub\n \"\"\"\n url = False\n files = gist.get(\"files\")\n for gist_file in f... | class GetGist(object):
"""
Main GetGist objects linking inputs from the CLI to the helpers from
GitHubTools (to deal with the API) and LocalTools (to deal with the local
file system.
"""
def __init__(self, **kwargs):
"""
Instantiate GitHubTools & LocalTools, and set the variables required
to get, create or update gists (filename and public/private flag)
:param user: (str) GitHub username
:param filename: (str) name of file from any Gist or local file system
:param allow_none: (bool) flag to use GitHubTools.select_gist
differently with `getgist` and `putgist` commands (if no gist/filename
is found it raises an error for `getgist`, or sets `putgist` to create
a new gist).
:param create_private: (bool) create a new gist as private
:param assume_yes: (bool) assume yes (or first option) for all prompts
:return: (None)
"""
# get arguments
user = kwargs.get("user")
allow_none = kwargs.get("allow_none", False)
assume_yes = kwargs.get("assume_yes", False)
filename = kwargs.get("filename")
self.public = not kwargs.get("create_private", False)
# instantiate local tools & check for user
self.local = LocalTools(filename, assume_yes)
if not user:
message = """
No default user set yet. To avoid this prompt set an
environmental variable called `GETGIST_USER`.'
"""
self.local.oops(message)
# instantiate filename, guthub tools and fetch gist
self.github = GitHubTools(user, filename, assume_yes)
self.gist = self.github.select_gist(allow_none)
def put(self):
""" Reads local file & update the remote gist (or create a new one)"""
content = self.local.read()
if self.gist:
self.github.update(self.gist, content)
else:
self.github.create(content, public=self.public)
|
cuducos/getgist | getgist/__main__.py | GetGist.put | python | def put(self):
content = self.local.read()
if self.gist:
self.github.update(self.gist, content)
else:
self.github.create(content, public=self.public) | Reads local file & update the remote gist (or create a new one) | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__main__.py#L94-L100 | [
"def read(self, file_path=None):\n \"\"\"\n Read the contents of a file.\n :param filename: (str) path to a file in the local file system\n :return: (str) contents of the file, or (False) if not found/not file\n \"\"\"\n if not file_path:\n file_path = self.file_path\n\n # abort if the f... | class GetGist(object):
"""
Main GetGist objects linking inputs from the CLI to the helpers from
GitHubTools (to deal with the API) and LocalTools (to deal with the local
file system.
"""
def __init__(self, **kwargs):
"""
Instantiate GitHubTools & LocalTools, and set the variables required
to get, create or update gists (filename and public/private flag)
:param user: (str) GitHub username
:param filename: (str) name of file from any Gist or local file system
:param allow_none: (bool) flag to use GitHubTools.select_gist
differently with `getgist` and `putgist` commands (if no gist/filename
is found it raises an error for `getgist`, or sets `putgist` to create
a new gist).
:param create_private: (bool) create a new gist as private
:param assume_yes: (bool) assume yes (or first option) for all prompts
:return: (None)
"""
# get arguments
user = kwargs.get("user")
allow_none = kwargs.get("allow_none", False)
assume_yes = kwargs.get("assume_yes", False)
filename = kwargs.get("filename")
self.public = not kwargs.get("create_private", False)
# instantiate local tools & check for user
self.local = LocalTools(filename, assume_yes)
if not user:
message = """
No default user set yet. To avoid this prompt set an
environmental variable called `GETGIST_USER`.'
"""
self.local.oops(message)
# instantiate filename, guthub tools and fetch gist
self.github = GitHubTools(user, filename, assume_yes)
self.gist = self.github.select_gist(allow_none)
def get(self):
"""Reads the remote file from Gist and save it locally"""
if self.gist:
content = self.github.read_gist_file(self.gist)
self.local.save(content)
|
cuducos/getgist | getgist/github.py | oauth_only | python | def oauth_only(function):
def check_for_oauth(self, *args, **kwargs):
"""
Returns False if GitHubTools instance is not authenticated, or return
the decorated fucntion if it is.
"""
if not self.is_authenticated:
self.oops("To use putgist you have to set your GETGIST_TOKEN")
self.oops("(see `putgist --help` for details)")
return False
return function(self, *args, **kwargs)
return check_for_oauth | Decorator to restrict some GitHubTools methods to run only with OAuth | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L11-L25 | null | import os
from json import dumps
from pkg_resources import get_distribution
from click import prompt
from getgist import GetGistCommons
from getgist.request import GetGistRequests
class GitHubTools(GetGistCommons):
    """Helpers to deal with the GitHub API and manipulate gists."""
    # Client version string, reported in the User-Agent header below.
    version = get_distribution("getgist").version
    api_root_url = "https://api.github.com/"
    # NOTE(review): `headers` is a class attribute, so it is shared by every
    # instance; add_oauth_header() mutates it in place. That is fine for the
    # CLI's single-instance usage, but verify before reusing this class.
    headers = {
        "Accept": "application/vnd.github.v3+json",
        "User-Agent": "GetGist v{}".format(version),
    }
    requests = GetGistRequests(headers)
    # Flipped to True by add_oauth_header() once the token is validated.
    is_authenticated = False
    def __init__(self, user, file_path, assume_yes=False):
        """
        Save basic variables to all methods, instantiate GetGistRequests and
        call the OAuth method.
        :param user: (str) GitHub username
        :param file_path: (str) file_path to be saved (locally), created or
        updated (remotely)
        :param assume_yes: (bool) assume yes (or first option) for all prompts
        :return: (None)
        """
        self.user = user
        self.file_path = file_path
        self.filename = os.path.basename(file_path)
        self.assume_yes = assume_yes
        self.add_oauth_header()
    def add_oauth_header(self):
        """
        Validate the GETGIST_TOKEN and, if valid, add the Authorization
        header used by all further requests.
        :return: (None)
        """
        # abort if no token
        oauth_token = self._get_token()
        if not oauth_token:
            return
        # add oauth header & reach the api to confirm the token works
        self.headers["Authorization"] = "token " + oauth_token
        url = self._api_url("user")
        raw_resp = self.requests.get(url)
        resp = raw_resp.json()
        # abort & remove header if token is invalid (the token's login must
        # match the requested username)
        if resp.get("login", None) != self.user:
            self.oops("Invalid token for user " + self.user)
            self.headers.pop("Authorization")
            return
        self.is_authenticated = True
        self.yeah("User {} authenticated".format(self.user))
    def get_gists(self):
        """
        List generator containing gist relevant information
        such as id, description, filenames and raw URL (dict).
        """
        # fetch all gists; the authenticated endpoint also returns private
        # gists, the public per-user endpoint does not
        if self.is_authenticated:
            url = self._api_url("gists")
        else:
            url = self._api_url("users", self.user, "gists")
        self.output("Fetching " + url)
        raw_resp = self.requests.get(url)
        # abort if user not found
        if raw_resp.status_code != 200:
            self.oops("User `{}` not found".format(self.user))
            return
        # abort if there are no gists
        resp = raw_resp.json()
        if not resp:
            self.oops("No gists found for user `{}`".format(self.user))
            return
        # parse response, one simplified dict per gist
        for gist in raw_resp.json():
            yield self._parse_gist(gist)
    def select_gist(self, allow_none=False):
        """
        Given the requested filename, select the proper gist; if more than
        one gist is found with the given filename, the user is asked to
        choose one.
        :allow_none: (bool) for `getgist` it should raise an error if no gist
        is found, but setting this argument to True avoids this error, which
        is useful when `putgist` is calling this method
        :return: (dict) selected gist
        """
        # pick up all matching gists
        matches = list()
        for gist in self.get_gists():
            for gist_file in gist.get("files"):
                if self.filename == gist_file.get("filename"):
                    matches.append(gist)
        # abort if no match is found (None for putgist, False for getgist)
        if not matches:
            if allow_none:
                return None
            else:
                msg = "No file named `{}` found in {}'s gists"
                self.oops(msg.format(self.file_path, self.user))
                if not self.is_authenticated:
                    self.warn("To access private gists set the GETGIST_TOKEN")
                    self.warn("(see `getgist --help` for details)")
                return False
        # return right away if there is only one match (or --yes was used)
        if len(matches) == 1 or self.assume_yes:
            return matches.pop(0)
        return self._ask_which_gist(matches)
    def read_gist_file(self, gist):
        """
        Return the contents of a file hosted inside a gist at GitHub.
        :param gist: (dict) gist parsed by GitHubTools._parse_gist()
        :return: (bytes) content of a gist loaded from GitHub
        """
        url = False
        files = gist.get("files")
        # find the raw URL of the requested filename inside the gist
        for gist_file in files:
            if gist_file.get("filename") == self.filename:
                url = gist_file.get("raw_url")
                break
        # NOTE(review): implicitly returns None when the filename is absent
        if url:
            self.output("Reading {}".format(url))
            response = self.requests.get(url)
            return response.content
    @oauth_only
    def update(self, gist, content):
        """
        Update the contents of a file hosted inside a gist at GitHub.
        Requires authentication (see the oauth_only decorator).
        :param gist: (dict) gist parsed by GitHubTools._parse_gist()
        :param content: (str or bytes) to be written
        :return: (bool) indicating the success or failure of the update
        """
        # abort if content is False (e.g. the local file could not be read)
        if content is False:
            return False
        # request
        url = self._api_url("gists", gist.get("id"))
        data = {"files": {self.filename: {"content": content}}}
        self.output("Sending contents of {} to {}".format(self.file_path, url))
        response = self.requests.patch(url, data=dumps(data))
        # error
        if response.status_code != 200:
            self.oops("Could not update " + gist.get("description"))
            self.oops("PATCH request returned " + str(response.status_code))
            return False
        # success
        self.yeah("Done!")
        self.hey("The URL to this Gist is: {}".format(gist["url"]))
        return True
    @oauth_only
    def create(self, content, **kwargs):
        """
        Create a new gist. Requires authentication (see oauth_only).
        :param content: (str or bytes) to be written
        :param public: (bool) defines if the gist is public or private
        :return: (bool) indicating the success or failure of the creation
        """
        # abort if content is False (e.g. the local file could not be read)
        if content is False:
            return False
        # set new gist (its description defaults to the filename)
        public = bool(kwargs.get("public", True))
        data = {
            "description": self.filename,
            "public": public,
            "files": {self.filename: {"content": content}},
        }
        # send request
        url = self._api_url("gists")
        self.output("Sending contents of {} to {}".format(self.file_path, url))
        response = self.requests.post(url, data=dumps(data))
        # error (the API answers 201 Created on success)
        if response.status_code != 201:
            self.oops("Could not create " + self.filename)
            self.oops("POST request returned " + str(response.status_code))
            return False
        # parse created gist
        gist = self._parse_gist(response.json())
        # success
        self.yeah("Done!")
        self.hey("The URL to this Gist is: {}".format(gist["url"]))
        return True
    def _ask_which_gist(self, matches):
        """
        Ask the user which gist to use in case more than one gist matches
        the instance filename.
        :param matches: (list) of dictionaries generated within select_gist()
        :return: (dict) of the selected gist
        """
        # ask user which gist to use
        self.hey("Use {} from which gist?".format(self.filename))
        for count, gist in enumerate(matches, 1):
            self.hey("[{}] {}".format(count, gist.get("description")))
        # get the gist index (displayed 1-based, stored 0-based)
        # NOTE(review): a non-positive entry (0 or negative) indexes from the
        # end of the list instead of re-prompting — verify this is intended
        selected = False
        while not selected:
            gist_index = prompt("Type the number: ", type=int) - 1
            try:
                selected = matches[gist_index]
            except IndexError:
                self.oops("Invalid number, please try again.")
        self.output("Using `{}` Gist".format(selected["description"]))
        return selected
    def _api_url(self, *args):
        """Build an API endpoint URL joining the arguments with slashes."""
        return self.api_root_url + "/".join(args)
    @staticmethod
    def _parse_gist(gist):
        """Receive a gist (dict from the GitHub API) and parse it into the
        simplified dict format used internally by GetGist."""
        # parse files (sorted by filename for deterministic output)
        files = list()
        file_names = sorted(filename for filename in gist["files"].keys())
        for name in file_names:
            files.append(
                dict(filename=name, raw_url=gist["files"][name].get("raw_url"))
            )
        # parse description (fall back to the first filename when empty)
        description = gist["description"]
        if not description:
            names = sorted(f.get("filename") for f in files)
            description = names.pop(0)
        return dict(
            description=description,
            id=gist.get("id"),
            files=files,
            url=gist.get("html_url"),
        )
    @staticmethod
    def _get_token():
        """Retrieve the OAuth token from the GETGIST_TOKEN env var."""
        return os.getenv("GETGIST_TOKEN")
|
cuducos/getgist | getgist/github.py | GitHubTools.add_oauth_header | python | def add_oauth_header(self):
# abort if no token
oauth_token = self._get_token()
if not oauth_token:
return
# add oauth header & reach the api
self.headers["Authorization"] = "token " + oauth_token
url = self._api_url("user")
raw_resp = self.requests.get(url)
resp = raw_resp.json()
# abort & remove header if token is invalid
if resp.get("login", None) != self.user:
self.oops("Invalid token for user " + self.user)
self.headers.pop("Authorization")
return
self.is_authenticated = True
self.yeah("User {} authenticated".format(self.user)) | Validate token and add the proper header for further requests.
:return: (None) | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L56-L79 | [
"def oops(self, message):\n \"\"\"Helper to colorize error messages\"\"\"\n return self.output(message, color=\"red\")\n",
"def yeah(self, message):\n \"\"\"Helper to colorize success messages\"\"\"\n return self.output(message, color=\"green\")\n",
"def _api_url(self, *args):\n \"\"\"Get entrypo... | class GitHubTools(GetGistCommons):
"""Helpers to deal with GitHub API and manipulate gists"""
version = get_distribution("getgist").version
api_root_url = "https://api.github.com/"
headers = {
"Accept": "application/vnd.github.v3+json",
"User-Agent": "GetGist v{}".format(version),
}
requests = GetGistRequests(headers)
is_authenticated = False
def __init__(self, user, file_path, assume_yes=False):
"""
Save basic variables to all methods, instantiate GetGistrequests and
calls the OAuth method.
:param user: (str) GitHub username
:param file_path: (str) file_path to be saved (locally), created or
updated (remotelly)
:param assume_yes: (bool) assume yes (or first option) for all prompts
:return: (None)
"""
self.user = user
self.file_path = file_path
self.filename = os.path.basename(file_path)
self.assume_yes = assume_yes
self.add_oauth_header()
def get_gists(self):
"""
List generator containing gist relevant information
such as id, description, filenames and raw URL (dict).
"""
# fetch all gists
if self.is_authenticated:
url = self._api_url("gists")
else:
url = self._api_url("users", self.user, "gists")
self.output("Fetching " + url)
raw_resp = self.requests.get(url)
# abort if user not found
if raw_resp.status_code != 200:
self.oops("User `{}` not found".format(self.user))
return
# abort if there are no gists
resp = raw_resp.json()
if not resp:
self.oops("No gists found for user `{}`".format(self.user))
return
# parse response
for gist in raw_resp.json():
yield self._parse_gist(gist)
def select_gist(self, allow_none=False):
"""
Given the requested filename, it selects the proper gist; if more than
one gist is found with the given filename, user is asked to choose.
:allow_none: (bool) for `getgist` it should raise error if no gist is
found, but setting this argument to True avoid this error, which is
useful when `putgist` is calling this method
:return: (dict) selected gist
"""
        # pick up all matching gists
matches = list()
for gist in self.get_gists():
for gist_file in gist.get("files"):
if self.filename == gist_file.get("filename"):
matches.append(gist)
# abort if no match is found
if not matches:
if allow_none:
return None
else:
msg = "No file named `{}` found in {}'s gists"
self.oops(msg.format(self.file_path, self.user))
if not self.is_authenticated:
self.warn("To access private gists set the GETGIST_TOKEN")
self.warn("(see `getgist --help` for details)")
return False
# return if there's is only one match
if len(matches) == 1 or self.assume_yes:
return matches.pop(0)
return self._ask_which_gist(matches)
def read_gist_file(self, gist):
"""
Returns the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse()
:return: (bytes) content of a gist loaded from GitHub
"""
url = False
files = gist.get("files")
for gist_file in files:
if gist_file.get("filename") == self.filename:
url = gist_file.get("raw_url")
break
if url:
self.output("Reading {}".format(url))
response = self.requests.get(url)
return response.content
@oauth_only
def update(self, gist, content):
"""
Updates the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse_gist()
:param content: (str or bytes) to be written
        :return: (bool) indicating the success or failure of the update
"""
# abort if content is False
if content is False:
return False
# request
url = self._api_url("gists", gist.get("id"))
data = {"files": {self.filename: {"content": content}}}
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.patch(url, data=dumps(data))
# error
if response.status_code != 200:
self.oops("Could not update " + gist.get("description"))
self.oops("PATCH request returned " + str(response.status_code))
return False
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True
@oauth_only
def create(self, content, **kwargs):
"""
Create a new gist.
:param gist: (dict) gist parsed by GitHubTools._parse()
:param content: (str or bytes) to be written
:param public: (bool) defines if the gist is public or private
:return: (bool) indicatind the success or failure of the creation
"""
# abort if content is False
if content is False:
return False
# set new gist
public = bool(kwargs.get("public", True))
data = {
"description": self.filename,
"public": public,
"files": {self.filename: {"content": content}},
}
# send request
url = self._api_url("gists")
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.post(url, data=dumps(data))
# error
if response.status_code != 201:
self.oops("Could not create " + self.filename)
self.oops("POST request returned " + str(response.status_code))
return False
# parse created gist
gist = self._parse_gist(response.json())
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True
def _ask_which_gist(self, matches):
"""
Asks user which gist to use in case of more than one gist matching the
instance filename.
        :param matches: (list) of dictionaries generated within select_gist()
:return: (dict) of the selected gist
"""
# ask user which gist to use
self.hey("Use {} from which gist?".format(self.filename))
for count, gist in enumerate(matches, 1):
self.hey("[{}] {}".format(count, gist.get("description")))
# get the gist index
selected = False
while not selected:
gist_index = prompt("Type the number: ", type=int) - 1
try:
selected = matches[gist_index]
except IndexError:
self.oops("Invalid number, please try again.")
self.output("Using `{}` Gist".format(selected["description"]))
return selected
def _api_url(self, *args):
"""Get entrypoints adding arguments separated by slashes"""
return self.api_root_url + "/".join(args)
@staticmethod
def _parse_gist(gist):
"""Receive a gist (dict) and parse it to GetGist"""
# parse files
files = list()
file_names = sorted(filename for filename in gist["files"].keys())
for name in file_names:
files.append(
dict(filename=name, raw_url=gist["files"][name].get("raw_url"))
)
# parse description
description = gist["description"]
if not description:
names = sorted(f.get("filename") for f in files)
description = names.pop(0)
return dict(
description=description,
id=gist.get("id"),
files=files,
url=gist.get("html_url"),
)
@staticmethod
def _get_token():
"""Retrieve username from env var"""
return os.getenv("GETGIST_TOKEN")
|
cuducos/getgist | getgist/github.py | GitHubTools.get_gists | python | def get_gists(self):
# fetch all gists
if self.is_authenticated:
url = self._api_url("gists")
else:
url = self._api_url("users", self.user, "gists")
self.output("Fetching " + url)
raw_resp = self.requests.get(url)
# abort if user not found
if raw_resp.status_code != 200:
self.oops("User `{}` not found".format(self.user))
return
# abort if there are no gists
resp = raw_resp.json()
if not resp:
self.oops("No gists found for user `{}`".format(self.user))
return
# parse response
for gist in raw_resp.json():
yield self._parse_gist(gist) | List generator containing gist relevant information
such as id, description, filenames and raw URL (dict). | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L81-L107 | [
"def output(self, message, color=None):\n \"\"\"\n A helper to used like print() or click's secho() tunneling all the\n outputs to sys.stdout or sys.stderr\n :param message: (str)\n :param color: (str) check click.secho() documentation\n :return: (None) prints to sys.stdout or sys.stderr\n \"\"... | class GitHubTools(GetGistCommons):
"""Helpers to deal with GitHub API and manipulate gists"""
version = get_distribution("getgist").version
api_root_url = "https://api.github.com/"
headers = {
"Accept": "application/vnd.github.v3+json",
"User-Agent": "GetGist v{}".format(version),
}
requests = GetGistRequests(headers)
is_authenticated = False
def __init__(self, user, file_path, assume_yes=False):
"""
Save basic variables to all methods, instantiate GetGistrequests and
calls the OAuth method.
:param user: (str) GitHub username
:param file_path: (str) file_path to be saved (locally), created or
updated (remotelly)
:param assume_yes: (bool) assume yes (or first option) for all prompts
:return: (None)
"""
self.user = user
self.file_path = file_path
self.filename = os.path.basename(file_path)
self.assume_yes = assume_yes
self.add_oauth_header()
def add_oauth_header(self):
"""
Validate token and add the proper header for further requests.
:return: (None)
"""
# abort if no token
oauth_token = self._get_token()
if not oauth_token:
return
# add oauth header & reach the api
self.headers["Authorization"] = "token " + oauth_token
url = self._api_url("user")
raw_resp = self.requests.get(url)
resp = raw_resp.json()
# abort & remove header if token is invalid
if resp.get("login", None) != self.user:
self.oops("Invalid token for user " + self.user)
self.headers.pop("Authorization")
return
self.is_authenticated = True
self.yeah("User {} authenticated".format(self.user))
def select_gist(self, allow_none=False):
"""
Given the requested filename, it selects the proper gist; if more than
one gist is found with the given filename, user is asked to choose.
:allow_none: (bool) for `getgist` it should raise error if no gist is
found, but setting this argument to True avoid this error, which is
useful when `putgist` is calling this method
:return: (dict) selected gist
"""
# pick up all macthing gists
matches = list()
for gist in self.get_gists():
for gist_file in gist.get("files"):
if self.filename == gist_file.get("filename"):
matches.append(gist)
# abort if no match is found
if not matches:
if allow_none:
return None
else:
msg = "No file named `{}` found in {}'s gists"
self.oops(msg.format(self.file_path, self.user))
if not self.is_authenticated:
self.warn("To access private gists set the GETGIST_TOKEN")
self.warn("(see `getgist --help` for details)")
return False
# return if there's is only one match
if len(matches) == 1 or self.assume_yes:
return matches.pop(0)
return self._ask_which_gist(matches)
def read_gist_file(self, gist):
"""
Returns the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse()
:return: (bytes) content of a gist loaded from GitHub
"""
url = False
files = gist.get("files")
for gist_file in files:
if gist_file.get("filename") == self.filename:
url = gist_file.get("raw_url")
break
if url:
self.output("Reading {}".format(url))
response = self.requests.get(url)
return response.content
@oauth_only
def update(self, gist, content):
"""
Updates the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse_gist()
:param content: (str or bytes) to be written
:return: (bool) indicatind the success or failure of the update
"""
# abort if content is False
if content is False:
return False
# request
url = self._api_url("gists", gist.get("id"))
data = {"files": {self.filename: {"content": content}}}
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.patch(url, data=dumps(data))
# error
if response.status_code != 200:
self.oops("Could not update " + gist.get("description"))
self.oops("PATCH request returned " + str(response.status_code))
return False
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True
@oauth_only
def create(self, content, **kwargs):
"""
Create a new gist.
:param gist: (dict) gist parsed by GitHubTools._parse()
:param content: (str or bytes) to be written
:param public: (bool) defines if the gist is public or private
:return: (bool) indicatind the success or failure of the creation
"""
# abort if content is False
if content is False:
return False
# set new gist
public = bool(kwargs.get("public", True))
data = {
"description": self.filename,
"public": public,
"files": {self.filename: {"content": content}},
}
# send request
url = self._api_url("gists")
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.post(url, data=dumps(data))
# error
if response.status_code != 201:
self.oops("Could not create " + self.filename)
self.oops("POST request returned " + str(response.status_code))
return False
# parse created gist
gist = self._parse_gist(response.json())
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True
def _ask_which_gist(self, matches):
"""
Asks user which gist to use in case of more than one gist matching the
instance filename.
:param matches: (list) of dictioaries generated within select_gists()
:return: (dict) of the selected gist
"""
# ask user which gist to use
self.hey("Use {} from which gist?".format(self.filename))
for count, gist in enumerate(matches, 1):
self.hey("[{}] {}".format(count, gist.get("description")))
# get the gist index
selected = False
while not selected:
gist_index = prompt("Type the number: ", type=int) - 1
try:
selected = matches[gist_index]
except IndexError:
self.oops("Invalid number, please try again.")
self.output("Using `{}` Gist".format(selected["description"]))
return selected
def _api_url(self, *args):
"""Get entrypoints adding arguments separated by slashes"""
return self.api_root_url + "/".join(args)
@staticmethod
def _parse_gist(gist):
"""Receive a gist (dict) and parse it to GetGist"""
# parse files
files = list()
file_names = sorted(filename for filename in gist["files"].keys())
for name in file_names:
files.append(
dict(filename=name, raw_url=gist["files"][name].get("raw_url"))
)
# parse description
description = gist["description"]
if not description:
names = sorted(f.get("filename") for f in files)
description = names.pop(0)
return dict(
description=description,
id=gist.get("id"),
files=files,
url=gist.get("html_url"),
)
@staticmethod
def _get_token():
"""Retrieve username from env var"""
return os.getenv("GETGIST_TOKEN")
|
cuducos/getgist | getgist/github.py | GitHubTools.select_gist | python | def select_gist(self, allow_none=False):
# pick up all macthing gists
matches = list()
for gist in self.get_gists():
for gist_file in gist.get("files"):
if self.filename == gist_file.get("filename"):
matches.append(gist)
# abort if no match is found
if not matches:
if allow_none:
return None
else:
msg = "No file named `{}` found in {}'s gists"
self.oops(msg.format(self.file_path, self.user))
if not self.is_authenticated:
self.warn("To access private gists set the GETGIST_TOKEN")
self.warn("(see `getgist --help` for details)")
return False
# return if there's is only one match
if len(matches) == 1 or self.assume_yes:
return matches.pop(0)
return self._ask_which_gist(matches) | Given the requested filename, it selects the proper gist; if more than
one gist is found with the given filename, user is asked to choose.
:allow_none: (bool) for `getgist` it should raise error if no gist is
found, but setting this argument to True avoid this error, which is
useful when `putgist` is calling this method
:return: (dict) selected gist | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L109-L141 | [
"def oops(self, message):\n \"\"\"Helper to colorize error messages\"\"\"\n return self.output(message, color=\"red\")\n",
"def warn(self, message):\n \"\"\"Helper to colorize warning messages\"\"\"\n return self.output(message, color=\"yellow\")\n",
"def get_gists(self):\n \"\"\"\n List gener... | class GitHubTools(GetGistCommons):
"""Helpers to deal with GitHub API and manipulate gists"""
version = get_distribution("getgist").version
api_root_url = "https://api.github.com/"
headers = {
"Accept": "application/vnd.github.v3+json",
"User-Agent": "GetGist v{}".format(version),
}
requests = GetGistRequests(headers)
is_authenticated = False
def __init__(self, user, file_path, assume_yes=False):
"""
Save basic variables to all methods, instantiate GetGistrequests and
calls the OAuth method.
:param user: (str) GitHub username
:param file_path: (str) file_path to be saved (locally), created or
updated (remotelly)
:param assume_yes: (bool) assume yes (or first option) for all prompts
:return: (None)
"""
self.user = user
self.file_path = file_path
self.filename = os.path.basename(file_path)
self.assume_yes = assume_yes
self.add_oauth_header()
def add_oauth_header(self):
"""
Validate token and add the proper header for further requests.
:return: (None)
"""
# abort if no token
oauth_token = self._get_token()
if not oauth_token:
return
# add oauth header & reach the api
self.headers["Authorization"] = "token " + oauth_token
url = self._api_url("user")
raw_resp = self.requests.get(url)
resp = raw_resp.json()
# abort & remove header if token is invalid
if resp.get("login", None) != self.user:
self.oops("Invalid token for user " + self.user)
self.headers.pop("Authorization")
return
self.is_authenticated = True
self.yeah("User {} authenticated".format(self.user))
def get_gists(self):
"""
List generator containing gist relevant information
such as id, description, filenames and raw URL (dict).
"""
# fetch all gists
if self.is_authenticated:
url = self._api_url("gists")
else:
url = self._api_url("users", self.user, "gists")
self.output("Fetching " + url)
raw_resp = self.requests.get(url)
# abort if user not found
if raw_resp.status_code != 200:
self.oops("User `{}` not found".format(self.user))
return
# abort if there are no gists
resp = raw_resp.json()
if not resp:
self.oops("No gists found for user `{}`".format(self.user))
return
# parse response
for gist in raw_resp.json():
yield self._parse_gist(gist)
def read_gist_file(self, gist):
"""
Returns the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse()
:return: (bytes) content of a gist loaded from GitHub
"""
url = False
files = gist.get("files")
for gist_file in files:
if gist_file.get("filename") == self.filename:
url = gist_file.get("raw_url")
break
if url:
self.output("Reading {}".format(url))
response = self.requests.get(url)
return response.content
@oauth_only
def update(self, gist, content):
"""
Updates the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse_gist()
:param content: (str or bytes) to be written
:return: (bool) indicatind the success or failure of the update
"""
# abort if content is False
if content is False:
return False
# request
url = self._api_url("gists", gist.get("id"))
data = {"files": {self.filename: {"content": content}}}
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.patch(url, data=dumps(data))
# error
if response.status_code != 200:
self.oops("Could not update " + gist.get("description"))
self.oops("PATCH request returned " + str(response.status_code))
return False
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True
@oauth_only
def create(self, content, **kwargs):
"""
Create a new gist.
:param gist: (dict) gist parsed by GitHubTools._parse()
:param content: (str or bytes) to be written
:param public: (bool) defines if the gist is public or private
:return: (bool) indicatind the success or failure of the creation
"""
# abort if content is False
if content is False:
return False
# set new gist
public = bool(kwargs.get("public", True))
data = {
"description": self.filename,
"public": public,
"files": {self.filename: {"content": content}},
}
# send request
url = self._api_url("gists")
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.post(url, data=dumps(data))
# error
if response.status_code != 201:
self.oops("Could not create " + self.filename)
self.oops("POST request returned " + str(response.status_code))
return False
# parse created gist
gist = self._parse_gist(response.json())
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True
def _ask_which_gist(self, matches):
"""
Asks user which gist to use in case of more than one gist matching the
instance filename.
:param matches: (list) of dictioaries generated within select_gists()
:return: (dict) of the selected gist
"""
# ask user which gist to use
self.hey("Use {} from which gist?".format(self.filename))
for count, gist in enumerate(matches, 1):
self.hey("[{}] {}".format(count, gist.get("description")))
# get the gist index
selected = False
while not selected:
gist_index = prompt("Type the number: ", type=int) - 1
try:
selected = matches[gist_index]
except IndexError:
self.oops("Invalid number, please try again.")
self.output("Using `{}` Gist".format(selected["description"]))
return selected
def _api_url(self, *args):
"""Get entrypoints adding arguments separated by slashes"""
return self.api_root_url + "/".join(args)
@staticmethod
def _parse_gist(gist):
"""Receive a gist (dict) and parse it to GetGist"""
# parse files
files = list()
file_names = sorted(filename for filename in gist["files"].keys())
for name in file_names:
files.append(
dict(filename=name, raw_url=gist["files"][name].get("raw_url"))
)
# parse description
description = gist["description"]
if not description:
names = sorted(f.get("filename") for f in files)
description = names.pop(0)
return dict(
description=description,
id=gist.get("id"),
files=files,
url=gist.get("html_url"),
)
@staticmethod
def _get_token():
    """Retrieve the OAuth token from the GETGIST_TOKEN environment
    variable (returns None when it is not set)."""
    return os.getenv("GETGIST_TOKEN")
|
cuducos/getgist | getgist/github.py | GitHubTools.read_gist_file | python | def read_gist_file(self, gist):
url = False
files = gist.get("files")
for gist_file in files:
if gist_file.get("filename") == self.filename:
url = gist_file.get("raw_url")
break
if url:
self.output("Reading {}".format(url))
response = self.requests.get(url)
return response.content | Returns the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse()
:return: (bytes) content of a gist loaded from GitHub | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L143-L158 | null | class GitHubTools(GetGistCommons):
"""Helpers to deal with GitHub API and manipulate gists"""
version = get_distribution("getgist").version
api_root_url = "https://api.github.com/"
headers = {
"Accept": "application/vnd.github.v3+json",
"User-Agent": "GetGist v{}".format(version),
}
requests = GetGistRequests(headers)
is_authenticated = False
def __init__(self, user, file_path, assume_yes=False):
    """
    Store the basic state shared by all methods and run the OAuth setup.
    :param user: (str) GitHub username
    :param file_path: (str) file path to be saved (locally), created or
    updated (remotely)
    :param assume_yes: (bool) assume yes (or first option) for all prompts
    :return: (None)
    """
    self.assume_yes = assume_yes
    self.user = user
    self.file_path = file_path
    self.filename = os.path.basename(file_path)
    # validate the token (if any) before any other request is made
    self.add_oauth_header()
def add_oauth_header(self):
    """
    Validate the OAuth token and, when it is valid, keep the proper
    Authorization header for further requests.
    :return: (None)
    """
    token = self._get_token()
    if not token:
        return  # nothing to do without a token
    # add oauth header & reach the api
    self.headers["Authorization"] = "token " + token
    response = self.requests.get(self._api_url("user"))
    payload = response.json()
    # a valid token authenticates as the expected user
    if payload.get("login", None) == self.user:
        self.is_authenticated = True
        self.yeah("User {} authenticated".format(self.user))
        return
    # abort & remove header if token is invalid
    self.oops("Invalid token for user " + self.user)
    self.headers.pop("Authorization")
def get_gists(self):
    """
    Generator yielding one dict per gist with the relevant information:
    id, description, filenames and raw URLs (see _parse_gist()).
    Yields nothing when the user is not found or has no gists.
    """
    # fetch all gists (the authenticated endpoint also lists private ones)
    if self.is_authenticated:
        url = self._api_url("gists")
    else:
        url = self._api_url("users", self.user, "gists")
    self.output("Fetching " + url)
    raw_resp = self.requests.get(url)
    # abort if user not found
    if raw_resp.status_code != 200:
        self.oops("User `{}` not found".format(self.user))
        return
    # abort if there are no gists
    resp = raw_resp.json()
    if not resp:
        self.oops("No gists found for user `{}`".format(self.user))
        return
    # parse response; reuse the already-parsed JSON instead of calling
    # raw_resp.json() a second time (the original re-parsed the body)
    for gist in resp:
        yield self._parse_gist(gist)
def select_gist(self, allow_none=False):
    """
    Given the requested filename, select the proper gist; when more than
    one gist holds that filename, the user is asked to choose.
    :allow_none: (bool) `getgist` should fail when no gist is found, but
    setting this argument to True avoids that error, which is useful when
    `putgist` calls this method
    :return: (dict) selected gist, None (allow_none) or False (not found)
    """
    # pick up all matching gists
    matches = [
        gist
        for gist in self.get_gists()
        if any(
            self.filename == gist_file.get("filename")
            for gist_file in gist.get("files")
        )
    ]
    # abort if no match is found
    if not matches:
        if allow_none:
            return None
        msg = "No file named `{}` found in {}'s gists"
        self.oops(msg.format(self.file_path, self.user))
        if not self.is_authenticated:
            self.warn("To access private gists set the GETGIST_TOKEN")
            self.warn("(see `getgist --help` for details)")
        return False
    # return right away when there is a single match or yes is assumed
    if len(matches) == 1 or self.assume_yes:
        return matches.pop(0)
    return self._ask_which_gist(matches)
@oauth_only
def update(self, gist, content):
    """
    Update the contents of a file hosted inside a gist at GitHub.
    :param gist: (dict) gist parsed by GitHubTools._parse_gist()
    :param content: (str or bytes) to be written
    :return: (bool) indicating the success or failure of the update
    """
    # abort if there is no content to send
    if content is False:
        return False
    # request
    url = self._api_url("gists", gist.get("id"))
    payload = {"files": {self.filename: {"content": content}}}
    self.output("Sending contents of {} to {}".format(self.file_path, url))
    response = self.requests.patch(url, data=dumps(payload))
    # success
    if response.status_code == 200:
        self.yeah("Done!")
        self.hey("The URL to this Gist is: {}".format(gist["url"]))
        return True
    # error
    self.oops("Could not update " + gist.get("description"))
    self.oops("PATCH request returned " + str(response.status_code))
    return False
@oauth_only
def create(self, content, **kwargs):
    """
    Create a new gist.
    :param content: (str or bytes) to be written
    :param public: (bool, via kwargs) defines if the gist is public or
    private; defaults to True
    :return: (bool) indicating the success or failure of the creation
    """
    # abort if content is False
    if content is False:
        return False
    # set new gist
    public = bool(kwargs.get("public", True))
    data = {
        "description": self.filename,
        "public": public,
        "files": {self.filename: {"content": content}},
    }
    # send request
    url = self._api_url("gists")
    self.output("Sending contents of {} to {}".format(self.file_path, url))
    response = self.requests.post(url, data=dumps(data))
    # error (201 Created is the expected status for a successful POST)
    if response.status_code != 201:
        self.oops("Could not create " + self.filename)
        self.oops("POST request returned " + str(response.status_code))
        return False
    # parse created gist
    gist = self._parse_gist(response.json())
    # success
    self.yeah("Done!")
    self.hey("The URL to this Gist is: {}".format(gist["url"]))
    return True
def _ask_which_gist(self, matches):
"""
Asks user which gist to use in case of more than one gist matching the
instance filename.
:param matches: (list) of dictioaries generated within select_gists()
:return: (dict) of the selected gist
"""
# ask user which gist to use
self.hey("Use {} from which gist?".format(self.filename))
for count, gist in enumerate(matches, 1):
self.hey("[{}] {}".format(count, gist.get("description")))
# get the gist index
selected = False
while not selected:
gist_index = prompt("Type the number: ", type=int) - 1
try:
selected = matches[gist_index]
except IndexError:
self.oops("Invalid number, please try again.")
self.output("Using `{}` Gist".format(selected["description"]))
return selected
def _api_url(self, *args):
"""Get entrypoints adding arguments separated by slashes"""
return self.api_root_url + "/".join(args)
@staticmethod
def _parse_gist(gist):
"""Receive a gist (dict) and parse it to GetGist"""
# parse files
files = list()
file_names = sorted(filename for filename in gist["files"].keys())
for name in file_names:
files.append(
dict(filename=name, raw_url=gist["files"][name].get("raw_url"))
)
# parse description
description = gist["description"]
if not description:
names = sorted(f.get("filename") for f in files)
description = names.pop(0)
return dict(
description=description,
id=gist.get("id"),
files=files,
url=gist.get("html_url"),
)
@staticmethod
def _get_token():
"""Retrieve username from env var"""
return os.getenv("GETGIST_TOKEN")
|
cuducos/getgist | getgist/github.py | GitHubTools.update | python | def update(self, gist, content):
# abort if content is False
if content is False:
return False
# request
url = self._api_url("gists", gist.get("id"))
data = {"files": {self.filename: {"content": content}}}
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.patch(url, data=dumps(data))
# error
if response.status_code != 200:
self.oops("Could not update " + gist.get("description"))
self.oops("PATCH request returned " + str(response.status_code))
return False
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True | Updates the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse_gist()
:param content: (str or bytes) to be written
:return: (bool) indicating the success or failure of the update | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L161-L187 | [
"def output(self, message, color=None):\n \"\"\"\n A helper to used like print() or click's secho() tunneling all the\n outputs to sys.stdout or sys.stderr\n :param message: (str)\n :param color: (str) check click.secho() documentation\n :return: (None) prints to sys.stdout or sys.stderr\n \"\"... | class GitHubTools(GetGistCommons):
"""Helpers to deal with GitHub API and manipulate gists"""
version = get_distribution("getgist").version
api_root_url = "https://api.github.com/"
headers = {
"Accept": "application/vnd.github.v3+json",
"User-Agent": "GetGist v{}".format(version),
}
requests = GetGistRequests(headers)
is_authenticated = False
def __init__(self, user, file_path, assume_yes=False):
"""
Save basic variables to all methods, instantiate GetGistrequests and
calls the OAuth method.
:param user: (str) GitHub username
:param file_path: (str) file_path to be saved (locally), created or
updated (remotelly)
:param assume_yes: (bool) assume yes (or first option) for all prompts
:return: (None)
"""
self.user = user
self.file_path = file_path
self.filename = os.path.basename(file_path)
self.assume_yes = assume_yes
self.add_oauth_header()
def add_oauth_header(self):
"""
Validate token and add the proper header for further requests.
:return: (None)
"""
# abort if no token
oauth_token = self._get_token()
if not oauth_token:
return
# add oauth header & reach the api
self.headers["Authorization"] = "token " + oauth_token
url = self._api_url("user")
raw_resp = self.requests.get(url)
resp = raw_resp.json()
# abort & remove header if token is invalid
if resp.get("login", None) != self.user:
self.oops("Invalid token for user " + self.user)
self.headers.pop("Authorization")
return
self.is_authenticated = True
self.yeah("User {} authenticated".format(self.user))
def get_gists(self):
"""
List generator containing gist relevant information
such as id, description, filenames and raw URL (dict).
"""
# fetch all gists
if self.is_authenticated:
url = self._api_url("gists")
else:
url = self._api_url("users", self.user, "gists")
self.output("Fetching " + url)
raw_resp = self.requests.get(url)
# abort if user not found
if raw_resp.status_code != 200:
self.oops("User `{}` not found".format(self.user))
return
# abort if there are no gists
resp = raw_resp.json()
if not resp:
self.oops("No gists found for user `{}`".format(self.user))
return
# parse response
for gist in raw_resp.json():
yield self._parse_gist(gist)
def select_gist(self, allow_none=False):
"""
Given the requested filename, it selects the proper gist; if more than
one gist is found with the given filename, user is asked to choose.
:allow_none: (bool) for `getgist` it should raise error if no gist is
found, but setting this argument to True avoid this error, which is
useful when `putgist` is calling this method
:return: (dict) selected gist
"""
# pick up all macthing gists
matches = list()
for gist in self.get_gists():
for gist_file in gist.get("files"):
if self.filename == gist_file.get("filename"):
matches.append(gist)
# abort if no match is found
if not matches:
if allow_none:
return None
else:
msg = "No file named `{}` found in {}'s gists"
self.oops(msg.format(self.file_path, self.user))
if not self.is_authenticated:
self.warn("To access private gists set the GETGIST_TOKEN")
self.warn("(see `getgist --help` for details)")
return False
# return if there's is only one match
if len(matches) == 1 or self.assume_yes:
return matches.pop(0)
return self._ask_which_gist(matches)
def read_gist_file(self, gist):
    """
    Return the contents of the file hosted inside a gist at GitHub.
    :param gist: (dict) gist parsed by GitHubTools._parse_gist()
    :return: (bytes) content of the gist file, or None (implicitly) when
    the instance filename is not present in the gist
    """
    # find the raw URL of the file matching the instance filename
    url = next(
        (
            gist_file.get("raw_url")
            for gist_file in gist.get("files")
            if gist_file.get("filename") == self.filename
        ),
        False,
    )
    if url:
        self.output("Reading {}".format(url))
        response = self.requests.get(url)
        return response.content
@oauth_only
@oauth_only
def create(self, content, **kwargs):
"""
Create a new gist.
:param gist: (dict) gist parsed by GitHubTools._parse()
:param content: (str or bytes) to be written
:param public: (bool) defines if the gist is public or private
:return: (bool) indicatind the success or failure of the creation
"""
# abort if content is False
if content is False:
return False
# set new gist
public = bool(kwargs.get("public", True))
data = {
"description": self.filename,
"public": public,
"files": {self.filename: {"content": content}},
}
# send request
url = self._api_url("gists")
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.post(url, data=dumps(data))
# error
if response.status_code != 201:
self.oops("Could not create " + self.filename)
self.oops("POST request returned " + str(response.status_code))
return False
# parse created gist
gist = self._parse_gist(response.json())
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True
def _ask_which_gist(self, matches):
"""
Asks user which gist to use in case of more than one gist matching the
instance filename.
:param matches: (list) of dictioaries generated within select_gists()
:return: (dict) of the selected gist
"""
# ask user which gist to use
self.hey("Use {} from which gist?".format(self.filename))
for count, gist in enumerate(matches, 1):
self.hey("[{}] {}".format(count, gist.get("description")))
# get the gist index
selected = False
while not selected:
gist_index = prompt("Type the number: ", type=int) - 1
try:
selected = matches[gist_index]
except IndexError:
self.oops("Invalid number, please try again.")
self.output("Using `{}` Gist".format(selected["description"]))
return selected
def _api_url(self, *args):
"""Get entrypoints adding arguments separated by slashes"""
return self.api_root_url + "/".join(args)
@staticmethod
def _parse_gist(gist):
"""Receive a gist (dict) and parse it to GetGist"""
# parse files
files = list()
file_names = sorted(filename for filename in gist["files"].keys())
for name in file_names:
files.append(
dict(filename=name, raw_url=gist["files"][name].get("raw_url"))
)
# parse description
description = gist["description"]
if not description:
names = sorted(f.get("filename") for f in files)
description = names.pop(0)
return dict(
description=description,
id=gist.get("id"),
files=files,
url=gist.get("html_url"),
)
@staticmethod
def _get_token():
"""Retrieve username from env var"""
return os.getenv("GETGIST_TOKEN")
|
cuducos/getgist | getgist/github.py | GitHubTools.create | python | def create(self, content, **kwargs):
# abort if content is False
if content is False:
return False
# set new gist
public = bool(kwargs.get("public", True))
data = {
"description": self.filename,
"public": public,
"files": {self.filename: {"content": content}},
}
# send request
url = self._api_url("gists")
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.post(url, data=dumps(data))
# error
if response.status_code != 201:
self.oops("Could not create " + self.filename)
self.oops("POST request returned " + str(response.status_code))
return False
# parse created gist
gist = self._parse_gist(response.json())
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True | Create a new gist.
:param gist: (dict) gist parsed by GitHubTools._parse()
:param content: (str or bytes) to be written
:param public: (bool) defines if the gist is public or private
:return: (bool) indicating the success or failure of the creation | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L190-L227 | [
"def output(self, message, color=None):\n \"\"\"\n A helper to used like print() or click's secho() tunneling all the\n outputs to sys.stdout or sys.stderr\n :param message: (str)\n :param color: (str) check click.secho() documentation\n :return: (None) prints to sys.stdout or sys.stderr\n \"\"... | class GitHubTools(GetGistCommons):
"""Helpers to deal with GitHub API and manipulate gists"""
version = get_distribution("getgist").version
api_root_url = "https://api.github.com/"
headers = {
"Accept": "application/vnd.github.v3+json",
"User-Agent": "GetGist v{}".format(version),
}
requests = GetGistRequests(headers)
is_authenticated = False
def __init__(self, user, file_path, assume_yes=False):
"""
Save basic variables to all methods, instantiate GetGistrequests and
calls the OAuth method.
:param user: (str) GitHub username
:param file_path: (str) file_path to be saved (locally), created or
updated (remotelly)
:param assume_yes: (bool) assume yes (or first option) for all prompts
:return: (None)
"""
self.user = user
self.file_path = file_path
self.filename = os.path.basename(file_path)
self.assume_yes = assume_yes
self.add_oauth_header()
def add_oauth_header(self):
"""
Validate token and add the proper header for further requests.
:return: (None)
"""
# abort if no token
oauth_token = self._get_token()
if not oauth_token:
return
# add oauth header & reach the api
self.headers["Authorization"] = "token " + oauth_token
url = self._api_url("user")
raw_resp = self.requests.get(url)
resp = raw_resp.json()
# abort & remove header if token is invalid
if resp.get("login", None) != self.user:
self.oops("Invalid token for user " + self.user)
self.headers.pop("Authorization")
return
self.is_authenticated = True
self.yeah("User {} authenticated".format(self.user))
def get_gists(self):
"""
List generator containing gist relevant information
such as id, description, filenames and raw URL (dict).
"""
# fetch all gists
if self.is_authenticated:
url = self._api_url("gists")
else:
url = self._api_url("users", self.user, "gists")
self.output("Fetching " + url)
raw_resp = self.requests.get(url)
# abort if user not found
if raw_resp.status_code != 200:
self.oops("User `{}` not found".format(self.user))
return
# abort if there are no gists
resp = raw_resp.json()
if not resp:
self.oops("No gists found for user `{}`".format(self.user))
return
# parse response
for gist in raw_resp.json():
yield self._parse_gist(gist)
def select_gist(self, allow_none=False):
"""
Given the requested filename, it selects the proper gist; if more than
one gist is found with the given filename, user is asked to choose.
:allow_none: (bool) for `getgist` it should raise error if no gist is
found, but setting this argument to True avoid this error, which is
useful when `putgist` is calling this method
:return: (dict) selected gist
"""
# pick up all macthing gists
matches = list()
for gist in self.get_gists():
for gist_file in gist.get("files"):
if self.filename == gist_file.get("filename"):
matches.append(gist)
# abort if no match is found
if not matches:
if allow_none:
return None
else:
msg = "No file named `{}` found in {}'s gists"
self.oops(msg.format(self.file_path, self.user))
if not self.is_authenticated:
self.warn("To access private gists set the GETGIST_TOKEN")
self.warn("(see `getgist --help` for details)")
return False
# return if there's is only one match
if len(matches) == 1 or self.assume_yes:
return matches.pop(0)
return self._ask_which_gist(matches)
def read_gist_file(self, gist):
"""
Returns the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse()
:return: (bytes) content of a gist loaded from GitHub
"""
url = False
files = gist.get("files")
for gist_file in files:
if gist_file.get("filename") == self.filename:
url = gist_file.get("raw_url")
break
if url:
self.output("Reading {}".format(url))
response = self.requests.get(url)
return response.content
@oauth_only
def update(self, gist, content):
"""
Updates the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse_gist()
:param content: (str or bytes) to be written
:return: (bool) indicatind the success or failure of the update
"""
# abort if content is False
if content is False:
return False
# request
url = self._api_url("gists", gist.get("id"))
data = {"files": {self.filename: {"content": content}}}
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.patch(url, data=dumps(data))
# error
if response.status_code != 200:
self.oops("Could not update " + gist.get("description"))
self.oops("PATCH request returned " + str(response.status_code))
return False
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True
@oauth_only
def _ask_which_gist(self, matches):
"""
Asks user which gist to use in case of more than one gist matching the
instance filename.
:param matches: (list) of dictioaries generated within select_gists()
:return: (dict) of the selected gist
"""
# ask user which gist to use
self.hey("Use {} from which gist?".format(self.filename))
for count, gist in enumerate(matches, 1):
self.hey("[{}] {}".format(count, gist.get("description")))
# get the gist index
selected = False
while not selected:
gist_index = prompt("Type the number: ", type=int) - 1
try:
selected = matches[gist_index]
except IndexError:
self.oops("Invalid number, please try again.")
self.output("Using `{}` Gist".format(selected["description"]))
return selected
def _api_url(self, *args):
"""Get entrypoints adding arguments separated by slashes"""
return self.api_root_url + "/".join(args)
@staticmethod
def _parse_gist(gist):
"""Receive a gist (dict) and parse it to GetGist"""
# parse files
files = list()
file_names = sorted(filename for filename in gist["files"].keys())
for name in file_names:
files.append(
dict(filename=name, raw_url=gist["files"][name].get("raw_url"))
)
# parse description
description = gist["description"]
if not description:
names = sorted(f.get("filename") for f in files)
description = names.pop(0)
return dict(
description=description,
id=gist.get("id"),
files=files,
url=gist.get("html_url"),
)
@staticmethod
def _get_token():
"""Retrieve username from env var"""
return os.getenv("GETGIST_TOKEN")
|
cuducos/getgist | getgist/github.py | GitHubTools._ask_which_gist | python | def _ask_which_gist(self, matches):
# ask user which gist to use
self.hey("Use {} from which gist?".format(self.filename))
for count, gist in enumerate(matches, 1):
self.hey("[{}] {}".format(count, gist.get("description")))
# get the gist index
selected = False
while not selected:
gist_index = prompt("Type the number: ", type=int) - 1
try:
selected = matches[gist_index]
except IndexError:
self.oops("Invalid number, please try again.")
self.output("Using `{}` Gist".format(selected["description"]))
return selected | Asks user which gist to use in case of more than one gist matching the
instance filename.
:param matches: (list) of dictionaries generated within select_gists()
:return: (dict) of the selected gist | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L229-L251 | [
"def output(self, message, color=None):\n \"\"\"\n A helper to used like print() or click's secho() tunneling all the\n outputs to sys.stdout or sys.stderr\n :param message: (str)\n :param color: (str) check click.secho() documentation\n :return: (None) prints to sys.stdout or sys.stderr\n \"\"... | class GitHubTools(GetGistCommons):
"""Helpers to deal with GitHub API and manipulate gists"""
version = get_distribution("getgist").version
api_root_url = "https://api.github.com/"
headers = {
"Accept": "application/vnd.github.v3+json",
"User-Agent": "GetGist v{}".format(version),
}
requests = GetGistRequests(headers)
is_authenticated = False
def __init__(self, user, file_path, assume_yes=False):
"""
Save basic variables to all methods, instantiate GetGistrequests and
calls the OAuth method.
:param user: (str) GitHub username
:param file_path: (str) file_path to be saved (locally), created or
updated (remotelly)
:param assume_yes: (bool) assume yes (or first option) for all prompts
:return: (None)
"""
self.user = user
self.file_path = file_path
self.filename = os.path.basename(file_path)
self.assume_yes = assume_yes
self.add_oauth_header()
def add_oauth_header(self):
"""
Validate token and add the proper header for further requests.
:return: (None)
"""
# abort if no token
oauth_token = self._get_token()
if not oauth_token:
return
# add oauth header & reach the api
self.headers["Authorization"] = "token " + oauth_token
url = self._api_url("user")
raw_resp = self.requests.get(url)
resp = raw_resp.json()
# abort & remove header if token is invalid
if resp.get("login", None) != self.user:
self.oops("Invalid token for user " + self.user)
self.headers.pop("Authorization")
return
self.is_authenticated = True
self.yeah("User {} authenticated".format(self.user))
def get_gists(self):
"""
List generator containing gist relevant information
such as id, description, filenames and raw URL (dict).
"""
# fetch all gists
if self.is_authenticated:
url = self._api_url("gists")
else:
url = self._api_url("users", self.user, "gists")
self.output("Fetching " + url)
raw_resp = self.requests.get(url)
# abort if user not found
if raw_resp.status_code != 200:
self.oops("User `{}` not found".format(self.user))
return
# abort if there are no gists
resp = raw_resp.json()
if not resp:
self.oops("No gists found for user `{}`".format(self.user))
return
# parse response
for gist in raw_resp.json():
yield self._parse_gist(gist)
def select_gist(self, allow_none=False):
"""
Given the requested filename, it selects the proper gist; if more than
one gist is found with the given filename, user is asked to choose.
:allow_none: (bool) for `getgist` it should raise error if no gist is
found, but setting this argument to True avoid this error, which is
useful when `putgist` is calling this method
:return: (dict) selected gist
"""
# pick up all macthing gists
matches = list()
for gist in self.get_gists():
for gist_file in gist.get("files"):
if self.filename == gist_file.get("filename"):
matches.append(gist)
# abort if no match is found
if not matches:
if allow_none:
return None
else:
msg = "No file named `{}` found in {}'s gists"
self.oops(msg.format(self.file_path, self.user))
if not self.is_authenticated:
self.warn("To access private gists set the GETGIST_TOKEN")
self.warn("(see `getgist --help` for details)")
return False
# return if there's is only one match
if len(matches) == 1 or self.assume_yes:
return matches.pop(0)
return self._ask_which_gist(matches)
def read_gist_file(self, gist):
"""
Returns the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse()
:return: (bytes) content of a gist loaded from GitHub
"""
url = False
files = gist.get("files")
for gist_file in files:
if gist_file.get("filename") == self.filename:
url = gist_file.get("raw_url")
break
if url:
self.output("Reading {}".format(url))
response = self.requests.get(url)
return response.content
@oauth_only
def update(self, gist, content):
"""
Updates the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse_gist()
:param content: (str or bytes) to be written
:return: (bool) indicatind the success or failure of the update
"""
# abort if content is False
if content is False:
return False
# request
url = self._api_url("gists", gist.get("id"))
data = {"files": {self.filename: {"content": content}}}
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.patch(url, data=dumps(data))
# error
if response.status_code != 200:
self.oops("Could not update " + gist.get("description"))
self.oops("PATCH request returned " + str(response.status_code))
return False
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True
@oauth_only
def create(self, content, **kwargs):
"""
Create a new gist.
:param gist: (dict) gist parsed by GitHubTools._parse()
:param content: (str or bytes) to be written
:param public: (bool) defines if the gist is public or private
:return: (bool) indicatind the success or failure of the creation
"""
# abort if content is False
if content is False:
return False
# set new gist
public = bool(kwargs.get("public", True))
data = {
"description": self.filename,
"public": public,
"files": {self.filename: {"content": content}},
}
# send request
url = self._api_url("gists")
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.post(url, data=dumps(data))
# error
if response.status_code != 201:
self.oops("Could not create " + self.filename)
self.oops("POST request returned " + str(response.status_code))
return False
# parse created gist
gist = self._parse_gist(response.json())
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True
def _api_url(self, *args):
"""Get entrypoints adding arguments separated by slashes"""
return self.api_root_url + "/".join(args)
@staticmethod
def _parse_gist(gist):
"""Receive a gist (dict) and parse it to GetGist"""
# parse files
files = list()
file_names = sorted(filename for filename in gist["files"].keys())
for name in file_names:
files.append(
dict(filename=name, raw_url=gist["files"][name].get("raw_url"))
)
# parse description
description = gist["description"]
if not description:
names = sorted(f.get("filename") for f in files)
description = names.pop(0)
return dict(
description=description,
id=gist.get("id"),
files=files,
url=gist.get("html_url"),
)
@staticmethod
def _get_token():
"""Retrieve username from env var"""
return os.getenv("GETGIST_TOKEN")
|
cuducos/getgist | getgist/github.py | GitHubTools._parse_gist | python | def _parse_gist(gist):
# parse files
files = list()
file_names = sorted(filename for filename in gist["files"].keys())
for name in file_names:
files.append(
dict(filename=name, raw_url=gist["files"][name].get("raw_url"))
)
# parse description
description = gist["description"]
if not description:
names = sorted(f.get("filename") for f in files)
description = names.pop(0)
return dict(
description=description,
id=gist.get("id"),
files=files,
url=gist.get("html_url"),
) | Receive a gist (dict) and parse it to GetGist | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L258-L280 | null | class GitHubTools(GetGistCommons):
"""Helpers to deal with GitHub API and manipulate gists"""
version = get_distribution("getgist").version
api_root_url = "https://api.github.com/"
headers = {
"Accept": "application/vnd.github.v3+json",
"User-Agent": "GetGist v{}".format(version),
}
requests = GetGistRequests(headers)
is_authenticated = False
def __init__(self, user, file_path, assume_yes=False):
"""
Save basic variables to all methods, instantiate GetGistrequests and
calls the OAuth method.
:param user: (str) GitHub username
:param file_path: (str) file_path to be saved (locally), created or
updated (remotelly)
:param assume_yes: (bool) assume yes (or first option) for all prompts
:return: (None)
"""
self.user = user
self.file_path = file_path
self.filename = os.path.basename(file_path)
self.assume_yes = assume_yes
self.add_oauth_header()
def add_oauth_header(self):
"""
Validate token and add the proper header for further requests.
:return: (None)
"""
# abort if no token
oauth_token = self._get_token()
if not oauth_token:
return
# add oauth header & reach the api
self.headers["Authorization"] = "token " + oauth_token
url = self._api_url("user")
raw_resp = self.requests.get(url)
resp = raw_resp.json()
# abort & remove header if token is invalid
if resp.get("login", None) != self.user:
self.oops("Invalid token for user " + self.user)
self.headers.pop("Authorization")
return
self.is_authenticated = True
self.yeah("User {} authenticated".format(self.user))
def get_gists(self):
"""
List generator containing gist relevant information
such as id, description, filenames and raw URL (dict).
"""
# fetch all gists
if self.is_authenticated:
url = self._api_url("gists")
else:
url = self._api_url("users", self.user, "gists")
self.output("Fetching " + url)
raw_resp = self.requests.get(url)
# abort if user not found
if raw_resp.status_code != 200:
self.oops("User `{}` not found".format(self.user))
return
# abort if there are no gists
resp = raw_resp.json()
if not resp:
self.oops("No gists found for user `{}`".format(self.user))
return
# parse response
for gist in raw_resp.json():
yield self._parse_gist(gist)
def select_gist(self, allow_none=False):
"""
Given the requested filename, it selects the proper gist; if more than
one gist is found with the given filename, user is asked to choose.
:allow_none: (bool) for `getgist` it should raise error if no gist is
found, but setting this argument to True avoid this error, which is
useful when `putgist` is calling this method
:return: (dict) selected gist
"""
# pick up all macthing gists
matches = list()
for gist in self.get_gists():
for gist_file in gist.get("files"):
if self.filename == gist_file.get("filename"):
matches.append(gist)
# abort if no match is found
if not matches:
if allow_none:
return None
else:
msg = "No file named `{}` found in {}'s gists"
self.oops(msg.format(self.file_path, self.user))
if not self.is_authenticated:
self.warn("To access private gists set the GETGIST_TOKEN")
self.warn("(see `getgist --help` for details)")
return False
# return if there's is only one match
if len(matches) == 1 or self.assume_yes:
return matches.pop(0)
return self._ask_which_gist(matches)
def read_gist_file(self, gist):
"""
Returns the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse()
:return: (bytes) content of a gist loaded from GitHub
"""
url = False
files = gist.get("files")
for gist_file in files:
if gist_file.get("filename") == self.filename:
url = gist_file.get("raw_url")
break
if url:
self.output("Reading {}".format(url))
response = self.requests.get(url)
return response.content
@oauth_only
def update(self, gist, content):
"""
Updates the contents of file hosted inside a gist at GitHub.
:param gist: (dict) gist parsed by GitHubTools._parse_gist()
:param content: (str or bytes) to be written
:return: (bool) indicatind the success or failure of the update
"""
# abort if content is False
if content is False:
return False
# request
url = self._api_url("gists", gist.get("id"))
data = {"files": {self.filename: {"content": content}}}
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.patch(url, data=dumps(data))
# error
if response.status_code != 200:
self.oops("Could not update " + gist.get("description"))
self.oops("PATCH request returned " + str(response.status_code))
return False
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True
@oauth_only
def create(self, content, **kwargs):
"""
Create a new gist.
:param gist: (dict) gist parsed by GitHubTools._parse()
:param content: (str or bytes) to be written
:param public: (bool) defines if the gist is public or private
:return: (bool) indicatind the success or failure of the creation
"""
# abort if content is False
if content is False:
return False
# set new gist
public = bool(kwargs.get("public", True))
data = {
"description": self.filename,
"public": public,
"files": {self.filename: {"content": content}},
}
# send request
url = self._api_url("gists")
self.output("Sending contents of {} to {}".format(self.file_path, url))
response = self.requests.post(url, data=dumps(data))
# error
if response.status_code != 201:
self.oops("Could not create " + self.filename)
self.oops("POST request returned " + str(response.status_code))
return False
# parse created gist
gist = self._parse_gist(response.json())
# success
self.yeah("Done!")
self.hey("The URL to this Gist is: {}".format(gist["url"]))
return True
def _ask_which_gist(self, matches):
"""
Asks user which gist to use in case of more than one gist matching the
instance filename.
:param matches: (list) of dictioaries generated within select_gists()
:return: (dict) of the selected gist
"""
# ask user which gist to use
self.hey("Use {} from which gist?".format(self.filename))
for count, gist in enumerate(matches, 1):
self.hey("[{}] {}".format(count, gist.get("description")))
# get the gist index
selected = False
while not selected:
gist_index = prompt("Type the number: ", type=int) - 1
try:
selected = matches[gist_index]
except IndexError:
self.oops("Invalid number, please try again.")
self.output("Using `{}` Gist".format(selected["description"]))
return selected
def _api_url(self, *args):
"""Get entrypoints adding arguments separated by slashes"""
return self.api_root_url + "/".join(args)
@staticmethod
@staticmethod
def _get_token():
"""Retrieve username from env var"""
return os.getenv("GETGIST_TOKEN")
|
cuducos/getgist | getgist/__init__.py | GetGistCommons.indent | python | def indent(self, message):
indent = self.indent_char * self.indent_size
return indent + message | Sets the indent for standardized output
:param message: (str)
:return: (str) | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__init__.py#L12-L19 | null | class GetGistCommons(object):
"""Basic output methods used to print messages on users' terminal"""
indent_size = 2
indent_char = " "
def output(self, message, color=None):
"""
A helper to used like print() or click's secho() tunneling all the
outputs to sys.stdout or sys.stderr
:param message: (str)
:param color: (str) check click.secho() documentation
:return: (None) prints to sys.stdout or sys.stderr
"""
output_to = stderr if color == "red" else stdout
secho(self.indent(message), fg=color, file=output_to)
def oops(self, message):
"""Helper to colorize error messages"""
return self.output(message, color="red")
def yeah(self, message):
"""Helper to colorize success messages"""
return self.output(message, color="green")
def warn(self, message):
"""Helper to colorize warning messages"""
return self.output(message, color="yellow")
def hey(self, message):
"""Helper to colorize highlihghted messages"""
return self.output(message, color="blue")
|
cuducos/getgist | getgist/__init__.py | GetGistCommons.output | python | def output(self, message, color=None):
output_to = stderr if color == "red" else stdout
secho(self.indent(message), fg=color, file=output_to) | A helper to used like print() or click's secho() tunneling all the
outputs to sys.stdout or sys.stderr
:param message: (str)
:param color: (str) check click.secho() documentation
:return: (None) prints to sys.stdout or sys.stderr | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__init__.py#L21-L30 | [
"def indent(self, message):\n \"\"\"\n Sets the indent for standardized output\n :param message: (str)\n :return: (str)\n \"\"\"\n indent = self.indent_char * self.indent_size\n return indent + message\n"
] | class GetGistCommons(object):
"""Basic output methods used to print messages on users' terminal"""
indent_size = 2
indent_char = " "
def indent(self, message):
"""
Sets the indent for standardized output
:param message: (str)
:return: (str)
"""
indent = self.indent_char * self.indent_size
return indent + message
def oops(self, message):
"""Helper to colorize error messages"""
return self.output(message, color="red")
def yeah(self, message):
"""Helper to colorize success messages"""
return self.output(message, color="green")
def warn(self, message):
"""Helper to colorize warning messages"""
return self.output(message, color="yellow")
def hey(self, message):
"""Helper to colorize highlihghted messages"""
return self.output(message, color="blue")
|
cuducos/getgist | getgist/request.py | GetGistRequests.get | python | def get(self, url, params=None, **kwargs):
return requests.get(url, params=params, headers=self.add_headers(**kwargs)) | Encapsulte requests.get to use this class instance header | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/request.py#L26-L28 | [
"def add_headers(self, **headers):\n \"\"\"\n Add any extra header to the existng header object.\n :param kwargs: key/value pairs\n :return: (dict)\n \"\"\"\n headers.update(self.headers)\n return headers\n"
] | class GetGistRequests(object):
"""Encapsulate requests lib to always send self.headers as headers"""
def __init__(self, headers=None):
"""
Get a header object to use it in all requests
:param headers: (dict)
:return: (None)
"""
if not headers:
headers = dict()
self.headers = headers
def add_headers(self, **headers):
"""
Add any extra header to the existng header object.
:param kwargs: key/value pairs
:return: (dict)
"""
headers.update(self.headers)
return headers
def patch(self, url, data=None, **kwargs):
"""Encapsulte requests.patch to use this class instance header"""
return requests.patch(url, data=data, headers=self.add_headers(**kwargs))
def post(self, url, data=None, **kwargs):
"""Encapsulte requests.post to use this class instance header"""
return requests.post(url, data=data, headers=self.add_headers(**kwargs))
|
cuducos/getgist | getgist/request.py | GetGistRequests.patch | python | def patch(self, url, data=None, **kwargs):
return requests.patch(url, data=data, headers=self.add_headers(**kwargs)) | Encapsulte requests.patch to use this class instance header | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/request.py#L30-L32 | [
"def add_headers(self, **headers):\n \"\"\"\n Add any extra header to the existng header object.\n :param kwargs: key/value pairs\n :return: (dict)\n \"\"\"\n headers.update(self.headers)\n return headers\n"
] | class GetGistRequests(object):
"""Encapsulate requests lib to always send self.headers as headers"""
def __init__(self, headers=None):
"""
Get a header object to use it in all requests
:param headers: (dict)
:return: (None)
"""
if not headers:
headers = dict()
self.headers = headers
def add_headers(self, **headers):
"""
Add any extra header to the existng header object.
:param kwargs: key/value pairs
:return: (dict)
"""
headers.update(self.headers)
return headers
def get(self, url, params=None, **kwargs):
"""Encapsulte requests.get to use this class instance header"""
return requests.get(url, params=params, headers=self.add_headers(**kwargs))
def post(self, url, data=None, **kwargs):
"""Encapsulte requests.post to use this class instance header"""
return requests.post(url, data=data, headers=self.add_headers(**kwargs))
|
cuducos/getgist | getgist/request.py | GetGistRequests.post | python | def post(self, url, data=None, **kwargs):
return requests.post(url, data=data, headers=self.add_headers(**kwargs)) | Encapsulte requests.post to use this class instance header | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/request.py#L34-L36 | [
"def add_headers(self, **headers):\n \"\"\"\n Add any extra header to the existng header object.\n :param kwargs: key/value pairs\n :return: (dict)\n \"\"\"\n headers.update(self.headers)\n return headers\n"
] | class GetGistRequests(object):
"""Encapsulate requests lib to always send self.headers as headers"""
def __init__(self, headers=None):
"""
Get a header object to use it in all requests
:param headers: (dict)
:return: (None)
"""
if not headers:
headers = dict()
self.headers = headers
def add_headers(self, **headers):
"""
Add any extra header to the existng header object.
:param kwargs: key/value pairs
:return: (dict)
"""
headers.update(self.headers)
return headers
def get(self, url, params=None, **kwargs):
"""Encapsulte requests.get to use this class instance header"""
return requests.get(url, params=params, headers=self.add_headers(**kwargs))
def patch(self, url, data=None, **kwargs):
"""Encapsulte requests.patch to use this class instance header"""
return requests.patch(url, data=data, headers=self.add_headers(**kwargs))
|
cuducos/getgist | getgist/local.py | LocalTools.save | python | def save(self, content):
# backup existing file if needed
if os.path.exists(self.file_path) and not self.assume_yes:
message = "Overwrite existing {}? (y/n) "
if not confirm(message.format(self.filename)):
self.backup()
# write file
self.output("Saving " + self.filename)
with open(self.file_path, "wb") as handler:
if not isinstance(content, bytes):
content = bytes(content, "utf-8")
handler.write(content)
self.yeah("Done!") | Save any given content to the instance file.
:param content: (str or bytes)
:return: (None) | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/local.py#L23-L41 | null | class LocalTools(GetGistCommons):
"""Helpers to deal with local files and local file system"""
def __init__(self, filename, assume_yes=False):
"""
Sets the file name to be used by the instance.
:param filename: (str) local file name (ro be read or written)
:param assume_yes: (bool) assume yes (or first option) for all prompts
return: (None)
"""
self.cwd = os.getcwd()
self.file_path = os.path.expanduser(filename)
self.filename = os.path.basename(filename)
self.assume_yes = assume_yes
def backup(self):
"""Backups files with the same name of the instance filename"""
count = 0
name = "{}.bkp".format(self.filename)
backup = os.path.join(self.cwd, name)
while os.path.exists(backup):
count += 1
name = "{}.bkp{}".format(self.filename, count)
backup = os.path.join(self.cwd, name)
self.hey("Moving existing {} to {}".format(self.filename, name))
os.rename(os.path.join(self.cwd, self.filename), backup)
def read(self, file_path=None):
"""
Read the contents of a file.
:param filename: (str) path to a file in the local file system
:return: (str) contents of the file, or (False) if not found/not file
"""
if not file_path:
file_path = self.file_path
# abort if the file path does not exist
if not os.path.exists(file_path):
self.oops("Sorry, but {} does not exist".format(file_path))
return False
# abort if the file path is not a file
if not os.path.isfile(file_path):
self.oops("Sorry, but {} is not a file".format(file_path))
return False
with open(file_path) as handler:
return handler.read()
|
cuducos/getgist | getgist/local.py | LocalTools.backup | python | def backup(self):
count = 0
name = "{}.bkp".format(self.filename)
backup = os.path.join(self.cwd, name)
while os.path.exists(backup):
count += 1
name = "{}.bkp{}".format(self.filename, count)
backup = os.path.join(self.cwd, name)
self.hey("Moving existing {} to {}".format(self.filename, name))
os.rename(os.path.join(self.cwd, self.filename), backup) | Backups files with the same name of the instance filename | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/local.py#L43-L53 | [
"def hey(self, message):\n \"\"\"Helper to colorize highlihghted messages\"\"\"\n return self.output(message, color=\"blue\")\n"
] | class LocalTools(GetGistCommons):
"""Helpers to deal with local files and local file system"""
def __init__(self, filename, assume_yes=False):
"""
Sets the file name to be used by the instance.
:param filename: (str) local file name (ro be read or written)
:param assume_yes: (bool) assume yes (or first option) for all prompts
return: (None)
"""
self.cwd = os.getcwd()
self.file_path = os.path.expanduser(filename)
self.filename = os.path.basename(filename)
self.assume_yes = assume_yes
def save(self, content):
"""
Save any given content to the instance file.
:param content: (str or bytes)
:return: (None)
"""
# backup existing file if needed
if os.path.exists(self.file_path) and not self.assume_yes:
message = "Overwrite existing {}? (y/n) "
if not confirm(message.format(self.filename)):
self.backup()
# write file
self.output("Saving " + self.filename)
with open(self.file_path, "wb") as handler:
if not isinstance(content, bytes):
content = bytes(content, "utf-8")
handler.write(content)
self.yeah("Done!")
def read(self, file_path=None):
"""
Read the contents of a file.
:param filename: (str) path to a file in the local file system
:return: (str) contents of the file, or (False) if not found/not file
"""
if not file_path:
file_path = self.file_path
# abort if the file path does not exist
if not os.path.exists(file_path):
self.oops("Sorry, but {} does not exist".format(file_path))
return False
# abort if the file path is not a file
if not os.path.isfile(file_path):
self.oops("Sorry, but {} is not a file".format(file_path))
return False
with open(file_path) as handler:
return handler.read()
|
cuducos/getgist | getgist/local.py | LocalTools.read | python | def read(self, file_path=None):
if not file_path:
file_path = self.file_path
# abort if the file path does not exist
if not os.path.exists(file_path):
self.oops("Sorry, but {} does not exist".format(file_path))
return False
# abort if the file path is not a file
if not os.path.isfile(file_path):
self.oops("Sorry, but {} is not a file".format(file_path))
return False
with open(file_path) as handler:
return handler.read() | Read the contents of a file.
:param filename: (str) path to a file in the local file system
:return: (str) contents of the file, or (False) if not found/not file | train | https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/local.py#L55-L75 | [
"def oops(self, message):\n \"\"\"Helper to colorize error messages\"\"\"\n return self.output(message, color=\"red\")\n"
] | class LocalTools(GetGistCommons):
"""Helpers to deal with local files and local file system"""
def __init__(self, filename, assume_yes=False):
"""
Sets the file name to be used by the instance.
:param filename: (str) local file name (ro be read or written)
:param assume_yes: (bool) assume yes (or first option) for all prompts
return: (None)
"""
self.cwd = os.getcwd()
self.file_path = os.path.expanduser(filename)
self.filename = os.path.basename(filename)
self.assume_yes = assume_yes
def save(self, content):
"""
Save any given content to the instance file.
:param content: (str or bytes)
:return: (None)
"""
# backup existing file if needed
if os.path.exists(self.file_path) and not self.assume_yes:
message = "Overwrite existing {}? (y/n) "
if not confirm(message.format(self.filename)):
self.backup()
# write file
self.output("Saving " + self.filename)
with open(self.file_path, "wb") as handler:
if not isinstance(content, bytes):
content = bytes(content, "utf-8")
handler.write(content)
self.yeah("Done!")
def backup(self):
"""Backups files with the same name of the instance filename"""
count = 0
name = "{}.bkp".format(self.filename)
backup = os.path.join(self.cwd, name)
while os.path.exists(backup):
count += 1
name = "{}.bkp{}".format(self.filename, count)
backup = os.path.join(self.cwd, name)
self.hey("Moving existing {} to {}".format(self.filename, name))
os.rename(os.path.join(self.cwd, self.filename), backup)
|
cgoldberg/sauceclient | sauceclient.py | SauceClient.get_auth_string | python | def get_auth_string(self):
auth_info = '{}:{}'.format(self.sauce_username, self.sauce_access_key)
return base64.b64encode(auth_info.encode('utf-8')).decode('utf-8') | Create auth string from credentials. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L59-L62 | null | class SauceClient(object):
"""SauceClient class."""
def __init__(self, sauce_username=None, sauce_access_key=None, apibase=None):
"""Initialize class."""
self.sauce_username = sauce_username
self.sauce_access_key = sauce_access_key
self.apibase = apibase or 'saucelabs.com'
self.headers = self.make_headers()
self.account = Account(self)
self.information = Information(self)
self.javascript = JavaScriptTests(self)
self.jobs = Jobs(self)
self.storage = Storage(self)
self.tunnels = Tunnels(self)
self.analytics = Analytics(self)
def make_headers(self, content_type='application/json'):
"""Create content-type header."""
return {
'Content-Type': content_type,
}
def make_auth_headers(self, content_type):
"""Add authorization header."""
headers = self.make_headers(content_type)
headers['Authorization'] = 'Basic {}'.format(self.get_auth_string())
return headers
def request(self, method, url, body=None, content_type='application/json'):
"""Send http request."""
headers = self.make_auth_headers(content_type)
connection = http_client.HTTPSConnection(self.apibase)
connection.request(method, url, body, headers=headers)
response = connection.getresponse()
data = response.read()
connection.close()
if response.status not in [200, 201]:
raise SauceException('{}: {}.\nSauce Status NOT OK'.format(
response.status, response.reason), response=response)
return json.loads(data.decode('utf-8'))
|
cgoldberg/sauceclient | sauceclient.py | SauceClient.make_auth_headers | python | def make_auth_headers(self, content_type):
headers = self.make_headers(content_type)
headers['Authorization'] = 'Basic {}'.format(self.get_auth_string())
return headers | Add authorization header. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L70-L74 | [
"def get_auth_string(self):\n \"\"\"Create auth string from credentials.\"\"\"\n auth_info = '{}:{}'.format(self.sauce_username, self.sauce_access_key)\n return base64.b64encode(auth_info.encode('utf-8')).decode('utf-8')\n",
"def make_headers(self, content_type='application/json'):\n \"\"\"Create cont... | class SauceClient(object):
"""SauceClient class."""
def __init__(self, sauce_username=None, sauce_access_key=None, apibase=None):
"""Initialize class."""
self.sauce_username = sauce_username
self.sauce_access_key = sauce_access_key
self.apibase = apibase or 'saucelabs.com'
self.headers = self.make_headers()
self.account = Account(self)
self.information = Information(self)
self.javascript = JavaScriptTests(self)
self.jobs = Jobs(self)
self.storage = Storage(self)
self.tunnels = Tunnels(self)
self.analytics = Analytics(self)
def get_auth_string(self):
"""Create auth string from credentials."""
auth_info = '{}:{}'.format(self.sauce_username, self.sauce_access_key)
return base64.b64encode(auth_info.encode('utf-8')).decode('utf-8')
def make_headers(self, content_type='application/json'):
"""Create content-type header."""
return {
'Content-Type': content_type,
}
def request(self, method, url, body=None, content_type='application/json'):
"""Send http request."""
headers = self.make_auth_headers(content_type)
connection = http_client.HTTPSConnection(self.apibase)
connection.request(method, url, body, headers=headers)
response = connection.getresponse()
data = response.read()
connection.close()
if response.status not in [200, 201]:
raise SauceException('{}: {}.\nSauce Status NOT OK'.format(
response.status, response.reason), response=response)
return json.loads(data.decode('utf-8'))
|
cgoldberg/sauceclient | sauceclient.py | SauceClient.request | python | def request(self, method, url, body=None, content_type='application/json'):
headers = self.make_auth_headers(content_type)
connection = http_client.HTTPSConnection(self.apibase)
connection.request(method, url, body, headers=headers)
response = connection.getresponse()
data = response.read()
connection.close()
if response.status not in [200, 201]:
raise SauceException('{}: {}.\nSauce Status NOT OK'.format(
response.status, response.reason), response=response)
return json.loads(data.decode('utf-8')) | Send http request. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L76-L87 | [
"def make_auth_headers(self, content_type):\n \"\"\"Add authorization header.\"\"\"\n headers = self.make_headers(content_type)\n headers['Authorization'] = 'Basic {}'.format(self.get_auth_string())\n return headers\n"
] | class SauceClient(object):
"""SauceClient class."""
def __init__(self, sauce_username=None, sauce_access_key=None, apibase=None):
"""Initialize class."""
self.sauce_username = sauce_username
self.sauce_access_key = sauce_access_key
self.apibase = apibase or 'saucelabs.com'
self.headers = self.make_headers()
self.account = Account(self)
self.information = Information(self)
self.javascript = JavaScriptTests(self)
self.jobs = Jobs(self)
self.storage = Storage(self)
self.tunnels = Tunnels(self)
self.analytics = Analytics(self)
def get_auth_string(self):
"""Create auth string from credentials."""
auth_info = '{}:{}'.format(self.sauce_username, self.sauce_access_key)
return base64.b64encode(auth_info.encode('utf-8')).decode('utf-8')
def make_headers(self, content_type='application/json'):
"""Create content-type header."""
return {
'Content-Type': content_type,
}
def make_auth_headers(self, content_type):
"""Add authorization header."""
headers = self.make_headers(content_type)
headers['Authorization'] = 'Basic {}'.format(self.get_auth_string())
return headers
|
cgoldberg/sauceclient | sauceclient.py | Account.get_user | python | def get_user(self):
method = 'GET'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint) | Access basic account information. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L100-L104 | null | class Account(object):
"""Account Methods
These methods provide user account information and management.
- https://wiki.saucelabs.com/display/DOCS/Account+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def create_user(self, username, password, name, email):
"""Create a sub account."""
method = 'POST'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
body = json.dumps({'username': username, 'password': password,
'name': name, 'email': email, })
return self.client.request(method, endpoint, body)
def get_concurrency(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/concurrency'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccounts(self):
"""Get a list of sub accounts associated with a parent account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/list-subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_siblings(self):
"""Get a list of sibling accounts associated with provided account."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/siblings'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccount_info(self):
"""Get information about a sub account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def change_access_key(self):
"""Change access key of your account."""
method = 'POST'
endpoint = '/rest/v1/users/{}/accesskey/change'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_activity(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1/{}/activity'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def get_usage(self, start=None, end=None):
"""Access historical account usage data."""
method = 'GET'
endpoint = '/rest/v1/users/{}/usage'.format(self.client.sauce_username)
data = {}
if start:
data['start'] = start
if end:
data['end'] = end
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint)
|
cgoldberg/sauceclient | sauceclient.py | Account.create_user | python | def create_user(self, username, password, name, email):
method = 'POST'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
body = json.dumps({'username': username, 'password': password,
'name': name, 'email': email, })
return self.client.request(method, endpoint, body) | Create a sub account. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L106-L112 | null | class Account(object):
"""Account Methods
These methods provide user account information and management.
- https://wiki.saucelabs.com/display/DOCS/Account+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_user(self):
"""Access basic account information."""
method = 'GET'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def get_concurrency(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/concurrency'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccounts(self):
"""Get a list of sub accounts associated with a parent account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/list-subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_siblings(self):
"""Get a list of sibling accounts associated with provided account."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/siblings'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccount_info(self):
"""Get information about a sub account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def change_access_key(self):
"""Change access key of your account."""
method = 'POST'
endpoint = '/rest/v1/users/{}/accesskey/change'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_activity(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1/{}/activity'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def get_usage(self, start=None, end=None):
"""Access historical account usage data."""
method = 'GET'
endpoint = '/rest/v1/users/{}/usage'.format(self.client.sauce_username)
data = {}
if start:
data['start'] = start
if end:
data['end'] = end
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint)
|
cgoldberg/sauceclient | sauceclient.py | Account.get_concurrency | python | def get_concurrency(self):
method = 'GET'
endpoint = '/rest/v1.1/users/{}/concurrency'.format(
self.client.sauce_username)
return self.client.request(method, endpoint) | Check account concurrency limits. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L114-L119 | null | class Account(object):
"""Account Methods
These methods provide user account information and management.
- https://wiki.saucelabs.com/display/DOCS/Account+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_user(self):
"""Access basic account information."""
method = 'GET'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def create_user(self, username, password, name, email):
"""Create a sub account."""
method = 'POST'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
body = json.dumps({'username': username, 'password': password,
'name': name, 'email': email, })
return self.client.request(method, endpoint, body)
def get_subaccounts(self):
"""Get a list of sub accounts associated with a parent account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/list-subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_siblings(self):
"""Get a list of sibling accounts associated with provided account."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/siblings'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccount_info(self):
"""Get information about a sub account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def change_access_key(self):
"""Change access key of your account."""
method = 'POST'
endpoint = '/rest/v1/users/{}/accesskey/change'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_activity(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1/{}/activity'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def get_usage(self, start=None, end=None):
"""Access historical account usage data."""
method = 'GET'
endpoint = '/rest/v1/users/{}/usage'.format(self.client.sauce_username)
data = {}
if start:
data['start'] = start
if end:
data['end'] = end
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint)
|
cgoldberg/sauceclient | sauceclient.py | Account.get_subaccounts | python | def get_subaccounts(self):
method = 'GET'
endpoint = '/rest/v1/users/{}/list-subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint) | Get a list of sub accounts associated with a parent account. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L121-L126 | null | class Account(object):
"""Account Methods
These methods provide user account information and management.
- https://wiki.saucelabs.com/display/DOCS/Account+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_user(self):
"""Access basic account information."""
method = 'GET'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def create_user(self, username, password, name, email):
"""Create a sub account."""
method = 'POST'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
body = json.dumps({'username': username, 'password': password,
'name': name, 'email': email, })
return self.client.request(method, endpoint, body)
def get_concurrency(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/concurrency'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_siblings(self):
"""Get a list of sibling accounts associated with provided account."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/siblings'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccount_info(self):
"""Get information about a sub account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def change_access_key(self):
"""Change access key of your account."""
method = 'POST'
endpoint = '/rest/v1/users/{}/accesskey/change'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_activity(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1/{}/activity'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def get_usage(self, start=None, end=None):
"""Access historical account usage data."""
method = 'GET'
endpoint = '/rest/v1/users/{}/usage'.format(self.client.sauce_username)
data = {}
if start:
data['start'] = start
if end:
data['end'] = end
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint)
|
cgoldberg/sauceclient | sauceclient.py | Account.get_siblings | python | def get_siblings(self):
method = 'GET'
endpoint = '/rest/v1.1/users/{}/siblings'.format(
self.client.sauce_username)
return self.client.request(method, endpoint) | Get a list of sibling accounts associated with provided account. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L128-L133 | null | class Account(object):
"""Account Methods
These methods provide user account information and management.
- https://wiki.saucelabs.com/display/DOCS/Account+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_user(self):
"""Access basic account information."""
method = 'GET'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def create_user(self, username, password, name, email):
"""Create a sub account."""
method = 'POST'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
body = json.dumps({'username': username, 'password': password,
'name': name, 'email': email, })
return self.client.request(method, endpoint, body)
def get_concurrency(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/concurrency'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccounts(self):
"""Get a list of sub accounts associated with a parent account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/list-subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccount_info(self):
"""Get information about a sub account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def change_access_key(self):
"""Change access key of your account."""
method = 'POST'
endpoint = '/rest/v1/users/{}/accesskey/change'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_activity(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1/{}/activity'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def get_usage(self, start=None, end=None):
"""Access historical account usage data."""
method = 'GET'
endpoint = '/rest/v1/users/{}/usage'.format(self.client.sauce_username)
data = {}
if start:
data['start'] = start
if end:
data['end'] = end
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint)
|
cgoldberg/sauceclient | sauceclient.py | Account.get_subaccount_info | python | def get_subaccount_info(self):
method = 'GET'
endpoint = '/rest/v1/users/{}/subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint) | Get information about a sub account. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L135-L140 | null | class Account(object):
"""Account Methods
These methods provide user account information and management.
- https://wiki.saucelabs.com/display/DOCS/Account+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_user(self):
"""Access basic account information."""
method = 'GET'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def create_user(self, username, password, name, email):
"""Create a sub account."""
method = 'POST'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
body = json.dumps({'username': username, 'password': password,
'name': name, 'email': email, })
return self.client.request(method, endpoint, body)
def get_concurrency(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/concurrency'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccounts(self):
"""Get a list of sub accounts associated with a parent account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/list-subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_siblings(self):
"""Get a list of sibling accounts associated with provided account."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/siblings'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def change_access_key(self):
"""Change access key of your account."""
method = 'POST'
endpoint = '/rest/v1/users/{}/accesskey/change'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_activity(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1/{}/activity'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def get_usage(self, start=None, end=None):
"""Access historical account usage data."""
method = 'GET'
endpoint = '/rest/v1/users/{}/usage'.format(self.client.sauce_username)
data = {}
if start:
data['start'] = start
if end:
data['end'] = end
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint)
|
cgoldberg/sauceclient | sauceclient.py | Account.change_access_key | python | def change_access_key(self):
method = 'POST'
endpoint = '/rest/v1/users/{}/accesskey/change'.format(
self.client.sauce_username)
return self.client.request(method, endpoint) | Change access key of your account. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L142-L147 | null | class Account(object):
"""Account Methods
These methods provide user account information and management.
- https://wiki.saucelabs.com/display/DOCS/Account+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_user(self):
"""Access basic account information."""
method = 'GET'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def create_user(self, username, password, name, email):
"""Create a sub account."""
method = 'POST'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
body = json.dumps({'username': username, 'password': password,
'name': name, 'email': email, })
return self.client.request(method, endpoint, body)
def get_concurrency(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/concurrency'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccounts(self):
"""Get a list of sub accounts associated with a parent account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/list-subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_siblings(self):
"""Get a list of sibling accounts associated with provided account."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/siblings'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccount_info(self):
"""Get information about a sub account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_activity(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1/{}/activity'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def get_usage(self, start=None, end=None):
"""Access historical account usage data."""
method = 'GET'
endpoint = '/rest/v1/users/{}/usage'.format(self.client.sauce_username)
data = {}
if start:
data['start'] = start
if end:
data['end'] = end
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint)
|
cgoldberg/sauceclient | sauceclient.py | Account.get_activity | python | def get_activity(self):
method = 'GET'
endpoint = '/rest/v1/{}/activity'.format(self.client.sauce_username)
return self.client.request(method, endpoint) | Check account concurrency limits. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L149-L153 | null | class Account(object):
"""Account Methods
These methods provide user account information and management.
- https://wiki.saucelabs.com/display/DOCS/Account+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_user(self):
"""Access basic account information."""
method = 'GET'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def create_user(self, username, password, name, email):
"""Create a sub account."""
method = 'POST'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
body = json.dumps({'username': username, 'password': password,
'name': name, 'email': email, })
return self.client.request(method, endpoint, body)
def get_concurrency(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/concurrency'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccounts(self):
"""Get a list of sub accounts associated with a parent account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/list-subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_siblings(self):
"""Get a list of sibling accounts associated with provided account."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/siblings'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccount_info(self):
"""Get information about a sub account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def change_access_key(self):
"""Change access key of your account."""
method = 'POST'
endpoint = '/rest/v1/users/{}/accesskey/change'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_usage(self, start=None, end=None):
"""Access historical account usage data."""
method = 'GET'
endpoint = '/rest/v1/users/{}/usage'.format(self.client.sauce_username)
data = {}
if start:
data['start'] = start
if end:
data['end'] = end
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint)
|
cgoldberg/sauceclient | sauceclient.py | Account.get_usage | python | def get_usage(self, start=None, end=None):
method = 'GET'
endpoint = '/rest/v1/users/{}/usage'.format(self.client.sauce_username)
data = {}
if start:
data['start'] = start
if end:
data['end'] = end
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint) | Access historical account usage data. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L155-L166 | null | class Account(object):
"""Account Methods
These methods provide user account information and management.
- https://wiki.saucelabs.com/display/DOCS/Account+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_user(self):
"""Access basic account information."""
method = 'GET'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def create_user(self, username, password, name, email):
"""Create a sub account."""
method = 'POST'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
body = json.dumps({'username': username, 'password': password,
'name': name, 'email': email, })
return self.client.request(method, endpoint, body)
def get_concurrency(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/concurrency'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccounts(self):
"""Get a list of sub accounts associated with a parent account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/list-subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_siblings(self):
"""Get a list of sibling accounts associated with provided account."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/siblings'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_subaccount_info(self):
"""Get information about a sub account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def change_access_key(self):
"""Change access key of your account."""
method = 'POST'
endpoint = '/rest/v1/users/{}/accesskey/change'.format(
self.client.sauce_username)
return self.client.request(method, endpoint)
def get_activity(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1/{}/activity'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
|
cgoldberg/sauceclient | sauceclient.py | Information.get_platforms | python | def get_platforms(self, automation_api='all'):
method = 'GET'
endpoint = '/rest/v1/info/platforms/{}'.format(automation_api)
return self.client.request(method, endpoint) | Get a list of objects describing all the OS and browser platforms
currently supported on Sauce Labs. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L351-L356 | null | class Information(object):
"""Information Methods
Information resources are publicly available data about
Sauce Lab's service.
- https://wiki.saucelabs.com/display/DOCS/Information+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_status(self):
"""Get the current status of Sauce Labs services."""
method = 'GET'
endpoint = '/rest/v1/info/status'
return self.client.request(method, endpoint)
def get_appium_eol_dates(self):
"""Get a list of Appium end-of-life dates. Dates are displayed in Unix
time."""
method = 'GET'
endpoint = '/rest/v1/info/platforms/appium/eol'
return self.client.request(method, endpoint)
|
cgoldberg/sauceclient | sauceclient.py | Jobs.get_jobs | python | def get_jobs(self, full=None, limit=None, skip=None, start=None, end=None,
output_format=None):
method = 'GET'
endpoint = '/rest/v1/{}/jobs'.format(self.client.sauce_username)
data = {}
if full is not None:
data['full'] = full
if limit is not None:
data['limit'] = limit
if skip is not None:
data['skip'] = skip
if start is not None:
data['from'] = start
if end is not None:
data['to'] = end
if output_format is not None:
data['format'] = output_format
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint) | List jobs belonging to a specific user. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L403-L423 | null | class Jobs(object):
"""Job Methods
- https://wiki.saucelabs.com/display/DOCS/Job+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_job(self, job_id):
"""Retreive a single job."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
return self.client.request(method, endpoint)
def update_job(self, job_id, build=None, custom_data=None,
name=None, passed=None, public=None, tags=None):
"""Edit an existing job."""
method = 'PUT'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
data = {}
if build is not None:
data['build'] = build
if custom_data is not None:
data['custom-data'] = custom_data
if name is not None:
data['name'] = name
if passed is not None:
data['passed'] = passed
if public is not None:
data['public'] = public
if tags is not None:
data['tags'] = tags
body = json.dumps(data)
return self.client.request(method, endpoint, body=body)
def delete_job(self, job_id):
"""Removes the job from the system with all the linked assets."""
method = 'DELETE'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
return self.client.request(method, endpoint)
def stop_job(self, job_id):
"""Terminates a running job."""
method = 'PUT'
endpoint = '/rest/v1/{}/jobs/{}/stop'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
def get_job_assets(self, job_id):
"""Get details about the static assets collected for a specific job."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs/{}/assets'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
def get_job_asset_url(self, job_id, filename):
"""Get details about the static assets collected for a specific job."""
return 'https://saucelabs.com/rest/v1/{}/jobs/{}/assets/{}'.format(
self.client.sauce_username, job_id, filename)
def delete_job_assets(self, job_id):
"""Delete all the assets captured during a test run."""
method = 'DELETE'
endpoint = '/rest/v1/{}/jobs/{}/assets'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
def get_auth_token(self, job_id, date_range=None):
"""Get an auth token to access protected job resources.
https://wiki.saucelabs.com/display/DOCS/Building+Links+to+Test+Results
"""
key = '{}:{}'.format(self.client.sauce_username,
self.client.sauce_access_key)
if date_range:
key = '{}:{}'.format(key, date_range)
return hmac.new(key.encode('utf-8'), job_id.encode('utf-8'),
md5).hexdigest()
|
cgoldberg/sauceclient | sauceclient.py | Jobs.update_job | python | def update_job(self, job_id, build=None, custom_data=None,
name=None, passed=None, public=None, tags=None):
method = 'PUT'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
data = {}
if build is not None:
data['build'] = build
if custom_data is not None:
data['custom-data'] = custom_data
if name is not None:
data['name'] = name
if passed is not None:
data['passed'] = passed
if public is not None:
data['public'] = public
if tags is not None:
data['tags'] = tags
body = json.dumps(data)
return self.client.request(method, endpoint, body=body) | Edit an existing job. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L432-L452 | null | class Jobs(object):
"""Job Methods
- https://wiki.saucelabs.com/display/DOCS/Job+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_jobs(self, full=None, limit=None, skip=None, start=None, end=None,
output_format=None):
"""List jobs belonging to a specific user."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs'.format(self.client.sauce_username)
data = {}
if full is not None:
data['full'] = full
if limit is not None:
data['limit'] = limit
if skip is not None:
data['skip'] = skip
if start is not None:
data['from'] = start
if end is not None:
data['to'] = end
if output_format is not None:
data['format'] = output_format
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint)
def get_job(self, job_id):
"""Retreive a single job."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
return self.client.request(method, endpoint)
def delete_job(self, job_id):
"""Removes the job from the system with all the linked assets."""
method = 'DELETE'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
return self.client.request(method, endpoint)
def stop_job(self, job_id):
"""Terminates a running job."""
method = 'PUT'
endpoint = '/rest/v1/{}/jobs/{}/stop'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
def get_job_assets(self, job_id):
"""Get details about the static assets collected for a specific job."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs/{}/assets'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
def get_job_asset_url(self, job_id, filename):
"""Get details about the static assets collected for a specific job."""
return 'https://saucelabs.com/rest/v1/{}/jobs/{}/assets/{}'.format(
self.client.sauce_username, job_id, filename)
def delete_job_assets(self, job_id):
"""Delete all the assets captured during a test run."""
method = 'DELETE'
endpoint = '/rest/v1/{}/jobs/{}/assets'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
def get_auth_token(self, job_id, date_range=None):
"""Get an auth token to access protected job resources.
https://wiki.saucelabs.com/display/DOCS/Building+Links+to+Test+Results
"""
key = '{}:{}'.format(self.client.sauce_username,
self.client.sauce_access_key)
if date_range:
key = '{}:{}'.format(key, date_range)
return hmac.new(key.encode('utf-8'), job_id.encode('utf-8'),
md5).hexdigest()
|
cgoldberg/sauceclient | sauceclient.py | Jobs.stop_job | python | def stop_job(self, job_id):
method = 'PUT'
endpoint = '/rest/v1/{}/jobs/{}/stop'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint) | Terminates a running job. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L461-L466 | null | class Jobs(object):
"""Job Methods
- https://wiki.saucelabs.com/display/DOCS/Job+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_jobs(self, full=None, limit=None, skip=None, start=None, end=None,
output_format=None):
"""List jobs belonging to a specific user."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs'.format(self.client.sauce_username)
data = {}
if full is not None:
data['full'] = full
if limit is not None:
data['limit'] = limit
if skip is not None:
data['skip'] = skip
if start is not None:
data['from'] = start
if end is not None:
data['to'] = end
if output_format is not None:
data['format'] = output_format
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint)
def get_job(self, job_id):
"""Retreive a single job."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
return self.client.request(method, endpoint)
def update_job(self, job_id, build=None, custom_data=None,
name=None, passed=None, public=None, tags=None):
"""Edit an existing job."""
method = 'PUT'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
data = {}
if build is not None:
data['build'] = build
if custom_data is not None:
data['custom-data'] = custom_data
if name is not None:
data['name'] = name
if passed is not None:
data['passed'] = passed
if public is not None:
data['public'] = public
if tags is not None:
data['tags'] = tags
body = json.dumps(data)
return self.client.request(method, endpoint, body=body)
def delete_job(self, job_id):
"""Removes the job from the system with all the linked assets."""
method = 'DELETE'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
return self.client.request(method, endpoint)
def get_job_assets(self, job_id):
"""Get details about the static assets collected for a specific job."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs/{}/assets'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
def get_job_asset_url(self, job_id, filename):
"""Get details about the static assets collected for a specific job."""
return 'https://saucelabs.com/rest/v1/{}/jobs/{}/assets/{}'.format(
self.client.sauce_username, job_id, filename)
def delete_job_assets(self, job_id):
"""Delete all the assets captured during a test run."""
method = 'DELETE'
endpoint = '/rest/v1/{}/jobs/{}/assets'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
def get_auth_token(self, job_id, date_range=None):
"""Get an auth token to access protected job resources.
https://wiki.saucelabs.com/display/DOCS/Building+Links+to+Test+Results
"""
key = '{}:{}'.format(self.client.sauce_username,
self.client.sauce_access_key)
if date_range:
key = '{}:{}'.format(key, date_range)
return hmac.new(key.encode('utf-8'), job_id.encode('utf-8'),
md5).hexdigest()
|
cgoldberg/sauceclient | sauceclient.py | Jobs.get_job_asset_url | python | def get_job_asset_url(self, job_id, filename):
return 'https://saucelabs.com/rest/v1/{}/jobs/{}/assets/{}'.format(
self.client.sauce_username, job_id, filename) | Get details about the static assets collected for a specific job. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L475-L478 | null | class Jobs(object):
"""Job Methods
- https://wiki.saucelabs.com/display/DOCS/Job+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_jobs(self, full=None, limit=None, skip=None, start=None, end=None,
output_format=None):
"""List jobs belonging to a specific user."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs'.format(self.client.sauce_username)
data = {}
if full is not None:
data['full'] = full
if limit is not None:
data['limit'] = limit
if skip is not None:
data['skip'] = skip
if start is not None:
data['from'] = start
if end is not None:
data['to'] = end
if output_format is not None:
data['format'] = output_format
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint)
def get_job(self, job_id):
"""Retreive a single job."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
return self.client.request(method, endpoint)
def update_job(self, job_id, build=None, custom_data=None,
name=None, passed=None, public=None, tags=None):
"""Edit an existing job."""
method = 'PUT'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
data = {}
if build is not None:
data['build'] = build
if custom_data is not None:
data['custom-data'] = custom_data
if name is not None:
data['name'] = name
if passed is not None:
data['passed'] = passed
if public is not None:
data['public'] = public
if tags is not None:
data['tags'] = tags
body = json.dumps(data)
return self.client.request(method, endpoint, body=body)
def delete_job(self, job_id):
"""Removes the job from the system with all the linked assets."""
method = 'DELETE'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
return self.client.request(method, endpoint)
def stop_job(self, job_id):
"""Terminates a running job."""
method = 'PUT'
endpoint = '/rest/v1/{}/jobs/{}/stop'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
def get_job_assets(self, job_id):
"""Get details about the static assets collected for a specific job."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs/{}/assets'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
def delete_job_assets(self, job_id):
"""Delete all the assets captured during a test run."""
method = 'DELETE'
endpoint = '/rest/v1/{}/jobs/{}/assets'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
def get_auth_token(self, job_id, date_range=None):
"""Get an auth token to access protected job resources.
https://wiki.saucelabs.com/display/DOCS/Building+Links+to+Test+Results
"""
key = '{}:{}'.format(self.client.sauce_username,
self.client.sauce_access_key)
if date_range:
key = '{}:{}'.format(key, date_range)
return hmac.new(key.encode('utf-8'), job_id.encode('utf-8'),
md5).hexdigest()
|
cgoldberg/sauceclient | sauceclient.py | Jobs.get_auth_token | python | def get_auth_token(self, job_id, date_range=None):
key = '{}:{}'.format(self.client.sauce_username,
self.client.sauce_access_key)
if date_range:
key = '{}:{}'.format(key, date_range)
return hmac.new(key.encode('utf-8'), job_id.encode('utf-8'),
md5).hexdigest() | Get an auth token to access protected job resources.
https://wiki.saucelabs.com/display/DOCS/Building+Links+to+Test+Results | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L487-L497 | null | class Jobs(object):
"""Job Methods
- https://wiki.saucelabs.com/display/DOCS/Job+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_jobs(self, full=None, limit=None, skip=None, start=None, end=None,
output_format=None):
"""List jobs belonging to a specific user."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs'.format(self.client.sauce_username)
data = {}
if full is not None:
data['full'] = full
if limit is not None:
data['limit'] = limit
if skip is not None:
data['skip'] = skip
if start is not None:
data['from'] = start
if end is not None:
data['to'] = end
if output_format is not None:
data['format'] = output_format
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint)
def get_job(self, job_id):
"""Retreive a single job."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
return self.client.request(method, endpoint)
def update_job(self, job_id, build=None, custom_data=None,
name=None, passed=None, public=None, tags=None):
"""Edit an existing job."""
method = 'PUT'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
data = {}
if build is not None:
data['build'] = build
if custom_data is not None:
data['custom-data'] = custom_data
if name is not None:
data['name'] = name
if passed is not None:
data['passed'] = passed
if public is not None:
data['public'] = public
if tags is not None:
data['tags'] = tags
body = json.dumps(data)
return self.client.request(method, endpoint, body=body)
def delete_job(self, job_id):
"""Removes the job from the system with all the linked assets."""
method = 'DELETE'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
return self.client.request(method, endpoint)
def stop_job(self, job_id):
"""Terminates a running job."""
method = 'PUT'
endpoint = '/rest/v1/{}/jobs/{}/stop'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
def get_job_assets(self, job_id):
"""Get details about the static assets collected for a specific job."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs/{}/assets'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
def get_job_asset_url(self, job_id, filename):
"""Get details about the static assets collected for a specific job."""
return 'https://saucelabs.com/rest/v1/{}/jobs/{}/assets/{}'.format(
self.client.sauce_username, job_id, filename)
def delete_job_assets(self, job_id):
"""Delete all the assets captured during a test run."""
method = 'DELETE'
endpoint = '/rest/v1/{}/jobs/{}/assets'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint)
|
cgoldberg/sauceclient | sauceclient.py | Storage.upload_file | python | def upload_file(self, filepath, overwrite=True):
method = 'POST'
filename = os.path.split(filepath)[1]
endpoint = '/rest/v1/storage/{}/{}?overwrite={}'.format(
self.client.sauce_username, filename, "true" if overwrite else "false")
with open(filepath, 'rb') as filehandle:
body = filehandle.read()
return self.client.request(method, endpoint, body,
content_type='application/octet-stream') | Uploads a file to the temporary sauce storage. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L509-L518 | null | class Storage(object):
"""Temporary Storage Methods
- https://wiki.saucelabs.com/display/DOCS/Temporary+Storage+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_stored_files(self):
"""Check which files are in your temporary storage."""
method = 'GET'
endpoint = '/rest/v1/storage/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
|
cgoldberg/sauceclient | sauceclient.py | Storage.get_stored_files | python | def get_stored_files(self):
method = 'GET'
endpoint = '/rest/v1/storage/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint) | Check which files are in your temporary storage. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L520-L524 | null | class Storage(object):
"""Temporary Storage Methods
- https://wiki.saucelabs.com/display/DOCS/Temporary+Storage+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def upload_file(self, filepath, overwrite=True):
"""Uploads a file to the temporary sauce storage."""
method = 'POST'
filename = os.path.split(filepath)[1]
endpoint = '/rest/v1/storage/{}/{}?overwrite={}'.format(
self.client.sauce_username, filename, "true" if overwrite else "false")
with open(filepath, 'rb') as filehandle:
body = filehandle.read()
return self.client.request(method, endpoint, body,
content_type='application/octet-stream')
|
cgoldberg/sauceclient | sauceclient.py | Tunnels.get_tunnels | python | def get_tunnels(self):
method = 'GET'
endpoint = '/rest/v1/{}/tunnels'.format(self.client.sauce_username)
return self.client.request(method, endpoint) | Retrieves all running tunnels for a specific user. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L536-L540 | null | class Tunnels(object):
"""Tunnel Methods
- https://wiki.saucelabs.com/display/DOCS/Tunnel+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_tunnel(self, tunnel_id):
"""Get information for a tunnel given its ID."""
method = 'GET'
endpoint = '/rest/v1/{}/tunnels/{}'.format(
self.client.sauce_username, tunnel_id)
return self.client.request(method, endpoint)
def delete_tunnel(self, tunnel_id):
"""Get information for a tunnel given its ID."""
method = 'DELETE'
endpoint = '/rest/v1/{}/tunnels/{}'.format(
self.client.sauce_username, tunnel_id)
return self.client.request(method, endpoint)
|
cgoldberg/sauceclient | sauceclient.py | Tunnels.get_tunnel | python | def get_tunnel(self, tunnel_id):
method = 'GET'
endpoint = '/rest/v1/{}/tunnels/{}'.format(
self.client.sauce_username, tunnel_id)
return self.client.request(method, endpoint) | Get information for a tunnel given its ID. | train | https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L542-L547 | null | class Tunnels(object):
"""Tunnel Methods
- https://wiki.saucelabs.com/display/DOCS/Tunnel+Methods
"""
def __init__(self, client):
"""Initialize class."""
self.client = client
def get_tunnels(self):
"""Retrieves all running tunnels for a specific user."""
method = 'GET'
endpoint = '/rest/v1/{}/tunnels'.format(self.client.sauce_username)
return self.client.request(method, endpoint)
def delete_tunnel(self, tunnel_id):
"""Get information for a tunnel given its ID."""
method = 'DELETE'
endpoint = '/rest/v1/{}/tunnels/{}'.format(
self.client.sauce_username, tunnel_id)
return self.client.request(method, endpoint)
|
thefab/tornadis | tornadis/pubsub.py | PubSubClient.pubsub_pop_message | python | def pubsub_pop_message(self, deadline=None):
if not self.subscribed:
excep = ClientError("you must subscribe before using "
"pubsub_pop_message")
raise tornado.gen.Return(excep)
reply = None
try:
reply = self._reply_list.pop(0)
raise tornado.gen.Return(reply)
except IndexError:
pass
if deadline is not None:
td = timedelta(seconds=deadline)
yield self._condition.wait(timeout=td)
else:
yield self._condition.wait()
try:
reply = self._reply_list.pop(0)
except IndexError:
pass
raise tornado.gen.Return(reply) | Pops a message for a subscribed client.
Args:
deadline (int): max number of seconds to wait (None => no timeout)
Returns:
Future with the popped message as result (or None if timeout
or ConnectionError object in case of connection errors
or ClientError object if you are not subscribed) | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/pubsub.py#L140-L170 | null | class PubSubClient(Client):
"""High level specific object to interact with pubsub redis.
The call() method is forbidden with this object.
More informations on the redis side: http://redis.io/topics/pubsub
"""
def call(self, *args, **kwargs):
"""Not allowed method with PubSubClient object."""
raise ClientError("not allowed with PubSubClient object")
def async_call(self, *args, **kwargs):
"""Not allowed method with PubSubClient object."""
raise ClientError("not allowed with PubSubClient object")
def pubsub_subscribe(self, *args):
"""Subscribes to a list of channels.
http://redis.io/topics/pubsub
Args:
*args: variable list of channels to subscribe.
Returns:
Future: Future with True as result if the subscribe is ok.
Examples:
>>> yield client.pubsub_subscribe("channel1", "channel2")
"""
return self._pubsub_subscribe(b"SUBSCRIBE", *args)
def pubsub_psubscribe(self, *args):
"""Subscribes to a list of patterns.
http://redis.io/topics/pubsub
Args:
*args: variable list of patterns to subscribe.
Returns:
Future: Future with True as result if the subscribe is ok.
Examples:
>>> yield client.pubsub_psubscribe("channel*", "foo*")
"""
return self._pubsub_subscribe(b"PSUBSCRIBE", *args)
@tornado.gen.coroutine
def _pubsub_subscribe(self, command, *args):
if len(args) == 0:
LOG.warning("you must provide at least one argument")
raise tornado.gen.Return(False)
results = yield Client.call(self, command, *args,
__multiple_replies=len(args))
if isinstance(results, ConnectionError):
raise tornado.gen.Return(False)
for reply in results:
if isinstance(reply, ConnectionError) or len(reply) != 3 or \
reply[0].lower() != command.lower() or reply[2] == 0:
raise tornado.gen.Return(False)
self.subscribed = True
raise tornado.gen.Return(True)
def pubsub_unsubscribe(self, *args):
"""Unsubscribes from a list of channels.
http://redis.io/topics/pubsub
Args:
*args: variable list of channels to unsubscribe.
Returns:
Future: Future with True as result if the unsubscribe is ok.
Examples:
>>> yield client.pubsub_unsubscribe("channel1", "channel2")
"""
return self._pubsub_unsubscribe(b"UNSUBSCRIBE", *args)
def pubsub_punsubscribe(self, *args):
"""Unsubscribes from a list of patterns.
http://redis.io/topics/pubsub
Args:
*args: variable list of patterns to unsubscribe.
Returns:
Future: Future with True as result if the unsubscribe is ok.
Examples:
>>> yield client.pubsub_punsubscribe("channel*", "foo*")
"""
return self._pubsub_unsubscribe(b"PUNSUBSCRIBE", *args)
@tornado.gen.coroutine
def _pubsub_unsubscribe(self, command, *args):
if len(args) == 0:
# see https://github.com/thefab/tornadis/issues/17
args_len = 1
else:
args_len = len(args)
results = yield Client.call(self, command, *args,
__multiple_replies=args_len)
if isinstance(results, ConnectionError):
raise tornado.gen.Return(False)
for reply in results:
if isinstance(reply, ConnectionError) or len(reply) != 3 or \
reply[0].lower() != command.lower():
raise tornado.gen.Return(False)
if reply[2] == 0:
self.subscribed = False
raise tornado.gen.Return(True)
@tornado.gen.coroutine
|
thefab/tornadis | tornadis/write_buffer.py | WriteBuffer.clear | python | def clear(self):
self._deque.clear()
self._total_length = 0
self._has_view = False | Resets the object at its initial (empty) state. | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/write_buffer.py#L39-L43 | null | class WriteBuffer(object):
"""Write buffer implementation optimized for reading by max sized chunks.
It is built on a deque and memoryviews to avoid too much string copies.
Attributes:
use_memory_view_min_size (int): minimum size before using memoryview
objects (to avoid object creation overhead bigger than string
copy for this size)
_deque (collections.deque): deque object to store each write
(without copy)
_has_view (boolean): True if there is some memoryview objects inside
the deque (if _has_view=False, there are some
"fastpath optimizations")
_total_length (int): total size (in bytes) of the buffer content
"""
def __init__(self, use_memory_view_min_size=4096):
"""Constructor.
Args:
use_memory_view_min_size (int): minimum size before using
memoryview objects (advanced option, the default is probably
good for you).
"""
self.use_memory_view_min_size = use_memory_view_min_size
self._deque = collections.deque()
self.clear()
def __str__(self):
return self._tobytes()
def __bytes__(self):
return self._tobytes()
def __len__(self):
return self._total_length
def _tobytes(self):
"""Serializes the write buffer into a single string (bytes).
Returns:
a string (bytes) object.
"""
if not self._has_view:
# fast path optimization
if len(self._deque) == 0:
return b""
elif len(self._deque) == 1:
# no copy
return self._deque[0]
else:
return b"".join(self._deque)
else:
tmp = [x.tobytes() if isinstance(x, memoryview) else x
for x in self._deque]
return b"".join(tmp)
def is_empty(self):
"""Returns True if the buffer is empty.
Returns:
True or False.
"""
return self._total_length == 0
def append(self, data):
"""Appends some data to end of the buffer (right).
No string copy is done during this operation.
Args:
data: data to put in the buffer (can be string, memoryview or
another WriteBuffer).
"""
self._append(data, True)
def appendleft(self, data):
"""Appends some data at the beginning of the buffer (left).
No string copy is done during this operation.
Args:
data: data to put in the buffer (can be string, memoryview or
another WriteBuffer).
"""
self._append(data, False)
def _append(self, data, right):
if isinstance(data, WriteBuffer):
# data is another writebuffer
if right:
self._deque.extend(data._deque)
else:
self._deque.extendleft(data._deque)
self._total_length += data._total_length
self._has_view = self._has_view and data._has_view
else:
length = len(data)
if length == 0:
return
if isinstance(data, memoryview):
# data is a memory viewobject
# nothing spacial but now the buffer has views
self._has_view = True
self._total_length += length
if right:
self._deque.append(data)
else:
self._deque.appendleft(data)
def _get_pointer_or_memoryview(self, data, data_length):
if data_length < self.use_memory_view_min_size \
or isinstance(data, memoryview):
return data
else:
return memoryview(data)
def pop_chunk(self, chunk_max_size):
"""Pops a chunk of the given max size.
Optimized to avoid too much string copies.
Args:
chunk_max_size (int): max size of the returned chunk.
Returns:
string (bytes) with a size <= chunk_max_size.
"""
if self._total_length < chunk_max_size:
# fastpath (the whole queue fit in a single chunk)
res = self._tobytes()
self.clear()
return res
first_iteration = True
while True:
try:
data = self._deque.popleft()
data_length = len(data)
self._total_length -= data_length
if first_iteration:
# first iteration
if data_length == chunk_max_size:
# we are lucky !
return data
elif data_length > chunk_max_size:
# we have enough data at first iteration
# => fast path optimization
view = self._get_pointer_or_memoryview(data,
data_length)
self.appendleft(view[chunk_max_size:])
return view[:chunk_max_size]
else:
# no single iteration fast path optimization :-(
# let's use a WriteBuffer to build the result chunk
chunk_write_buffer = WriteBuffer()
else:
# not first iteration
if chunk_write_buffer._total_length + data_length \
> chunk_max_size:
view = self._get_pointer_or_memoryview(data,
data_length)
limit = chunk_max_size - \
chunk_write_buffer._total_length - data_length
self.appendleft(view[limit:])
data = view[:limit]
chunk_write_buffer.append(data)
if chunk_write_buffer._total_length >= chunk_max_size:
break
except IndexError:
# the buffer is empty (so no memoryview inside)
self._has_view = False
break
first_iteration = False
return chunk_write_buffer._tobytes()
|
thefab/tornadis | tornadis/write_buffer.py | WriteBuffer._tobytes | python | def _tobytes(self):
if not self._has_view:
# fast path optimization
if len(self._deque) == 0:
return b""
elif len(self._deque) == 1:
# no copy
return self._deque[0]
else:
return b"".join(self._deque)
else:
tmp = [x.tobytes() if isinstance(x, memoryview) else x
for x in self._deque]
return b"".join(tmp) | Serializes the write buffer into a single string (bytes).
Returns:
a string (bytes) object. | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/write_buffer.py#L54-L72 | null | class WriteBuffer(object):
"""Write buffer implementation optimized for reading by max sized chunks.
It is built on a deque and memoryviews to avoid too much string copies.
Attributes:
use_memory_view_min_size (int): minimum size before using memoryview
objects (to avoid object creation overhead bigger than string
copy for this size)
_deque (collections.deque): deque object to store each write
(without copy)
_has_view (boolean): True if there is some memoryview objects inside
the deque (if _has_view=False, there are some
"fastpath optimizations")
_total_length (int): total size (in bytes) of the buffer content
"""
def __init__(self, use_memory_view_min_size=4096):
"""Constructor.
Args:
use_memory_view_min_size (int): minimum size before using
memoryview objects (advanced option, the default is probably
good for you).
"""
self.use_memory_view_min_size = use_memory_view_min_size
self._deque = collections.deque()
self.clear()
def clear(self):
"""Resets the object at its initial (empty) state."""
self._deque.clear()
self._total_length = 0
self._has_view = False
def __str__(self):
return self._tobytes()
def __bytes__(self):
return self._tobytes()
def __len__(self):
return self._total_length
def is_empty(self):
"""Returns True if the buffer is empty.
Returns:
True or False.
"""
return self._total_length == 0
def append(self, data):
"""Appends some data to end of the buffer (right).
No string copy is done during this operation.
Args:
data: data to put in the buffer (can be string, memoryview or
another WriteBuffer).
"""
self._append(data, True)
def appendleft(self, data):
"""Appends some data at the beginning of the buffer (left).
No string copy is done during this operation.
Args:
data: data to put in the buffer (can be string, memoryview or
another WriteBuffer).
"""
self._append(data, False)
def _append(self, data, right):
if isinstance(data, WriteBuffer):
# data is another writebuffer
if right:
self._deque.extend(data._deque)
else:
self._deque.extendleft(data._deque)
self._total_length += data._total_length
self._has_view = self._has_view and data._has_view
else:
length = len(data)
if length == 0:
return
if isinstance(data, memoryview):
# data is a memory viewobject
# nothing spacial but now the buffer has views
self._has_view = True
self._total_length += length
if right:
self._deque.append(data)
else:
self._deque.appendleft(data)
def _get_pointer_or_memoryview(self, data, data_length):
if data_length < self.use_memory_view_min_size \
or isinstance(data, memoryview):
return data
else:
return memoryview(data)
def pop_chunk(self, chunk_max_size):
"""Pops a chunk of the given max size.
Optimized to avoid too much string copies.
Args:
chunk_max_size (int): max size of the returned chunk.
Returns:
string (bytes) with a size <= chunk_max_size.
"""
if self._total_length < chunk_max_size:
# fastpath (the whole queue fit in a single chunk)
res = self._tobytes()
self.clear()
return res
first_iteration = True
while True:
try:
data = self._deque.popleft()
data_length = len(data)
self._total_length -= data_length
if first_iteration:
# first iteration
if data_length == chunk_max_size:
# we are lucky !
return data
elif data_length > chunk_max_size:
# we have enough data at first iteration
# => fast path optimization
view = self._get_pointer_or_memoryview(data,
data_length)
self.appendleft(view[chunk_max_size:])
return view[:chunk_max_size]
else:
# no single iteration fast path optimization :-(
# let's use a WriteBuffer to build the result chunk
chunk_write_buffer = WriteBuffer()
else:
# not first iteration
if chunk_write_buffer._total_length + data_length \
> chunk_max_size:
view = self._get_pointer_or_memoryview(data,
data_length)
limit = chunk_max_size - \
chunk_write_buffer._total_length - data_length
self.appendleft(view[limit:])
data = view[:limit]
chunk_write_buffer.append(data)
if chunk_write_buffer._total_length >= chunk_max_size:
break
except IndexError:
# the buffer is empty (so no memoryview inside)
self._has_view = False
break
first_iteration = False
return chunk_write_buffer._tobytes()
|
thefab/tornadis | tornadis/write_buffer.py | WriteBuffer.pop_chunk | python | def pop_chunk(self, chunk_max_size):
if self._total_length < chunk_max_size:
# fastpath (the whole queue fit in a single chunk)
res = self._tobytes()
self.clear()
return res
first_iteration = True
while True:
try:
data = self._deque.popleft()
data_length = len(data)
self._total_length -= data_length
if first_iteration:
# first iteration
if data_length == chunk_max_size:
# we are lucky !
return data
elif data_length > chunk_max_size:
# we have enough data at first iteration
# => fast path optimization
view = self._get_pointer_or_memoryview(data,
data_length)
self.appendleft(view[chunk_max_size:])
return view[:chunk_max_size]
else:
# no single iteration fast path optimization :-(
# let's use a WriteBuffer to build the result chunk
chunk_write_buffer = WriteBuffer()
else:
# not first iteration
if chunk_write_buffer._total_length + data_length \
> chunk_max_size:
view = self._get_pointer_or_memoryview(data,
data_length)
limit = chunk_max_size - \
chunk_write_buffer._total_length - data_length
self.appendleft(view[limit:])
data = view[:limit]
chunk_write_buffer.append(data)
if chunk_write_buffer._total_length >= chunk_max_size:
break
except IndexError:
# the buffer is empty (so no memoryview inside)
self._has_view = False
break
first_iteration = False
return chunk_write_buffer._tobytes() | Pops a chunk of the given max size.
Optimized to avoid too much string copies.
Args:
chunk_max_size (int): max size of the returned chunk.
Returns:
string (bytes) with a size <= chunk_max_size. | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/write_buffer.py#L134-L190 | [
"def clear(self):\n \"\"\"Resets the object at its initial (empty) state.\"\"\"\n self._deque.clear()\n self._total_length = 0\n self._has_view = False\n",
"def _tobytes(self):\n \"\"\"Serializes the write buffer into a single string (bytes).\n\n Returns:\n a string (bytes) object.\n \... | class WriteBuffer(object):
"""Write buffer implementation optimized for reading by max sized chunks.
It is built on a deque and memoryviews to avoid too much string copies.
Attributes:
use_memory_view_min_size (int): minimum size before using memoryview
objects (to avoid object creation overhead bigger than string
copy for this size)
_deque (collections.deque): deque object to store each write
(without copy)
_has_view (boolean): True if there is some memoryview objects inside
the deque (if _has_view=False, there are some
"fastpath optimizations")
_total_length (int): total size (in bytes) of the buffer content
"""
def __init__(self, use_memory_view_min_size=4096):
"""Constructor.
Args:
use_memory_view_min_size (int): minimum size before using
memoryview objects (advanced option, the default is probably
good for you).
"""
self.use_memory_view_min_size = use_memory_view_min_size
self._deque = collections.deque()
self.clear()
def clear(self):
"""Resets the object at its initial (empty) state."""
self._deque.clear()
self._total_length = 0
self._has_view = False
def __str__(self):
return self._tobytes()
def __bytes__(self):
return self._tobytes()
def __len__(self):
return self._total_length
def _tobytes(self):
"""Serializes the write buffer into a single string (bytes).
Returns:
a string (bytes) object.
"""
if not self._has_view:
# fast path optimization
if len(self._deque) == 0:
return b""
elif len(self._deque) == 1:
# no copy
return self._deque[0]
else:
return b"".join(self._deque)
else:
tmp = [x.tobytes() if isinstance(x, memoryview) else x
for x in self._deque]
return b"".join(tmp)
def is_empty(self):
"""Returns True if the buffer is empty.
Returns:
True or False.
"""
return self._total_length == 0
def append(self, data):
"""Appends some data to end of the buffer (right).
No string copy is done during this operation.
Args:
data: data to put in the buffer (can be string, memoryview or
another WriteBuffer).
"""
self._append(data, True)
def appendleft(self, data):
"""Appends some data at the beginning of the buffer (left).
No string copy is done during this operation.
Args:
data: data to put in the buffer (can be string, memoryview or
another WriteBuffer).
"""
self._append(data, False)
def _append(self, data, right):
if isinstance(data, WriteBuffer):
# data is another writebuffer
if right:
self._deque.extend(data._deque)
else:
self._deque.extendleft(data._deque)
self._total_length += data._total_length
self._has_view = self._has_view and data._has_view
else:
length = len(data)
if length == 0:
return
if isinstance(data, memoryview):
# data is a memory viewobject
# nothing spacial but now the buffer has views
self._has_view = True
self._total_length += length
if right:
self._deque.append(data)
else:
self._deque.appendleft(data)
def _get_pointer_or_memoryview(self, data, data_length):
if data_length < self.use_memory_view_min_size \
or isinstance(data, memoryview):
return data
else:
return memoryview(data)
|
thefab/tornadis | tornadis/pool.py | ClientPool.get_connected_client | python | def get_connected_client(self):
if self.__sem is not None:
yield self.__sem.acquire()
client = None
newly_created, client = self._get_client_from_pool_or_make_it()
if newly_created:
res = yield client.connect()
if not res:
LOG.warning("can't connect to %s", client.title)
raise tornado.gen.Return(
ClientError("can't connect to %s" % client.title))
raise tornado.gen.Return(client) | Gets a connected Client object.
If max_size is reached, this method will block until a new client
object is available.
Returns:
A Future object with connected Client instance as a result
(or ClientError if there was a connection problem) | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/pool.py#L75-L95 | [
"def _get_client_from_pool_or_make_it(self):\n try:\n while True:\n client = self.__pool.popleft()\n if client.is_connected():\n if self._is_expired_client(client):\n client.disconnect()\n continue\n break\n excep... | class ClientPool(object):
"""High level object to deal with a pool of redis clients."""
def __init__(self, max_size=-1, client_timeout=-1, autoclose=False,
**client_kwargs):
"""Constructor.
Args:
max_size (int): max size of the pool (-1 means "no limit").
client_timeout (int): timeout in seconds of a connection released
to the pool (-1 means "no timeout").
autoclose (boolean): automatically disconnect released connections
with lifetime > client_timeout (test made every
client_timeout/10 seconds).
client_kwargs (dict): Client constructor arguments.
"""
self.max_size = max_size
self.client_timeout = client_timeout
self.client_kwargs = client_kwargs
self.__ioloop = client_kwargs.get('ioloop',
tornado.ioloop.IOLoop.instance())
self.autoclose = autoclose
self.__pool = deque()
if self.max_size != -1:
self.__sem = tornado.locks.Semaphore(self.max_size)
else:
self.__sem = None
self.__autoclose_periodic = None
if self.autoclose and self.client_timeout > 0:
every = int(self.client_timeout) * 100
if int(tornado.version[0]) >= 5:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every)
else:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every, self.__ioloop)
self.__autoclose_periodic = cb
self.__autoclose_periodic.start()
def _get_client_from_pool_or_make_it(self):
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
continue
break
except IndexError:
client = self._make_client()
return (True, client)
return (False, client)
@tornado.gen.coroutine
def get_client_nowait(self):
"""Gets a Client object (not necessary connected).
If max_size is reached, this method will return None (and won't block).
Returns:
A Client instance (not necessary connected) as result (or None).
"""
if self.__sem is not None:
if self.__sem._value == 0:
return None
self.__sem.acquire()
_, client = self._get_client_from_pool_or_make_it()
return client
def _autoclose(self):
newpool = deque()
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
else:
newpool.append(client)
except IndexError:
self.__pool = newpool
def _is_expired_client(self, client):
if self.client_timeout != -1 and client.is_connected():
delta = client.get_last_state_change_timedelta()
if delta.total_seconds() >= self.client_timeout:
return True
return False
def connected_client(self):
"""Returns a ContextManagerFuture to be yielded in a with statement.
Returns:
A ContextManagerFuture object.
Examples:
>>> with (yield pool.connected_client()) as client:
# client is a connected tornadis.Client instance
# it will be automatically released to the pool thanks to
# the "with" keyword
reply = yield client.call("PING")
"""
future = self.get_connected_client()
cb = functools.partial(self._connected_client_release_cb, future)
return ContextManagerFuture(future, cb)
def _connected_client_release_cb(self, future=None):
client = future.result()
self.release_client(client)
def release_client(self, client):
"""Releases a client object to the pool.
Args:
client: Client object.
"""
if isinstance(client, Client):
if not self._is_expired_client(client):
LOG.debug('Client is not expired. Adding back to pool')
self.__pool.append(client)
elif client.is_connected():
LOG.debug('Client is expired and connected. Disconnecting')
client.disconnect()
if self.__sem is not None:
self.__sem.release()
def destroy(self):
"""Disconnects all pooled client objects."""
while True:
try:
client = self.__pool.popleft()
if isinstance(client, Client):
client.disconnect()
except IndexError:
break
@tornado.gen.coroutine
def preconnect(self, size=-1):
"""(pre)Connects some or all redis clients inside the pool.
Args:
size (int): number of redis clients to build and to connect
(-1 means all clients if pool max_size > -1)
Raises:
ClientError: when size == -1 and pool max_size == -1
"""
if size == -1 and self.max_size == -1:
raise ClientError("size=-1 not allowed with pool max_size=-1")
limit = min(size, self.max_size) if size != -1 else self.max_size
clients = yield [self.get_connected_client() for _ in range(0, limit)]
for client in clients:
self.release_client(client)
def _make_client(self):
"""Makes and returns a Client object."""
kwargs = self.client_kwargs
client = Client(**kwargs)
return client
|
thefab/tornadis | tornadis/pool.py | ClientPool.get_client_nowait | python | def get_client_nowait(self):
if self.__sem is not None:
if self.__sem._value == 0:
return None
self.__sem.acquire()
_, client = self._get_client_from_pool_or_make_it()
return client | Gets a Client object (not necessary connected).
If max_size is reached, this method will return None (and won't block).
Returns:
A Client instance (not necessary connected) as result (or None). | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/pool.py#L97-L110 | [
"def _get_client_from_pool_or_make_it(self):\n try:\n while True:\n client = self.__pool.popleft()\n if client.is_connected():\n if self._is_expired_client(client):\n client.disconnect()\n continue\n break\n excep... | class ClientPool(object):
"""High level object to deal with a pool of redis clients."""
def __init__(self, max_size=-1, client_timeout=-1, autoclose=False,
**client_kwargs):
"""Constructor.
Args:
max_size (int): max size of the pool (-1 means "no limit").
client_timeout (int): timeout in seconds of a connection released
to the pool (-1 means "no timeout").
autoclose (boolean): automatically disconnect released connections
with lifetime > client_timeout (test made every
client_timeout/10 seconds).
client_kwargs (dict): Client constructor arguments.
"""
self.max_size = max_size
self.client_timeout = client_timeout
self.client_kwargs = client_kwargs
self.__ioloop = client_kwargs.get('ioloop',
tornado.ioloop.IOLoop.instance())
self.autoclose = autoclose
self.__pool = deque()
if self.max_size != -1:
self.__sem = tornado.locks.Semaphore(self.max_size)
else:
self.__sem = None
self.__autoclose_periodic = None
if self.autoclose and self.client_timeout > 0:
every = int(self.client_timeout) * 100
if int(tornado.version[0]) >= 5:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every)
else:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every, self.__ioloop)
self.__autoclose_periodic = cb
self.__autoclose_periodic.start()
def _get_client_from_pool_or_make_it(self):
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
continue
break
except IndexError:
client = self._make_client()
return (True, client)
return (False, client)
@tornado.gen.coroutine
def get_connected_client(self):
"""Gets a connected Client object.
If max_size is reached, this method will block until a new client
object is available.
Returns:
A Future object with connected Client instance as a result
(or ClientError if there was a connection problem)
"""
if self.__sem is not None:
yield self.__sem.acquire()
client = None
newly_created, client = self._get_client_from_pool_or_make_it()
if newly_created:
res = yield client.connect()
if not res:
LOG.warning("can't connect to %s", client.title)
raise tornado.gen.Return(
ClientError("can't connect to %s" % client.title))
raise tornado.gen.Return(client)
def _autoclose(self):
newpool = deque()
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
else:
newpool.append(client)
except IndexError:
self.__pool = newpool
def _is_expired_client(self, client):
if self.client_timeout != -1 and client.is_connected():
delta = client.get_last_state_change_timedelta()
if delta.total_seconds() >= self.client_timeout:
return True
return False
def connected_client(self):
"""Returns a ContextManagerFuture to be yielded in a with statement.
Returns:
A ContextManagerFuture object.
Examples:
>>> with (yield pool.connected_client()) as client:
# client is a connected tornadis.Client instance
# it will be automatically released to the pool thanks to
# the "with" keyword
reply = yield client.call("PING")
"""
future = self.get_connected_client()
cb = functools.partial(self._connected_client_release_cb, future)
return ContextManagerFuture(future, cb)
def _connected_client_release_cb(self, future=None):
client = future.result()
self.release_client(client)
def release_client(self, client):
"""Releases a client object to the pool.
Args:
client: Client object.
"""
if isinstance(client, Client):
if not self._is_expired_client(client):
LOG.debug('Client is not expired. Adding back to pool')
self.__pool.append(client)
elif client.is_connected():
LOG.debug('Client is expired and connected. Disconnecting')
client.disconnect()
if self.__sem is not None:
self.__sem.release()
def destroy(self):
"""Disconnects all pooled client objects."""
while True:
try:
client = self.__pool.popleft()
if isinstance(client, Client):
client.disconnect()
except IndexError:
break
@tornado.gen.coroutine
def preconnect(self, size=-1):
"""(pre)Connects some or all redis clients inside the pool.
Args:
size (int): number of redis clients to build and to connect
(-1 means all clients if pool max_size > -1)
Raises:
ClientError: when size == -1 and pool max_size == -1
"""
if size == -1 and self.max_size == -1:
raise ClientError("size=-1 not allowed with pool max_size=-1")
limit = min(size, self.max_size) if size != -1 else self.max_size
clients = yield [self.get_connected_client() for _ in range(0, limit)]
for client in clients:
self.release_client(client)
def _make_client(self):
"""Makes and returns a Client object."""
kwargs = self.client_kwargs
client = Client(**kwargs)
return client
|
thefab/tornadis | tornadis/pool.py | ClientPool.connected_client | python | def connected_client(self):
future = self.get_connected_client()
cb = functools.partial(self._connected_client_release_cb, future)
return ContextManagerFuture(future, cb) | Returns a ContextManagerFuture to be yielded in a with statement.
Returns:
A ContextManagerFuture object.
Examples:
>>> with (yield pool.connected_client()) as client:
# client is a connected tornadis.Client instance
# it will be automatically released to the pool thanks to
# the "with" keyword
reply = yield client.call("PING") | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/pool.py#L132-L147 | null | class ClientPool(object):
"""High level object to deal with a pool of redis clients."""
def __init__(self, max_size=-1, client_timeout=-1, autoclose=False,
**client_kwargs):
"""Constructor.
Args:
max_size (int): max size of the pool (-1 means "no limit").
client_timeout (int): timeout in seconds of a connection released
to the pool (-1 means "no timeout").
autoclose (boolean): automatically disconnect released connections
with lifetime > client_timeout (test made every
client_timeout/10 seconds).
client_kwargs (dict): Client constructor arguments.
"""
self.max_size = max_size
self.client_timeout = client_timeout
self.client_kwargs = client_kwargs
self.__ioloop = client_kwargs.get('ioloop',
tornado.ioloop.IOLoop.instance())
self.autoclose = autoclose
self.__pool = deque()
if self.max_size != -1:
self.__sem = tornado.locks.Semaphore(self.max_size)
else:
self.__sem = None
self.__autoclose_periodic = None
if self.autoclose and self.client_timeout > 0:
every = int(self.client_timeout) * 100
if int(tornado.version[0]) >= 5:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every)
else:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every, self.__ioloop)
self.__autoclose_periodic = cb
self.__autoclose_periodic.start()
def _get_client_from_pool_or_make_it(self):
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
continue
break
except IndexError:
client = self._make_client()
return (True, client)
return (False, client)
@tornado.gen.coroutine
def get_connected_client(self):
"""Gets a connected Client object.
If max_size is reached, this method will block until a new client
object is available.
Returns:
A Future object with connected Client instance as a result
(or ClientError if there was a connection problem)
"""
if self.__sem is not None:
yield self.__sem.acquire()
client = None
newly_created, client = self._get_client_from_pool_or_make_it()
if newly_created:
res = yield client.connect()
if not res:
LOG.warning("can't connect to %s", client.title)
raise tornado.gen.Return(
ClientError("can't connect to %s" % client.title))
raise tornado.gen.Return(client)
def get_client_nowait(self):
"""Gets a Client object (not necessarily connected).
If max_size is reached, this method will return None (and won't block).
Returns:
A Client instance (not necessarily connected) as result (or None).
"""
if self.__sem is not None:
if self.__sem._value == 0:
return None
self.__sem.acquire()
_, client = self._get_client_from_pool_or_make_it()
return client
def _autoclose(self):
newpool = deque()
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
else:
newpool.append(client)
except IndexError:
self.__pool = newpool
def _is_expired_client(self, client):
if self.client_timeout != -1 and client.is_connected():
delta = client.get_last_state_change_timedelta()
if delta.total_seconds() >= self.client_timeout:
return True
return False
def _connected_client_release_cb(self, future=None):
client = future.result()
self.release_client(client)
def release_client(self, client):
"""Releases a client object to the pool.
Args:
client: Client object.
"""
if isinstance(client, Client):
if not self._is_expired_client(client):
LOG.debug('Client is not expired. Adding back to pool')
self.__pool.append(client)
elif client.is_connected():
LOG.debug('Client is expired and connected. Disconnecting')
client.disconnect()
if self.__sem is not None:
self.__sem.release()
def destroy(self):
"""Disconnects all pooled client objects."""
while True:
try:
client = self.__pool.popleft()
if isinstance(client, Client):
client.disconnect()
except IndexError:
break
@tornado.gen.coroutine
def preconnect(self, size=-1):
"""(pre)Connects some or all redis clients inside the pool.
Args:
size (int): number of redis clients to build and to connect
(-1 means all clients if pool max_size > -1)
Raises:
ClientError: when size == -1 and pool max_size == -1
"""
if size == -1 and self.max_size == -1:
raise ClientError("size=-1 not allowed with pool max_size=-1")
limit = min(size, self.max_size) if size != -1 else self.max_size
clients = yield [self.get_connected_client() for _ in range(0, limit)]
for client in clients:
self.release_client(client)
def _make_client(self):
"""Makes and returns a Client object."""
kwargs = self.client_kwargs
client = Client(**kwargs)
return client
|
thefab/tornadis | tornadis/pool.py | ClientPool.release_client | python | def release_client(self, client):
if isinstance(client, Client):
if not self._is_expired_client(client):
LOG.debug('Client is not expired. Adding back to pool')
self.__pool.append(client)
elif client.is_connected():
LOG.debug('Client is expired and connected. Disconnecting')
client.disconnect()
if self.__sem is not None:
self.__sem.release() | Releases a client object to the pool.
Args:
client: Client object. | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/pool.py#L153-L167 | [
"def _is_expired_client(self, client):\n if self.client_timeout != -1 and client.is_connected():\n delta = client.get_last_state_change_timedelta()\n if delta.total_seconds() >= self.client_timeout:\n return True\n return False\n"
] | class ClientPool(object):
"""High level object to deal with a pool of redis clients."""
def __init__(self, max_size=-1, client_timeout=-1, autoclose=False,
**client_kwargs):
"""Constructor.
Args:
max_size (int): max size of the pool (-1 means "no limit").
client_timeout (int): timeout in seconds of a connection released
to the pool (-1 means "no timeout").
autoclose (boolean): automatically disconnect released connections
with lifetime > client_timeout (test made every
client_timeout/10 seconds).
client_kwargs (dict): Client constructor arguments.
"""
self.max_size = max_size
self.client_timeout = client_timeout
self.client_kwargs = client_kwargs
self.__ioloop = client_kwargs.get('ioloop',
tornado.ioloop.IOLoop.instance())
self.autoclose = autoclose
self.__pool = deque()
if self.max_size != -1:
self.__sem = tornado.locks.Semaphore(self.max_size)
else:
self.__sem = None
self.__autoclose_periodic = None
if self.autoclose and self.client_timeout > 0:
every = int(self.client_timeout) * 100
if int(tornado.version[0]) >= 5:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every)
else:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every, self.__ioloop)
self.__autoclose_periodic = cb
self.__autoclose_periodic.start()
def _get_client_from_pool_or_make_it(self):
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
continue
break
except IndexError:
client = self._make_client()
return (True, client)
return (False, client)
@tornado.gen.coroutine
def get_connected_client(self):
"""Gets a connected Client object.
If max_size is reached, this method will block until a new client
object is available.
Returns:
A Future object with connected Client instance as a result
(or ClientError if there was a connection problem)
"""
if self.__sem is not None:
yield self.__sem.acquire()
client = None
newly_created, client = self._get_client_from_pool_or_make_it()
if newly_created:
res = yield client.connect()
if not res:
LOG.warning("can't connect to %s", client.title)
raise tornado.gen.Return(
ClientError("can't connect to %s" % client.title))
raise tornado.gen.Return(client)
def get_client_nowait(self):
"""Gets a Client object (not necessarily connected).
If max_size is reached, this method will return None (and won't block).
Returns:
A Client instance (not necessarily connected) as result (or None).
"""
if self.__sem is not None:
if self.__sem._value == 0:
return None
self.__sem.acquire()
_, client = self._get_client_from_pool_or_make_it()
return client
def _autoclose(self):
newpool = deque()
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
else:
newpool.append(client)
except IndexError:
self.__pool = newpool
def _is_expired_client(self, client):
if self.client_timeout != -1 and client.is_connected():
delta = client.get_last_state_change_timedelta()
if delta.total_seconds() >= self.client_timeout:
return True
return False
def connected_client(self):
"""Returns a ContextManagerFuture to be yielded in a with statement.
Returns:
A ContextManagerFuture object.
Examples:
>>> with (yield pool.connected_client()) as client:
# client is a connected tornadis.Client instance
# it will be automatically released to the pool thanks to
# the "with" keyword
reply = yield client.call("PING")
"""
future = self.get_connected_client()
cb = functools.partial(self._connected_client_release_cb, future)
return ContextManagerFuture(future, cb)
def _connected_client_release_cb(self, future=None):
client = future.result()
self.release_client(client)
def destroy(self):
"""Disconnects all pooled client objects."""
while True:
try:
client = self.__pool.popleft()
if isinstance(client, Client):
client.disconnect()
except IndexError:
break
@tornado.gen.coroutine
def preconnect(self, size=-1):
"""(pre)Connects some or all redis clients inside the pool.
Args:
size (int): number of redis clients to build and to connect
(-1 means all clients if pool max_size > -1)
Raises:
ClientError: when size == -1 and pool max_size == -1
"""
if size == -1 and self.max_size == -1:
raise ClientError("size=-1 not allowed with pool max_size=-1")
limit = min(size, self.max_size) if size != -1 else self.max_size
clients = yield [self.get_connected_client() for _ in range(0, limit)]
for client in clients:
self.release_client(client)
def _make_client(self):
"""Makes and returns a Client object."""
kwargs = self.client_kwargs
client = Client(**kwargs)
return client
|
thefab/tornadis | tornadis/pool.py | ClientPool.destroy | python | def destroy(self):
while True:
try:
client = self.__pool.popleft()
if isinstance(client, Client):
client.disconnect()
except IndexError:
break | Disconnects all pooled client objects. | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/pool.py#L169-L177 | null | class ClientPool(object):
"""High level object to deal with a pool of redis clients."""
def __init__(self, max_size=-1, client_timeout=-1, autoclose=False,
**client_kwargs):
"""Constructor.
Args:
max_size (int): max size of the pool (-1 means "no limit").
client_timeout (int): timeout in seconds of a connection released
to the pool (-1 means "no timeout").
autoclose (boolean): automatically disconnect released connections
with lifetime > client_timeout (test made every
client_timeout/10 seconds).
client_kwargs (dict): Client constructor arguments.
"""
self.max_size = max_size
self.client_timeout = client_timeout
self.client_kwargs = client_kwargs
self.__ioloop = client_kwargs.get('ioloop',
tornado.ioloop.IOLoop.instance())
self.autoclose = autoclose
self.__pool = deque()
if self.max_size != -1:
self.__sem = tornado.locks.Semaphore(self.max_size)
else:
self.__sem = None
self.__autoclose_periodic = None
if self.autoclose and self.client_timeout > 0:
every = int(self.client_timeout) * 100
if int(tornado.version[0]) >= 5:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every)
else:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every, self.__ioloop)
self.__autoclose_periodic = cb
self.__autoclose_periodic.start()
def _get_client_from_pool_or_make_it(self):
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
continue
break
except IndexError:
client = self._make_client()
return (True, client)
return (False, client)
@tornado.gen.coroutine
def get_connected_client(self):
"""Gets a connected Client object.
If max_size is reached, this method will block until a new client
object is available.
Returns:
A Future object with connected Client instance as a result
(or ClientError if there was a connection problem)
"""
if self.__sem is not None:
yield self.__sem.acquire()
client = None
newly_created, client = self._get_client_from_pool_or_make_it()
if newly_created:
res = yield client.connect()
if not res:
LOG.warning("can't connect to %s", client.title)
raise tornado.gen.Return(
ClientError("can't connect to %s" % client.title))
raise tornado.gen.Return(client)
def get_client_nowait(self):
"""Gets a Client object (not necessarily connected).
If max_size is reached, this method will return None (and won't block).
Returns:
A Client instance (not necessarily connected) as result (or None).
"""
if self.__sem is not None:
if self.__sem._value == 0:
return None
self.__sem.acquire()
_, client = self._get_client_from_pool_or_make_it()
return client
def _autoclose(self):
newpool = deque()
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
else:
newpool.append(client)
except IndexError:
self.__pool = newpool
def _is_expired_client(self, client):
if self.client_timeout != -1 and client.is_connected():
delta = client.get_last_state_change_timedelta()
if delta.total_seconds() >= self.client_timeout:
return True
return False
def connected_client(self):
"""Returns a ContextManagerFuture to be yielded in a with statement.
Returns:
A ContextManagerFuture object.
Examples:
>>> with (yield pool.connected_client()) as client:
# client is a connected tornadis.Client instance
# it will be automatically released to the pool thanks to
# the "with" keyword
reply = yield client.call("PING")
"""
future = self.get_connected_client()
cb = functools.partial(self._connected_client_release_cb, future)
return ContextManagerFuture(future, cb)
def _connected_client_release_cb(self, future=None):
client = future.result()
self.release_client(client)
def release_client(self, client):
"""Releases a client object to the pool.
Args:
client: Client object.
"""
if isinstance(client, Client):
if not self._is_expired_client(client):
LOG.debug('Client is not expired. Adding back to pool')
self.__pool.append(client)
elif client.is_connected():
LOG.debug('Client is expired and connected. Disconnecting')
client.disconnect()
if self.__sem is not None:
self.__sem.release()
@tornado.gen.coroutine
def preconnect(self, size=-1):
"""(pre)Connects some or all redis clients inside the pool.
Args:
size (int): number of redis clients to build and to connect
(-1 means all clients if pool max_size > -1)
Raises:
ClientError: when size == -1 and pool max_size == -1
"""
if size == -1 and self.max_size == -1:
raise ClientError("size=-1 not allowed with pool max_size=-1")
limit = min(size, self.max_size) if size != -1 else self.max_size
clients = yield [self.get_connected_client() for _ in range(0, limit)]
for client in clients:
self.release_client(client)
def _make_client(self):
"""Makes and returns a Client object."""
kwargs = self.client_kwargs
client = Client(**kwargs)
return client
|
thefab/tornadis | tornadis/pool.py | ClientPool.preconnect | python | def preconnect(self, size=-1):
if size == -1 and self.max_size == -1:
raise ClientError("size=-1 not allowed with pool max_size=-1")
limit = min(size, self.max_size) if size != -1 else self.max_size
clients = yield [self.get_connected_client() for _ in range(0, limit)]
for client in clients:
self.release_client(client) | (pre)Connects some or all redis clients inside the pool.
Args:
size (int): number of redis clients to build and to connect
(-1 means all clients if pool max_size > -1)
Raises:
ClientError: when size == -1 and pool max_size == -1 | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/pool.py#L180-L195 | [
"def release_client(self, client):\n \"\"\"Releases a client object to the pool.\n\n Args:\n client: Client object.\n \"\"\"\n if isinstance(client, Client):\n if not self._is_expired_client(client):\n LOG.debug('Client is not expired. Adding back to pool')\n self.__p... | class ClientPool(object):
"""High level object to deal with a pool of redis clients."""
def __init__(self, max_size=-1, client_timeout=-1, autoclose=False,
**client_kwargs):
"""Constructor.
Args:
max_size (int): max size of the pool (-1 means "no limit").
client_timeout (int): timeout in seconds of a connection released
to the pool (-1 means "no timeout").
autoclose (boolean): automatically disconnect released connections
with lifetime > client_timeout (test made every
client_timeout/10 seconds).
client_kwargs (dict): Client constructor arguments.
"""
self.max_size = max_size
self.client_timeout = client_timeout
self.client_kwargs = client_kwargs
self.__ioloop = client_kwargs.get('ioloop',
tornado.ioloop.IOLoop.instance())
self.autoclose = autoclose
self.__pool = deque()
if self.max_size != -1:
self.__sem = tornado.locks.Semaphore(self.max_size)
else:
self.__sem = None
self.__autoclose_periodic = None
if self.autoclose and self.client_timeout > 0:
every = int(self.client_timeout) * 100
if int(tornado.version[0]) >= 5:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every)
else:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every, self.__ioloop)
self.__autoclose_periodic = cb
self.__autoclose_periodic.start()
def _get_client_from_pool_or_make_it(self):
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
continue
break
except IndexError:
client = self._make_client()
return (True, client)
return (False, client)
@tornado.gen.coroutine
def get_connected_client(self):
"""Gets a connected Client object.
If max_size is reached, this method will block until a new client
object is available.
Returns:
A Future object with connected Client instance as a result
(or ClientError if there was a connection problem)
"""
if self.__sem is not None:
yield self.__sem.acquire()
client = None
newly_created, client = self._get_client_from_pool_or_make_it()
if newly_created:
res = yield client.connect()
if not res:
LOG.warning("can't connect to %s", client.title)
raise tornado.gen.Return(
ClientError("can't connect to %s" % client.title))
raise tornado.gen.Return(client)
def get_client_nowait(self):
"""Gets a Client object (not necessarily connected).
If max_size is reached, this method will return None (and won't block).
Returns:
A Client instance (not necessarily connected) as result (or None).
"""
if self.__sem is not None:
if self.__sem._value == 0:
return None
self.__sem.acquire()
_, client = self._get_client_from_pool_or_make_it()
return client
def _autoclose(self):
newpool = deque()
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
else:
newpool.append(client)
except IndexError:
self.__pool = newpool
def _is_expired_client(self, client):
if self.client_timeout != -1 and client.is_connected():
delta = client.get_last_state_change_timedelta()
if delta.total_seconds() >= self.client_timeout:
return True
return False
def connected_client(self):
"""Returns a ContextManagerFuture to be yielded in a with statement.
Returns:
A ContextManagerFuture object.
Examples:
>>> with (yield pool.connected_client()) as client:
# client is a connected tornadis.Client instance
# it will be automatically released to the pool thanks to
# the "with" keyword
reply = yield client.call("PING")
"""
future = self.get_connected_client()
cb = functools.partial(self._connected_client_release_cb, future)
return ContextManagerFuture(future, cb)
def _connected_client_release_cb(self, future=None):
client = future.result()
self.release_client(client)
def release_client(self, client):
"""Releases a client object to the pool.
Args:
client: Client object.
"""
if isinstance(client, Client):
if not self._is_expired_client(client):
LOG.debug('Client is not expired. Adding back to pool')
self.__pool.append(client)
elif client.is_connected():
LOG.debug('Client is expired and connected. Disconnecting')
client.disconnect()
if self.__sem is not None:
self.__sem.release()
def destroy(self):
"""Disconnects all pooled client objects."""
while True:
try:
client = self.__pool.popleft()
if isinstance(client, Client):
client.disconnect()
except IndexError:
break
@tornado.gen.coroutine
def _make_client(self):
"""Makes and returns a Client object."""
kwargs = self.client_kwargs
client = Client(**kwargs)
return client
|
thefab/tornadis | tornadis/pipeline.py | Pipeline.stack_call | python | def stack_call(self, *args):
self.pipelined_args.append(args)
self.number_of_stacked_calls = self.number_of_stacked_calls + 1 | Stacks a redis command inside the object.
The syntax is the same as the call() method of a Client class.
Args:
*args: full redis command as variable length argument list.
Examples:
>>> pipeline = Pipeline()
>>> pipeline.stack_call("HSET", "key", "field", "value")
>>> pipeline.stack_call("PING")
>>> pipeline.stack_call("INCR", "key2") | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/pipeline.py#L31-L46 | null | class Pipeline(object):
"""Pipeline class to stack redis commands.
A pipeline object is just a kind of stack. You stack complete redis
commands (with their corresponding arguments) inside it.
Then, you use the call() method of a Client object to process the pipeline
(which must be the only argument of this call() call).
More informations on the redis side: http://redis.io/topics/pipelining
Attributes:
pipelined_args: A list of tuples, each tuple is a complete
redis command.
number_of_stacked_calls: the number of stacked redis commands
(integer).
"""
def __init__(self):
"""Constructor."""
self.pipelined_args = []
self.number_of_stacked_calls = 0
|
thefab/tornadis | tornadis/connection.py | Connection.connect | python | def connect(self):
if self.is_connected() or self.is_connecting():
raise tornado.gen.Return(True)
if self.unix_domain_socket is None:
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.tcp_nodelay:
self.__socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1)
else:
if not os.path.exists(self.unix_domain_socket):
LOG.warning("can't connect to %s, file does not exist",
self.unix_domain_socket)
raise tornado.gen.Return(False)
self.__socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.__socket.setblocking(0)
self.__periodic_callback.start()
try:
LOG.debug("connecting to %s...", self._redis_server())
self._state.set_connecting()
if self.unix_domain_socket is None:
self.__socket.connect((self.host, self.port))
else:
self.__socket.connect(self.unix_domain_socket)
except socket.error as e:
if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
self.disconnect()
LOG.warning("can't connect to %s", self._redis_server())
raise tornado.gen.Return(False)
self.__socket_fileno = self.__socket.fileno()
self._register_or_update_event_handler()
yield self._state.get_changed_state_future()
if not self.is_connected():
LOG.warning("can't connect to %s", self._redis_server())
raise tornado.gen.Return(False)
else:
LOG.debug("connected to %s", self._redis_server())
self.__socket_fileno = self.__socket.fileno()
self._state.set_connected()
self._register_or_update_event_handler()
raise tornado.gen.Return(True) | Connects the object to the host:port.
Returns:
Future: a Future object with True as result if the connection
process was ok. | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/connection.py#L127-L173 | [
"def is_connected(self):\n \"\"\"Returns True if the object is connected.\"\"\"\n return self._state.is_connected()\n"
] | class Connection(object):
"""Low level connection object.
Attributes:
host (string): the host name to connect to.
port (int): the port to connect to.
unix_domain_socket (string): path to a unix socket to connect to
(if set, overrides host/port parameters).
read_page_size (int): page size for reading.
write_page_size (int): page size for writing.
connect_timeout (int): timeout (in seconds) for connecting.
tcp_nodelay (boolean): set TCP_NODELAY on socket.
aggressive_write (boolean): try to minimize write latency over
global throughput (default False).
read_timeout (int): timeout (in seconds) to read something on
the socket (if nothing is read during this time, the
connection is closed) (default: 0 means no timeout)
"""
def __init__(self, read_callback, close_callback,
host=tornadis.DEFAULT_HOST,
port=tornadis.DEFAULT_PORT, unix_domain_socket=None,
read_page_size=tornadis.DEFAULT_READ_PAGE_SIZE,
write_page_size=tornadis.DEFAULT_WRITE_PAGE_SIZE,
connect_timeout=tornadis.DEFAULT_CONNECT_TIMEOUT,
tcp_nodelay=False, aggressive_write=False,
read_timeout=tornadis.DEFAULT_READ_TIMEOUT,
ioloop=None):
"""Constructor.
Args:
read_callback: callback called when there is something to read
(private, do not use from Client constructor).
close_callback: callback called when the connection is closed
(private, do not use from Client constructor).
host (string): the host name to connect to.
port (int): the port to connect to.
unix_domain_socket (string): path to a unix socket to connect to
(if set, overrides host/port parameters).
read_page_size (int): page size for reading.
write_page_size (int): page size for writing.
connect_timeout (int): timeout (in seconds) for connecting.
tcp_nodelay (boolean): set TCP_NODELAY on socket.
aggressive_write (boolean): try to minimize write latency over
global throughput (default False).
read_timeout (int): timeout (in seconds) to read something on
the socket (if nothing is read during this time, the
connection is closed) (default: 0 means no timeout)
ioloop (IOLoop): the tornado ioloop to use.
"""
self.host = host
self.port = port
self.unix_domain_socket = unix_domain_socket
self._state = ConnectionState()
self._ioloop = ioloop or tornado.ioloop.IOLoop.instance()
if int(tornado.version[0]) >= 5:
cb = tornado.ioloop.PeriodicCallback(self._on_every_second, 1000)
else:
cb = tornado.ioloop.PeriodicCallback(self._on_every_second, 1000,
self._ioloop)
self.__periodic_callback = cb
self._read_callback = read_callback
self._close_callback = close_callback
self.read_page_size = read_page_size
self.write_page_size = write_page_size
self.connect_timeout = connect_timeout
self.read_timeout = read_timeout
self.tcp_nodelay = tcp_nodelay
self.aggressive_write = aggressive_write
self._write_buffer = WriteBuffer()
self._listened_events = 0
self._last_read = datetime.now()
def _redis_server(self):
if self.unix_domain_socket:
return self.unix_domain_socket
return "%s:%i" % (self.host, self.port)
def is_connecting(self):
"""Returns True if the object is connecting."""
return self._state.is_connecting()
def is_connected(self):
"""Returns True if the object is connected."""
return self._state.is_connected()
@tornado.gen.coroutine
def _on_every_second(self):
if self.is_connecting():
dt = self._state.get_last_state_change_timedelta()
if dt.total_seconds() > self.connect_timeout:
self.disconnect()
if self.read_timeout > 0:
dt = datetime.now() - self._last_read
if dt.total_seconds() > self.read_timeout:
LOG.warning("read timeout => disconnecting")
self.disconnect()
def _register_or_update_event_handler(self, write=True):
if write:
listened_events = READ_EVENT | WRITE_EVENT | ERROR_EVENT
else:
listened_events = READ_EVENT | ERROR_EVENT
if self._listened_events == 0:
try:
self._ioloop.add_handler(self.__socket_fileno,
self._handle_events, listened_events)
except (OSError, IOError, ValueError):
self.disconnect()
return
else:
if self._listened_events != listened_events:
try:
self._ioloop.update_handler(self.__socket_fileno,
listened_events)
except (OSError, IOError, ValueError):
self.disconnect()
return
self._listened_events = listened_events
    def disconnect(self):
        """Disconnects the object.

        Safe method (no exception, even if it's already disconnected or if
        there are some connection errors).
        """
        if not self.is_connected() and not self.is_connecting():
            # already disconnected => nothing to do
            return
        LOG.debug("disconnecting from %s...", self._redis_server())
        self.__periodic_callback.stop()
        try:
            # best effort: the fd may already be invalid or unregistered
            self._ioloop.remove_handler(self.__socket_fileno)
            self._listened_events = 0
        except Exception:
            pass
        self.__socket_fileno = -1
        try:
            # best effort: ignore errors on an already closed socket
            self.__socket.close()
        except Exception:
            pass
        self._state.set_disconnected()
        self._close_callback()
        LOG.debug("disconnected from %s", self._redis_server())
    def _handle_events(self, fd, event):
        """Callback invoked by the ioloop when the socket fd is ready.

        Args:
            fd: the socket file descriptor.
            event: bitmask of ready events (READ/WRITE/ERROR).
        """
        if self.is_connecting():
            # while connecting, check SO_ERROR to detect a failed
            # connect attempt; 0 means the connect succeeded
            err = self.__socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            if err != 0:
                LOG.debug("connecting error in _handle_events")
                self.disconnect()
                return
            self._state.set_connected()
            LOG.debug("connected to %s", self._redis_server())
        if not self.is_connected():
            return
        if event & self._ioloop.READ:
            self._handle_read()
            # _handle_read() may have disconnected us => re-check
            if not self.is_connected():
                return
        if event & self._ioloop.WRITE:
            self._handle_write()
            if not self.is_connected():
                return
        if event & self._ioloop.ERROR:
            LOG.debug("unknown socket error")
            self.disconnect()
def _handle_read(self):
chunk = self._read(self.read_page_size)
if chunk is not None:
if self.read_timeout > 0:
self._last_read = datetime.now()
self._read_callback(chunk)
    def _handle_write(self):
        """Writes as much buffered data as possible to the socket.

        Pops chunks (up to write_page_size bytes) from the internal write
        buffer and sends them until the buffer is empty or the socket
        would block; unsent bytes are pushed back at the front of the
        buffer. When everything has been flushed, the ioloop handler is
        downgraded to read/error events only.
        """
        while not self._write_buffer.is_empty():
            ps = self.write_page_size
            data = self._write_buffer.pop_chunk(ps)
            if len(data) > 0:
                try:
                    size = self.__socket.send(data)
                except (socket.error, IOError, OSError) as e:
                    if e.args[0] in _ERRNO_WOULDBLOCK:
                        # kernel buffer full => keep the data, retry later
                        LOG.debug("write would block")
                        self._write_buffer.appendleft(data)
                        break
                    else:
                        self.disconnect()
                        return
                else:
                    LOG.debug("%i bytes written to the socket", size)
                    if size < len(data):
                        # partial write => requeue the remaining bytes
                        self._write_buffer.appendleft(data[size:])
                        break
        if self._write_buffer.is_empty():
            self._register_or_update_event_handler(write=False)
    def _read(self, size):
        """Reads up to `size` bytes from the socket.

        Args:
            size (int): maximum number of bytes to read.

        Returns:
            The bytes read, or None when the read would block.
            Disconnects (and falls through returning None) on EOF or on
            any other socket error.
        """
        try:
            chunk = self.__socket.recv(size)
            chunk_length = len(chunk)
            if chunk_length > 0:
                LOG.debug("%i bytes read from socket", chunk_length)
                return chunk
            else:
                # recv() returning an empty string means the peer
                # closed the connection
                LOG.debug("closed socket => disconnecting")
                self.disconnect()
        except socket.error as e:
            if e.args[0] in _ERRNO_WOULDBLOCK:
                LOG.debug("read would block")
                return None
            else:
                self.disconnect()
def write(self, data):
"""Buffers some data to be sent to the host:port in a non blocking way.
So the data is always buffered and not sent on the socket in a
synchronous way.
You can give a WriteBuffer as parameter. The internal Connection
WriteBuffer will be extended with this one (without copying).
Args:
data (str or WriteBuffer): string (or WriteBuffer) to write to
the host:port.
"""
if isinstance(data, WriteBuffer):
self._write_buffer.append(data)
else:
if len(data) > 0:
self._write_buffer.append(data)
if self.aggressive_write:
self._handle_write()
if self._write_buffer._total_length > 0:
self._register_or_update_event_handler(write=True)
|
thefab/tornadis | tornadis/connection.py | Connection.disconnect | python | def disconnect(self):
if not self.is_connected() and not self.is_connecting():
return
LOG.debug("disconnecting from %s...", self._redis_server())
self.__periodic_callback.stop()
try:
self._ioloop.remove_handler(self.__socket_fileno)
self._listened_events = 0
except Exception:
pass
self.__socket_fileno = -1
try:
self.__socket.close()
except Exception:
pass
self._state.set_disconnected()
self._close_callback()
LOG.debug("disconnected from %s", self._redis_server()) | Disconnects the object.
Safe method (no exception, even if it's already disconnected or if
there are some connection errors). | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/connection.py#L208-L230 | [
"def is_connected(self):\n \"\"\"Returns True if the object is connected.\"\"\"\n return self._state.is_connected()\n"
] | class Connection(object):
"""Low level connection object.
Attributes:
host (string): the host name to connect to.
port (int): the port to connect to.
unix_domain_socket (string): path to a unix socket to connect to
(if set, overrides host/port parameters).
read_page_size (int): page size for reading.
write_page_size (int): page size for writing.
connect_timeout (int): timeout (in seconds) for connecting.
tcp_nodelay (boolean): set TCP_NODELAY on socket.
aggressive_write (boolean): try to minimize write latency over
global throughput (default False).
read_timeout (int): timeout (in seconds) to read something on
the socket (if nothing is read during this time, the
connection is closed) (default: 0 means no timeout)
"""
def __init__(self, read_callback, close_callback,
host=tornadis.DEFAULT_HOST,
port=tornadis.DEFAULT_PORT, unix_domain_socket=None,
read_page_size=tornadis.DEFAULT_READ_PAGE_SIZE,
write_page_size=tornadis.DEFAULT_WRITE_PAGE_SIZE,
connect_timeout=tornadis.DEFAULT_CONNECT_TIMEOUT,
tcp_nodelay=False, aggressive_write=False,
read_timeout=tornadis.DEFAULT_READ_TIMEOUT,
ioloop=None):
"""Constructor.
Args:
read_callback: callback called when there is something to read
(private, do not use from Client constructor).
close_callback: callback called when the connection is closed
(private, do not use from Client constructor).
host (string): the host name to connect to.
port (int): the port to connect to.
unix_domain_socket (string): path to a unix socket to connect to
(if set, overrides host/port parameters).
read_page_size (int): page size for reading.
write_page_size (int): page size for writing.
connect_timeout (int): timeout (in seconds) for connecting.
tcp_nodelay (boolean): set TCP_NODELAY on socket.
aggressive_write (boolean): try to minimize write latency over
global throughput (default False).
read_timeout (int): timeout (in seconds) to read something on
the socket (if nothing is read during this time, the
connection is closed) (default: 0 means no timeout)
ioloop (IOLoop): the tornado ioloop to use.
"""
self.host = host
self.port = port
self.unix_domain_socket = unix_domain_socket
self._state = ConnectionState()
self._ioloop = ioloop or tornado.ioloop.IOLoop.instance()
if int(tornado.version[0]) >= 5:
cb = tornado.ioloop.PeriodicCallback(self._on_every_second, 1000)
else:
cb = tornado.ioloop.PeriodicCallback(self._on_every_second, 1000,
self._ioloop)
self.__periodic_callback = cb
self._read_callback = read_callback
self._close_callback = close_callback
self.read_page_size = read_page_size
self.write_page_size = write_page_size
self.connect_timeout = connect_timeout
self.read_timeout = read_timeout
self.tcp_nodelay = tcp_nodelay
self.aggressive_write = aggressive_write
self._write_buffer = WriteBuffer()
self._listened_events = 0
self._last_read = datetime.now()
def _redis_server(self):
if self.unix_domain_socket:
return self.unix_domain_socket
return "%s:%i" % (self.host, self.port)
def is_connecting(self):
"""Returns True if the object is connecting."""
return self._state.is_connecting()
def is_connected(self):
"""Returns True if the object is connected."""
return self._state.is_connected()
@tornado.gen.coroutine
def connect(self):
"""Connects the object to the host:port.
Returns:
Future: a Future object with True as result if the connection
process was ok.
"""
if self.is_connected() or self.is_connecting():
raise tornado.gen.Return(True)
if self.unix_domain_socket is None:
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.tcp_nodelay:
self.__socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1)
else:
if not os.path.exists(self.unix_domain_socket):
LOG.warning("can't connect to %s, file does not exist",
self.unix_domain_socket)
raise tornado.gen.Return(False)
self.__socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.__socket.setblocking(0)
self.__periodic_callback.start()
try:
LOG.debug("connecting to %s...", self._redis_server())
self._state.set_connecting()
if self.unix_domain_socket is None:
self.__socket.connect((self.host, self.port))
else:
self.__socket.connect(self.unix_domain_socket)
except socket.error as e:
if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
self.disconnect()
LOG.warning("can't connect to %s", self._redis_server())
raise tornado.gen.Return(False)
self.__socket_fileno = self.__socket.fileno()
self._register_or_update_event_handler()
yield self._state.get_changed_state_future()
if not self.is_connected():
LOG.warning("can't connect to %s", self._redis_server())
raise tornado.gen.Return(False)
else:
LOG.debug("connected to %s", self._redis_server())
self.__socket_fileno = self.__socket.fileno()
self._state.set_connected()
self._register_or_update_event_handler()
raise tornado.gen.Return(True)
def _on_every_second(self):
if self.is_connecting():
dt = self._state.get_last_state_change_timedelta()
if dt.total_seconds() > self.connect_timeout:
self.disconnect()
if self.read_timeout > 0:
dt = datetime.now() - self._last_read
if dt.total_seconds() > self.read_timeout:
LOG.warning("read timeout => disconnecting")
self.disconnect()
def _register_or_update_event_handler(self, write=True):
if write:
listened_events = READ_EVENT | WRITE_EVENT | ERROR_EVENT
else:
listened_events = READ_EVENT | ERROR_EVENT
if self._listened_events == 0:
try:
self._ioloop.add_handler(self.__socket_fileno,
self._handle_events, listened_events)
except (OSError, IOError, ValueError):
self.disconnect()
return
else:
if self._listened_events != listened_events:
try:
self._ioloop.update_handler(self.__socket_fileno,
listened_events)
except (OSError, IOError, ValueError):
self.disconnect()
return
self._listened_events = listened_events
def _handle_events(self, fd, event):
if self.is_connecting():
err = self.__socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
LOG.debug("connecting error in _handle_events")
self.disconnect()
return
self._state.set_connected()
LOG.debug("connected to %s", self._redis_server())
if not self.is_connected():
return
if event & self._ioloop.READ:
self._handle_read()
if not self.is_connected():
return
if event & self._ioloop.WRITE:
self._handle_write()
if not self.is_connected():
return
if event & self._ioloop.ERROR:
LOG.debug("unknown socket error")
self.disconnect()
def _handle_read(self):
chunk = self._read(self.read_page_size)
if chunk is not None:
if self.read_timeout > 0:
self._last_read = datetime.now()
self._read_callback(chunk)
def _handle_write(self):
while not self._write_buffer.is_empty():
ps = self.write_page_size
data = self._write_buffer.pop_chunk(ps)
if len(data) > 0:
try:
size = self.__socket.send(data)
except (socket.error, IOError, OSError) as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
LOG.debug("write would block")
self._write_buffer.appendleft(data)
break
else:
self.disconnect()
return
else:
LOG.debug("%i bytes written to the socket", size)
if size < len(data):
self._write_buffer.appendleft(data[size:])
break
if self._write_buffer.is_empty():
self._register_or_update_event_handler(write=False)
def _read(self, size):
try:
chunk = self.__socket.recv(size)
chunk_length = len(chunk)
if chunk_length > 0:
LOG.debug("%i bytes read from socket", chunk_length)
return chunk
else:
LOG.debug("closed socket => disconnecting")
self.disconnect()
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
LOG.debug("read would block")
return None
else:
self.disconnect()
def write(self, data):
"""Buffers some data to be sent to the host:port in a non blocking way.
So the data is always buffered and not sent on the socket in a
synchronous way.
You can give a WriteBuffer as parameter. The internal Connection
WriteBuffer will be extended with this one (without copying).
Args:
data (str or WriteBuffer): string (or WriteBuffer) to write to
the host:port.
"""
if isinstance(data, WriteBuffer):
self._write_buffer.append(data)
else:
if len(data) > 0:
self._write_buffer.append(data)
if self.aggressive_write:
self._handle_write()
if self._write_buffer._total_length > 0:
self._register_or_update_event_handler(write=True)
|
thefab/tornadis | tornadis/connection.py | Connection.write | python | def write(self, data):
if isinstance(data, WriteBuffer):
self._write_buffer.append(data)
else:
if len(data) > 0:
self._write_buffer.append(data)
if self.aggressive_write:
self._handle_write()
if self._write_buffer._total_length > 0:
self._register_or_update_event_handler(write=True) | Buffers some data to be sent to the host:port in a non blocking way.
So the data is always buffered and not sent on the socket in a
synchronous way.
You can give a WriteBuffer as parameter. The internal Connection
WriteBuffer will be extended with this one (without copying).
Args:
data (str or WriteBuffer): string (or WriteBuffer) to write to
the host:port. | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/connection.py#L302-L323 | [
"def _register_or_update_event_handler(self, write=True):\n if write:\n listened_events = READ_EVENT | WRITE_EVENT | ERROR_EVENT\n else:\n listened_events = READ_EVENT | ERROR_EVENT\n if self._listened_events == 0:\n try:\n self._ioloop.add_handler(self.__socket_fileno,\n ... | class Connection(object):
"""Low level connection object.
Attributes:
host (string): the host name to connect to.
port (int): the port to connect to.
unix_domain_socket (string): path to a unix socket to connect to
(if set, overrides host/port parameters).
read_page_size (int): page size for reading.
write_page_size (int): page size for writing.
connect_timeout (int): timeout (in seconds) for connecting.
tcp_nodelay (boolean): set TCP_NODELAY on socket.
aggressive_write (boolean): try to minimize write latency over
global throughput (default False).
read_timeout (int): timeout (in seconds) to read something on
the socket (if nothing is read during this time, the
connection is closed) (default: 0 means no timeout)
"""
def __init__(self, read_callback, close_callback,
host=tornadis.DEFAULT_HOST,
port=tornadis.DEFAULT_PORT, unix_domain_socket=None,
read_page_size=tornadis.DEFAULT_READ_PAGE_SIZE,
write_page_size=tornadis.DEFAULT_WRITE_PAGE_SIZE,
connect_timeout=tornadis.DEFAULT_CONNECT_TIMEOUT,
tcp_nodelay=False, aggressive_write=False,
read_timeout=tornadis.DEFAULT_READ_TIMEOUT,
ioloop=None):
"""Constructor.
Args:
read_callback: callback called when there is something to read
(private, do not use from Client constructor).
close_callback: callback called when the connection is closed
(private, do not use from Client constructor).
host (string): the host name to connect to.
port (int): the port to connect to.
unix_domain_socket (string): path to a unix socket to connect to
(if set, overrides host/port parameters).
read_page_size (int): page size for reading.
write_page_size (int): page size for writing.
connect_timeout (int): timeout (in seconds) for connecting.
tcp_nodelay (boolean): set TCP_NODELAY on socket.
aggressive_write (boolean): try to minimize write latency over
global throughput (default False).
read_timeout (int): timeout (in seconds) to read something on
the socket (if nothing is read during this time, the
connection is closed) (default: 0 means no timeout)
ioloop (IOLoop): the tornado ioloop to use.
"""
self.host = host
self.port = port
self.unix_domain_socket = unix_domain_socket
self._state = ConnectionState()
self._ioloop = ioloop or tornado.ioloop.IOLoop.instance()
if int(tornado.version[0]) >= 5:
cb = tornado.ioloop.PeriodicCallback(self._on_every_second, 1000)
else:
cb = tornado.ioloop.PeriodicCallback(self._on_every_second, 1000,
self._ioloop)
self.__periodic_callback = cb
self._read_callback = read_callback
self._close_callback = close_callback
self.read_page_size = read_page_size
self.write_page_size = write_page_size
self.connect_timeout = connect_timeout
self.read_timeout = read_timeout
self.tcp_nodelay = tcp_nodelay
self.aggressive_write = aggressive_write
self._write_buffer = WriteBuffer()
self._listened_events = 0
self._last_read = datetime.now()
def _redis_server(self):
if self.unix_domain_socket:
return self.unix_domain_socket
return "%s:%i" % (self.host, self.port)
def is_connecting(self):
"""Returns True if the object is connecting."""
return self._state.is_connecting()
def is_connected(self):
"""Returns True if the object is connected."""
return self._state.is_connected()
@tornado.gen.coroutine
def connect(self):
"""Connects the object to the host:port.
Returns:
Future: a Future object with True as result if the connection
process was ok.
"""
if self.is_connected() or self.is_connecting():
raise tornado.gen.Return(True)
if self.unix_domain_socket is None:
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.tcp_nodelay:
self.__socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1)
else:
if not os.path.exists(self.unix_domain_socket):
LOG.warning("can't connect to %s, file does not exist",
self.unix_domain_socket)
raise tornado.gen.Return(False)
self.__socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.__socket.setblocking(0)
self.__periodic_callback.start()
try:
LOG.debug("connecting to %s...", self._redis_server())
self._state.set_connecting()
if self.unix_domain_socket is None:
self.__socket.connect((self.host, self.port))
else:
self.__socket.connect(self.unix_domain_socket)
except socket.error as e:
if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
self.disconnect()
LOG.warning("can't connect to %s", self._redis_server())
raise tornado.gen.Return(False)
self.__socket_fileno = self.__socket.fileno()
self._register_or_update_event_handler()
yield self._state.get_changed_state_future()
if not self.is_connected():
LOG.warning("can't connect to %s", self._redis_server())
raise tornado.gen.Return(False)
else:
LOG.debug("connected to %s", self._redis_server())
self.__socket_fileno = self.__socket.fileno()
self._state.set_connected()
self._register_or_update_event_handler()
raise tornado.gen.Return(True)
def _on_every_second(self):
if self.is_connecting():
dt = self._state.get_last_state_change_timedelta()
if dt.total_seconds() > self.connect_timeout:
self.disconnect()
if self.read_timeout > 0:
dt = datetime.now() - self._last_read
if dt.total_seconds() > self.read_timeout:
LOG.warning("read timeout => disconnecting")
self.disconnect()
def _register_or_update_event_handler(self, write=True):
if write:
listened_events = READ_EVENT | WRITE_EVENT | ERROR_EVENT
else:
listened_events = READ_EVENT | ERROR_EVENT
if self._listened_events == 0:
try:
self._ioloop.add_handler(self.__socket_fileno,
self._handle_events, listened_events)
except (OSError, IOError, ValueError):
self.disconnect()
return
else:
if self._listened_events != listened_events:
try:
self._ioloop.update_handler(self.__socket_fileno,
listened_events)
except (OSError, IOError, ValueError):
self.disconnect()
return
self._listened_events = listened_events
def disconnect(self):
"""Disconnects the object.
Safe method (no exception, even if it's already disconnected or if
there are some connection errors).
"""
if not self.is_connected() and not self.is_connecting():
return
LOG.debug("disconnecting from %s...", self._redis_server())
self.__periodic_callback.stop()
try:
self._ioloop.remove_handler(self.__socket_fileno)
self._listened_events = 0
except Exception:
pass
self.__socket_fileno = -1
try:
self.__socket.close()
except Exception:
pass
self._state.set_disconnected()
self._close_callback()
LOG.debug("disconnected from %s", self._redis_server())
def _handle_events(self, fd, event):
if self.is_connecting():
err = self.__socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
LOG.debug("connecting error in _handle_events")
self.disconnect()
return
self._state.set_connected()
LOG.debug("connected to %s", self._redis_server())
if not self.is_connected():
return
if event & self._ioloop.READ:
self._handle_read()
if not self.is_connected():
return
if event & self._ioloop.WRITE:
self._handle_write()
if not self.is_connected():
return
if event & self._ioloop.ERROR:
LOG.debug("unknown socket error")
self.disconnect()
def _handle_read(self):
chunk = self._read(self.read_page_size)
if chunk is not None:
if self.read_timeout > 0:
self._last_read = datetime.now()
self._read_callback(chunk)
def _handle_write(self):
while not self._write_buffer.is_empty():
ps = self.write_page_size
data = self._write_buffer.pop_chunk(ps)
if len(data) > 0:
try:
size = self.__socket.send(data)
except (socket.error, IOError, OSError) as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
LOG.debug("write would block")
self._write_buffer.appendleft(data)
break
else:
self.disconnect()
return
else:
LOG.debug("%i bytes written to the socket", size)
if size < len(data):
self._write_buffer.appendleft(data[size:])
break
if self._write_buffer.is_empty():
self._register_or_update_event_handler(write=False)
def _read(self, size):
try:
chunk = self.__socket.recv(size)
chunk_length = len(chunk)
if chunk_length > 0:
LOG.debug("%i bytes read from socket", chunk_length)
return chunk
else:
LOG.debug("closed socket => disconnecting")
self.disconnect()
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
LOG.debug("read would block")
return None
else:
self.disconnect()
|
thefab/tornadis | tornadis/client.py | Client.connect | python | def connect(self):
if self.is_connected():
raise tornado.gen.Return(True)
cb1 = self._read_callback
cb2 = self._close_callback
self.__callback_queue = collections.deque()
self._reply_list = []
self.__reader = hiredis.Reader(replyError=ClientError)
kwargs = self.connection_kwargs
self.__connection = Connection(cb1, cb2, **kwargs)
connection_status = yield self.__connection.connect()
if connection_status is not True:
# nothing left to do here, return
raise tornado.gen.Return(False)
if self.password is not None:
authentication_status = yield self._call('AUTH', self.password)
if authentication_status != b'OK':
# incorrect password, return back the result
LOG.warning("impossible to connect: bad password")
self.__connection.disconnect()
raise tornado.gen.Return(False)
if self.db != 0:
db_status = yield self._call('SELECT', self.db)
if db_status != b'OK':
LOG.warning("can't select db %s", self.db)
raise tornado.gen.Return(False)
raise tornado.gen.Return(True) | Connects the client object to redis.
It's safe to use this method even if you are already connected.
Note: this method is useless with autoconnect mode (default).
Returns:
a Future object with True as result if the connection was ok. | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/client.py#L84-L118 | [
"def is_connected(self):\n \"\"\"Returns True is the client is connected to redis.\n\n Returns:\n True if the client if connected to redis.\n \"\"\"\n return (self.__connection is not None) and \\\n (self.__connection.is_connected())\n"
] | class Client(object):
"""High level object to interact with redis.
Attributes:
autoconnect (boolean): True if the client is in autoconnect mode
(and in autoreconnection mode) (default True).
password (string): the password to authenticate with.
db (int): database number.
connection_kwargs (dict): :class:`Connection` object
kwargs (note that read_callback and close_callback args are
set automatically).
"""
def __init__(self, autoconnect=True, password=None, db=0,
**connection_kwargs):
"""Constructor.
Args:
autoconnect (boolean): True if the client is in autoconnect mode
(and in autoreconnection mode) (default True).
password (string): the password to authenticate with.
db (int): database number.
**connection_kwargs: :class:`Connection` object kwargs.
"""
if 'read_callback' in connection_kwargs or \
'close_callback' in connection_kwargs:
raise Exception("read_callback and close_callback are not allowed "
"to be used here.")
self.connection_kwargs = connection_kwargs
self.autoconnect = autoconnect
self.password = password
self.db = db
self.__connection = None
self.subscribed = False
self.__connection = None
self.__reader = None
# Used for normal clients
self.__callback_queue = None
# Used for subscribed clients
self._condition = tornado.locks.Condition()
self._reply_list = None
@property
def title(self):
return self.__connection._redis_server()
def is_connected(self):
"""Returns True is the client is connected to redis.
Returns:
True if the client if connected to redis.
"""
return (self.__connection is not None) and \
(self.__connection.is_connected())
@tornado.gen.coroutine
def disconnect(self):
"""Disconnects the client object from redis.
It's safe to use this method even if you are already disconnected.
"""
if not self.is_connected():
return
if self.__connection is not None:
self.__connection.disconnect()
def _close_callback(self):
"""Callback called when redis closed the connection.
The callback queue is emptied and we call each callback found
with None or with an exception object to wake up blocked client.
"""
while True:
try:
callback = self.__callback_queue.popleft()
callback(ConnectionError("closed connection"))
except IndexError:
break
if self.subscribed:
# pubsub clients
self._reply_list.append(ConnectionError("closed connection"))
self._condition.notify_all()
def _read_callback(self, data=None):
"""Callback called when some data are read on the socket.
The buffer is given to the hiredis parser. If a reply is complete,
we put the decoded reply to on the reply queue.
Args:
data (str): string (buffer) read on the socket.
"""
try:
if data is not None:
self.__reader.feed(data)
while True:
reply = self.__reader.gets()
if reply is not False:
try:
callback = self.__callback_queue.popleft()
# normal client (1 reply = 1 callback)
callback(reply)
except IndexError:
# pubsub clients
self._reply_list.append(reply)
self._condition.notify_all()
else:
break
        except hiredis.ProtocolError:
            # something nasty occurred (corrupt stream => no way to recover)
LOG.warning("corrupted stream => disconnect")
self.disconnect()
def call(self, *args, **kwargs):
"""Calls a redis command and returns a Future of the reply.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: internal private options (do not use).
Returns:
a Future with the decoded redis reply as result (when available) or
a ConnectionError object in case of connection error.
Raises:
ClientError: your Pipeline object is empty.
Examples:
>>> @tornado.gen.coroutine
def foobar():
client = Client()
result = yield client.call("HSET", "key", "field", "val")
"""
if not self.is_connected():
if self.autoconnect:
                # We use this method only when we are not connected
                # to avoid the performance penalty due to gen.coroutine decorator
return self._call_with_autoconnect(*args, **kwargs)
else:
error = ConnectionError("you are not connected and "
"autoconnect=False")
return tornado.gen.maybe_future(error)
return self._call(*args, **kwargs)
@tornado.gen.coroutine
def _call_with_autoconnect(self, *args, **kwargs):
yield self.connect()
if not self.is_connected():
raise tornado.gen.Return(ConnectionError("impossible to connect"))
res = yield self._call(*args, **kwargs)
raise tornado.gen.Return(res)
def async_call(self, *args, **kwargs):
"""Calls a redis command, waits for the reply and call a callback.
Following options are available (not part of the redis command itself):
- callback
Function called (with the result as argument) when the result
is available. If not set, the reply is silently discarded. In
case of errors, the callback is called with a
TornadisException object as argument.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: options as keyword parameters.
Examples:
>>> def cb(result):
pass
>>> client.async_call("HSET", "key", "field", "val", callback=cb)
"""
def after_autoconnect_callback(future):
if self.is_connected():
self._call(*args, **kwargs)
else:
# FIXME
pass
if 'callback' not in kwargs:
kwargs['callback'] = discard_reply_cb
if not self.is_connected():
if self.autoconnect:
connect_future = self.connect()
cb = after_autoconnect_callback
self.__connection._ioloop.add_future(connect_future, cb)
else:
error = ConnectionError("you are not connected and "
"autoconnect=False")
kwargs['callback'](error)
else:
self._call(*args, **kwargs)
def _call(self, *args, **kwargs):
callback = False
if 'callback' in kwargs:
callback = True
if len(args) == 1 and isinstance(args[0], Pipeline):
fn = self._pipelined_call
pipeline = args[0]
if pipeline.number_of_stacked_calls == 0:
excep = ClientError("empty pipeline")
if callback:
kwargs['callback'](excep)
else:
return tornado.gen.maybe_future(excep)
arguments = (pipeline,)
else:
if "__multiple_replies" in kwargs:
fn = self._simple_call_with_multiple_replies
arguments = tuple([kwargs["__multiple_replies"]] + list(args))
else:
fn = self._simple_call
arguments = args
if callback:
fn(*arguments, **kwargs)
else:
return tornado.gen.Task(fn, *arguments, **kwargs)
def _reply_aggregator(self, callback, replies, reply):
self._reply_list.append(reply)
if len(self._reply_list) == replies:
callback(self._reply_list)
self._reply_list = []
def _simple_call(self, *args, **kwargs):
callback = kwargs['callback']
msg = format_args_in_redis_protocol(*args)
self.__callback_queue.append(callback)
self.__connection.write(msg)
def _simple_call_with_multiple_replies(self, replies, *args, **kwargs):
original_callback = kwargs['callback']
msg = format_args_in_redis_protocol(*args)
callback = functools.partial(self._reply_aggregator, original_callback,
replies)
for _ in range(0, replies):
self.__callback_queue.append(callback)
self.__connection.write(msg)
def _pipelined_call(self, pipeline, callback):
buf = WriteBuffer()
replies = len(pipeline.pipelined_args)
cb = functools.partial(self._reply_aggregator, callback, replies)
for args in pipeline.pipelined_args:
self.__callback_queue.append(cb)
tmp_buf = format_args_in_redis_protocol(*args)
buf.append(tmp_buf)
self.__connection.write(buf)
def get_last_state_change_timedelta(self):
return self.__connection._state.get_last_state_change_timedelta()
|
thefab/tornadis | tornadis/client.py | Client._close_callback | python | def _close_callback(self):
while True:
try:
callback = self.__callback_queue.popleft()
callback(ConnectionError("closed connection"))
except IndexError:
break
if self.subscribed:
# pubsub clients
self._reply_list.append(ConnectionError("closed connection"))
self._condition.notify_all() | Callback called when redis closed the connection.
The callback queue is emptied and we call each callback found
with None or with an exception object to wake up blocked client. | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/client.py#L130-L145 | null | class Client(object):
"""High level object to interact with redis.
Attributes:
autoconnect (boolean): True if the client is in autoconnect mode
(and in autoreconnection mode) (default True).
password (string): the password to authenticate with.
db (int): database number.
connection_kwargs (dict): :class:`Connection` object
kwargs (note that read_callback and close_callback args are
set automatically).
"""
def __init__(self, autoconnect=True, password=None, db=0,
**connection_kwargs):
"""Constructor.
Args:
autoconnect (boolean): True if the client is in autoconnect mode
(and in autoreconnection mode) (default True).
password (string): the password to authenticate with.
db (int): database number.
**connection_kwargs: :class:`Connection` object kwargs.
"""
if 'read_callback' in connection_kwargs or \
'close_callback' in connection_kwargs:
raise Exception("read_callback and close_callback are not allowed "
"to be used here.")
self.connection_kwargs = connection_kwargs
self.autoconnect = autoconnect
self.password = password
self.db = db
self.__connection = None
self.subscribed = False
self.__connection = None
self.__reader = None
# Used for normal clients
self.__callback_queue = None
# Used for subscribed clients
self._condition = tornado.locks.Condition()
self._reply_list = None
@property
def title(self):
return self.__connection._redis_server()
def is_connected(self):
"""Returns True is the client is connected to redis.
Returns:
True if the client if connected to redis.
"""
return (self.__connection is not None) and \
(self.__connection.is_connected())
@tornado.gen.coroutine
def connect(self):
"""Connects the client object to redis.
It's safe to use this method even if you are already connected.
Note: this method is useless with autoconnect mode (default).
Returns:
a Future object with True as result if the connection was ok.
"""
if self.is_connected():
raise tornado.gen.Return(True)
cb1 = self._read_callback
cb2 = self._close_callback
self.__callback_queue = collections.deque()
self._reply_list = []
self.__reader = hiredis.Reader(replyError=ClientError)
kwargs = self.connection_kwargs
self.__connection = Connection(cb1, cb2, **kwargs)
connection_status = yield self.__connection.connect()
if connection_status is not True:
# nothing left to do here, return
raise tornado.gen.Return(False)
if self.password is not None:
authentication_status = yield self._call('AUTH', self.password)
if authentication_status != b'OK':
# incorrect password, return back the result
LOG.warning("impossible to connect: bad password")
self.__connection.disconnect()
raise tornado.gen.Return(False)
if self.db != 0:
db_status = yield self._call('SELECT', self.db)
if db_status != b'OK':
LOG.warning("can't select db %s", self.db)
raise tornado.gen.Return(False)
raise tornado.gen.Return(True)
def disconnect(self):
"""Disconnects the client object from redis.
It's safe to use this method even if you are already disconnected.
"""
if not self.is_connected():
return
if self.__connection is not None:
self.__connection.disconnect()
def _read_callback(self, data=None):
"""Callback called when some data are read on the socket.
The buffer is given to the hiredis parser. If a reply is complete,
we put the decoded reply to on the reply queue.
Args:
data (str): string (buffer) read on the socket.
"""
try:
if data is not None:
self.__reader.feed(data)
while True:
reply = self.__reader.gets()
if reply is not False:
try:
callback = self.__callback_queue.popleft()
# normal client (1 reply = 1 callback)
callback(reply)
except IndexError:
# pubsub clients
self._reply_list.append(reply)
self._condition.notify_all()
else:
break
except hiredis.ProtocolError:
# something nasty occured (corrupt stream => no way to recover)
LOG.warning("corrupted stream => disconnect")
self.disconnect()
def call(self, *args, **kwargs):
"""Calls a redis command and returns a Future of the reply.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: internal private options (do not use).
Returns:
a Future with the decoded redis reply as result (when available) or
a ConnectionError object in case of connection error.
Raises:
ClientError: your Pipeline object is empty.
Examples:
>>> @tornado.gen.coroutine
def foobar():
client = Client()
result = yield client.call("HSET", "key", "field", "val")
"""
if not self.is_connected():
if self.autoconnect:
# We use this method only when we are not contected
# to void performance penaly due to gen.coroutine decorator
return self._call_with_autoconnect(*args, **kwargs)
else:
error = ConnectionError("you are not connected and "
"autoconnect=False")
return tornado.gen.maybe_future(error)
return self._call(*args, **kwargs)
@tornado.gen.coroutine
def _call_with_autoconnect(self, *args, **kwargs):
yield self.connect()
if not self.is_connected():
raise tornado.gen.Return(ConnectionError("impossible to connect"))
res = yield self._call(*args, **kwargs)
raise tornado.gen.Return(res)
def async_call(self, *args, **kwargs):
"""Calls a redis command, waits for the reply and call a callback.
Following options are available (not part of the redis command itself):
- callback
Function called (with the result as argument) when the result
is available. If not set, the reply is silently discarded. In
case of errors, the callback is called with a
TornadisException object as argument.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: options as keyword parameters.
Examples:
>>> def cb(result):
pass
>>> client.async_call("HSET", "key", "field", "val", callback=cb)
"""
def after_autoconnect_callback(future):
if self.is_connected():
self._call(*args, **kwargs)
else:
# FIXME
pass
if 'callback' not in kwargs:
kwargs['callback'] = discard_reply_cb
if not self.is_connected():
if self.autoconnect:
connect_future = self.connect()
cb = after_autoconnect_callback
self.__connection._ioloop.add_future(connect_future, cb)
else:
error = ConnectionError("you are not connected and "
"autoconnect=False")
kwargs['callback'](error)
else:
self._call(*args, **kwargs)
def _call(self, *args, **kwargs):
callback = False
if 'callback' in kwargs:
callback = True
if len(args) == 1 and isinstance(args[0], Pipeline):
fn = self._pipelined_call
pipeline = args[0]
if pipeline.number_of_stacked_calls == 0:
excep = ClientError("empty pipeline")
if callback:
kwargs['callback'](excep)
else:
return tornado.gen.maybe_future(excep)
arguments = (pipeline,)
else:
if "__multiple_replies" in kwargs:
fn = self._simple_call_with_multiple_replies
arguments = tuple([kwargs["__multiple_replies"]] + list(args))
else:
fn = self._simple_call
arguments = args
if callback:
fn(*arguments, **kwargs)
else:
return tornado.gen.Task(fn, *arguments, **kwargs)
def _reply_aggregator(self, callback, replies, reply):
self._reply_list.append(reply)
if len(self._reply_list) == replies:
callback(self._reply_list)
self._reply_list = []
def _simple_call(self, *args, **kwargs):
callback = kwargs['callback']
msg = format_args_in_redis_protocol(*args)
self.__callback_queue.append(callback)
self.__connection.write(msg)
def _simple_call_with_multiple_replies(self, replies, *args, **kwargs):
original_callback = kwargs['callback']
msg = format_args_in_redis_protocol(*args)
callback = functools.partial(self._reply_aggregator, original_callback,
replies)
for _ in range(0, replies):
self.__callback_queue.append(callback)
self.__connection.write(msg)
def _pipelined_call(self, pipeline, callback):
buf = WriteBuffer()
replies = len(pipeline.pipelined_args)
cb = functools.partial(self._reply_aggregator, callback, replies)
for args in pipeline.pipelined_args:
self.__callback_queue.append(cb)
tmp_buf = format_args_in_redis_protocol(*args)
buf.append(tmp_buf)
self.__connection.write(buf)
def get_last_state_change_timedelta(self):
return self.__connection._state.get_last_state_change_timedelta()
|
thefab/tornadis | tornadis/client.py | Client._read_callback | python | def _read_callback(self, data=None):
try:
if data is not None:
self.__reader.feed(data)
while True:
reply = self.__reader.gets()
if reply is not False:
try:
callback = self.__callback_queue.popleft()
# normal client (1 reply = 1 callback)
callback(reply)
except IndexError:
# pubsub clients
self._reply_list.append(reply)
self._condition.notify_all()
else:
break
except hiredis.ProtocolError:
# something nasty occured (corrupt stream => no way to recover)
LOG.warning("corrupted stream => disconnect")
self.disconnect() | Callback called when some data are read on the socket.
The buffer is given to the hiredis parser. If a reply is complete,
we put the decoded reply to on the reply queue.
Args:
data (str): string (buffer) read on the socket. | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/client.py#L147-L175 | null | class Client(object):
"""High level object to interact with redis.
Attributes:
autoconnect (boolean): True if the client is in autoconnect mode
(and in autoreconnection mode) (default True).
password (string): the password to authenticate with.
db (int): database number.
connection_kwargs (dict): :class:`Connection` object
kwargs (note that read_callback and close_callback args are
set automatically).
"""
def __init__(self, autoconnect=True, password=None, db=0,
**connection_kwargs):
"""Constructor.
Args:
autoconnect (boolean): True if the client is in autoconnect mode
(and in autoreconnection mode) (default True).
password (string): the password to authenticate with.
db (int): database number.
**connection_kwargs: :class:`Connection` object kwargs.
"""
if 'read_callback' in connection_kwargs or \
'close_callback' in connection_kwargs:
raise Exception("read_callback and close_callback are not allowed "
"to be used here.")
self.connection_kwargs = connection_kwargs
self.autoconnect = autoconnect
self.password = password
self.db = db
self.__connection = None
self.subscribed = False
self.__connection = None
self.__reader = None
# Used for normal clients
self.__callback_queue = None
# Used for subscribed clients
self._condition = tornado.locks.Condition()
self._reply_list = None
@property
def title(self):
return self.__connection._redis_server()
def is_connected(self):
"""Returns True is the client is connected to redis.
Returns:
True if the client if connected to redis.
"""
return (self.__connection is not None) and \
(self.__connection.is_connected())
@tornado.gen.coroutine
def connect(self):
"""Connects the client object to redis.
It's safe to use this method even if you are already connected.
Note: this method is useless with autoconnect mode (default).
Returns:
a Future object with True as result if the connection was ok.
"""
if self.is_connected():
raise tornado.gen.Return(True)
cb1 = self._read_callback
cb2 = self._close_callback
self.__callback_queue = collections.deque()
self._reply_list = []
self.__reader = hiredis.Reader(replyError=ClientError)
kwargs = self.connection_kwargs
self.__connection = Connection(cb1, cb2, **kwargs)
connection_status = yield self.__connection.connect()
if connection_status is not True:
# nothing left to do here, return
raise tornado.gen.Return(False)
if self.password is not None:
authentication_status = yield self._call('AUTH', self.password)
if authentication_status != b'OK':
# incorrect password, return back the result
LOG.warning("impossible to connect: bad password")
self.__connection.disconnect()
raise tornado.gen.Return(False)
if self.db != 0:
db_status = yield self._call('SELECT', self.db)
if db_status != b'OK':
LOG.warning("can't select db %s", self.db)
raise tornado.gen.Return(False)
raise tornado.gen.Return(True)
def disconnect(self):
"""Disconnects the client object from redis.
It's safe to use this method even if you are already disconnected.
"""
if not self.is_connected():
return
if self.__connection is not None:
self.__connection.disconnect()
def _close_callback(self):
"""Callback called when redis closed the connection.
The callback queue is emptied and we call each callback found
with None or with an exception object to wake up blocked client.
"""
while True:
try:
callback = self.__callback_queue.popleft()
callback(ConnectionError("closed connection"))
except IndexError:
break
if self.subscribed:
# pubsub clients
self._reply_list.append(ConnectionError("closed connection"))
self._condition.notify_all()
def call(self, *args, **kwargs):
"""Calls a redis command and returns a Future of the reply.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: internal private options (do not use).
Returns:
a Future with the decoded redis reply as result (when available) or
a ConnectionError object in case of connection error.
Raises:
ClientError: your Pipeline object is empty.
Examples:
>>> @tornado.gen.coroutine
def foobar():
client = Client()
result = yield client.call("HSET", "key", "field", "val")
"""
if not self.is_connected():
if self.autoconnect:
# We use this method only when we are not contected
# to void performance penaly due to gen.coroutine decorator
return self._call_with_autoconnect(*args, **kwargs)
else:
error = ConnectionError("you are not connected and "
"autoconnect=False")
return tornado.gen.maybe_future(error)
return self._call(*args, **kwargs)
@tornado.gen.coroutine
def _call_with_autoconnect(self, *args, **kwargs):
yield self.connect()
if not self.is_connected():
raise tornado.gen.Return(ConnectionError("impossible to connect"))
res = yield self._call(*args, **kwargs)
raise tornado.gen.Return(res)
def async_call(self, *args, **kwargs):
"""Calls a redis command, waits for the reply and call a callback.
Following options are available (not part of the redis command itself):
- callback
Function called (with the result as argument) when the result
is available. If not set, the reply is silently discarded. In
case of errors, the callback is called with a
TornadisException object as argument.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: options as keyword parameters.
Examples:
>>> def cb(result):
pass
>>> client.async_call("HSET", "key", "field", "val", callback=cb)
"""
def after_autoconnect_callback(future):
if self.is_connected():
self._call(*args, **kwargs)
else:
# FIXME
pass
if 'callback' not in kwargs:
kwargs['callback'] = discard_reply_cb
if not self.is_connected():
if self.autoconnect:
connect_future = self.connect()
cb = after_autoconnect_callback
self.__connection._ioloop.add_future(connect_future, cb)
else:
error = ConnectionError("you are not connected and "
"autoconnect=False")
kwargs['callback'](error)
else:
self._call(*args, **kwargs)
def _call(self, *args, **kwargs):
callback = False
if 'callback' in kwargs:
callback = True
if len(args) == 1 and isinstance(args[0], Pipeline):
fn = self._pipelined_call
pipeline = args[0]
if pipeline.number_of_stacked_calls == 0:
excep = ClientError("empty pipeline")
if callback:
kwargs['callback'](excep)
else:
return tornado.gen.maybe_future(excep)
arguments = (pipeline,)
else:
if "__multiple_replies" in kwargs:
fn = self._simple_call_with_multiple_replies
arguments = tuple([kwargs["__multiple_replies"]] + list(args))
else:
fn = self._simple_call
arguments = args
if callback:
fn(*arguments, **kwargs)
else:
return tornado.gen.Task(fn, *arguments, **kwargs)
def _reply_aggregator(self, callback, replies, reply):
self._reply_list.append(reply)
if len(self._reply_list) == replies:
callback(self._reply_list)
self._reply_list = []
def _simple_call(self, *args, **kwargs):
callback = kwargs['callback']
msg = format_args_in_redis_protocol(*args)
self.__callback_queue.append(callback)
self.__connection.write(msg)
def _simple_call_with_multiple_replies(self, replies, *args, **kwargs):
original_callback = kwargs['callback']
msg = format_args_in_redis_protocol(*args)
callback = functools.partial(self._reply_aggregator, original_callback,
replies)
for _ in range(0, replies):
self.__callback_queue.append(callback)
self.__connection.write(msg)
def _pipelined_call(self, pipeline, callback):
buf = WriteBuffer()
replies = len(pipeline.pipelined_args)
cb = functools.partial(self._reply_aggregator, callback, replies)
for args in pipeline.pipelined_args:
self.__callback_queue.append(cb)
tmp_buf = format_args_in_redis_protocol(*args)
buf.append(tmp_buf)
self.__connection.write(buf)
def get_last_state_change_timedelta(self):
return self.__connection._state.get_last_state_change_timedelta()
|
thefab/tornadis | tornadis/client.py | Client.call | python | def call(self, *args, **kwargs):
if not self.is_connected():
if self.autoconnect:
# We use this method only when we are not contected
# to void performance penaly due to gen.coroutine decorator
return self._call_with_autoconnect(*args, **kwargs)
else:
error = ConnectionError("you are not connected and "
"autoconnect=False")
return tornado.gen.maybe_future(error)
return self._call(*args, **kwargs) | Calls a redis command and returns a Future of the reply.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: internal private options (do not use).
Returns:
a Future with the decoded redis reply as result (when available) or
a ConnectionError object in case of connection error.
Raises:
ClientError: your Pipeline object is empty.
Examples:
>>> @tornado.gen.coroutine
def foobar():
client = Client()
result = yield client.call("HSET", "key", "field", "val") | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/client.py#L177-L208 | [
"def is_connected(self):\n \"\"\"Returns True is the client is connected to redis.\n\n Returns:\n True if the client if connected to redis.\n \"\"\"\n return (self.__connection is not None) and \\\n (self.__connection.is_connected())\n"
] | class Client(object):
"""High level object to interact with redis.
Attributes:
autoconnect (boolean): True if the client is in autoconnect mode
(and in autoreconnection mode) (default True).
password (string): the password to authenticate with.
db (int): database number.
connection_kwargs (dict): :class:`Connection` object
kwargs (note that read_callback and close_callback args are
set automatically).
"""
def __init__(self, autoconnect=True, password=None, db=0,
**connection_kwargs):
"""Constructor.
Args:
autoconnect (boolean): True if the client is in autoconnect mode
(and in autoreconnection mode) (default True).
password (string): the password to authenticate with.
db (int): database number.
**connection_kwargs: :class:`Connection` object kwargs.
"""
if 'read_callback' in connection_kwargs or \
'close_callback' in connection_kwargs:
raise Exception("read_callback and close_callback are not allowed "
"to be used here.")
self.connection_kwargs = connection_kwargs
self.autoconnect = autoconnect
self.password = password
self.db = db
self.__connection = None
self.subscribed = False
self.__connection = None
self.__reader = None
# Used for normal clients
self.__callback_queue = None
# Used for subscribed clients
self._condition = tornado.locks.Condition()
self._reply_list = None
@property
def title(self):
return self.__connection._redis_server()
def is_connected(self):
"""Returns True is the client is connected to redis.
Returns:
True if the client if connected to redis.
"""
return (self.__connection is not None) and \
(self.__connection.is_connected())
@tornado.gen.coroutine
def connect(self):
"""Connects the client object to redis.
It's safe to use this method even if you are already connected.
Note: this method is useless with autoconnect mode (default).
Returns:
a Future object with True as result if the connection was ok.
"""
if self.is_connected():
raise tornado.gen.Return(True)
cb1 = self._read_callback
cb2 = self._close_callback
self.__callback_queue = collections.deque()
self._reply_list = []
self.__reader = hiredis.Reader(replyError=ClientError)
kwargs = self.connection_kwargs
self.__connection = Connection(cb1, cb2, **kwargs)
connection_status = yield self.__connection.connect()
if connection_status is not True:
# nothing left to do here, return
raise tornado.gen.Return(False)
if self.password is not None:
authentication_status = yield self._call('AUTH', self.password)
if authentication_status != b'OK':
# incorrect password, return back the result
LOG.warning("impossible to connect: bad password")
self.__connection.disconnect()
raise tornado.gen.Return(False)
if self.db != 0:
db_status = yield self._call('SELECT', self.db)
if db_status != b'OK':
LOG.warning("can't select db %s", self.db)
raise tornado.gen.Return(False)
raise tornado.gen.Return(True)
def disconnect(self):
"""Disconnects the client object from redis.
It's safe to use this method even if you are already disconnected.
"""
if not self.is_connected():
return
if self.__connection is not None:
self.__connection.disconnect()
def _close_callback(self):
"""Callback called when redis closed the connection.
The callback queue is emptied and we call each callback found
with None or with an exception object to wake up blocked client.
"""
while True:
try:
callback = self.__callback_queue.popleft()
callback(ConnectionError("closed connection"))
except IndexError:
break
if self.subscribed:
# pubsub clients
self._reply_list.append(ConnectionError("closed connection"))
self._condition.notify_all()
def _read_callback(self, data=None):
"""Callback called when some data are read on the socket.
The buffer is given to the hiredis parser. If a reply is complete,
we put the decoded reply to on the reply queue.
Args:
data (str): string (buffer) read on the socket.
"""
try:
if data is not None:
self.__reader.feed(data)
while True:
reply = self.__reader.gets()
if reply is not False:
try:
callback = self.__callback_queue.popleft()
# normal client (1 reply = 1 callback)
callback(reply)
except IndexError:
# pubsub clients
self._reply_list.append(reply)
self._condition.notify_all()
else:
break
except hiredis.ProtocolError:
# something nasty occured (corrupt stream => no way to recover)
LOG.warning("corrupted stream => disconnect")
self.disconnect()
@tornado.gen.coroutine
def _call_with_autoconnect(self, *args, **kwargs):
yield self.connect()
if not self.is_connected():
raise tornado.gen.Return(ConnectionError("impossible to connect"))
res = yield self._call(*args, **kwargs)
raise tornado.gen.Return(res)
def async_call(self, *args, **kwargs):
"""Calls a redis command, waits for the reply and call a callback.
Following options are available (not part of the redis command itself):
- callback
Function called (with the result as argument) when the result
is available. If not set, the reply is silently discarded. In
case of errors, the callback is called with a
TornadisException object as argument.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: options as keyword parameters.
Examples:
>>> def cb(result):
pass
>>> client.async_call("HSET", "key", "field", "val", callback=cb)
"""
def after_autoconnect_callback(future):
if self.is_connected():
self._call(*args, **kwargs)
else:
# FIXME
pass
if 'callback' not in kwargs:
kwargs['callback'] = discard_reply_cb
if not self.is_connected():
if self.autoconnect:
connect_future = self.connect()
cb = after_autoconnect_callback
self.__connection._ioloop.add_future(connect_future, cb)
else:
error = ConnectionError("you are not connected and "
"autoconnect=False")
kwargs['callback'](error)
else:
self._call(*args, **kwargs)
def _call(self, *args, **kwargs):
callback = False
if 'callback' in kwargs:
callback = True
if len(args) == 1 and isinstance(args[0], Pipeline):
fn = self._pipelined_call
pipeline = args[0]
if pipeline.number_of_stacked_calls == 0:
excep = ClientError("empty pipeline")
if callback:
kwargs['callback'](excep)
else:
return tornado.gen.maybe_future(excep)
arguments = (pipeline,)
else:
if "__multiple_replies" in kwargs:
fn = self._simple_call_with_multiple_replies
arguments = tuple([kwargs["__multiple_replies"]] + list(args))
else:
fn = self._simple_call
arguments = args
if callback:
fn(*arguments, **kwargs)
else:
return tornado.gen.Task(fn, *arguments, **kwargs)
def _reply_aggregator(self, callback, replies, reply):
self._reply_list.append(reply)
if len(self._reply_list) == replies:
callback(self._reply_list)
self._reply_list = []
def _simple_call(self, *args, **kwargs):
callback = kwargs['callback']
msg = format_args_in_redis_protocol(*args)
self.__callback_queue.append(callback)
self.__connection.write(msg)
def _simple_call_with_multiple_replies(self, replies, *args, **kwargs):
original_callback = kwargs['callback']
msg = format_args_in_redis_protocol(*args)
callback = functools.partial(self._reply_aggregator, original_callback,
replies)
for _ in range(0, replies):
self.__callback_queue.append(callback)
self.__connection.write(msg)
def _pipelined_call(self, pipeline, callback):
buf = WriteBuffer()
replies = len(pipeline.pipelined_args)
cb = functools.partial(self._reply_aggregator, callback, replies)
for args in pipeline.pipelined_args:
self.__callback_queue.append(cb)
tmp_buf = format_args_in_redis_protocol(*args)
buf.append(tmp_buf)
self.__connection.write(buf)
def get_last_state_change_timedelta(self):
return self.__connection._state.get_last_state_change_timedelta()
|
thefab/tornadis | tornadis/client.py | Client.async_call | python | def async_call(self, *args, **kwargs):
def after_autoconnect_callback(future):
if self.is_connected():
self._call(*args, **kwargs)
else:
# FIXME
pass
if 'callback' not in kwargs:
kwargs['callback'] = discard_reply_cb
if not self.is_connected():
if self.autoconnect:
connect_future = self.connect()
cb = after_autoconnect_callback
self.__connection._ioloop.add_future(connect_future, cb)
else:
error = ConnectionError("you are not connected and "
"autoconnect=False")
kwargs['callback'](error)
else:
self._call(*args, **kwargs) | Calls a redis command, waits for the reply and call a callback.
Following options are available (not part of the redis command itself):
- callback
Function called (with the result as argument) when the result
is available. If not set, the reply is silently discarded. In
case of errors, the callback is called with a
TornadisException object as argument.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: options as keyword parameters.
Examples:
>>> def cb(result):
pass
>>> client.async_call("HSET", "key", "field", "val", callback=cb) | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/client.py#L218-L259 | [
"def is_connected(self):\n \"\"\"Returns True is the client is connected to redis.\n\n Returns:\n True if the client if connected to redis.\n \"\"\"\n return (self.__connection is not None) and \\\n (self.__connection.is_connected())\n"
] | class Client(object):
"""High level object to interact with redis.
Attributes:
autoconnect (boolean): True if the client is in autoconnect mode
(and in autoreconnection mode) (default True).
password (string): the password to authenticate with.
db (int): database number.
connection_kwargs (dict): :class:`Connection` object
kwargs (note that read_callback and close_callback args are
set automatically).
"""
def __init__(self, autoconnect=True, password=None, db=0,
**connection_kwargs):
"""Constructor.
Args:
autoconnect (boolean): True if the client is in autoconnect mode
(and in autoreconnection mode) (default True).
password (string): the password to authenticate with.
db (int): database number.
**connection_kwargs: :class:`Connection` object kwargs.
"""
if 'read_callback' in connection_kwargs or \
'close_callback' in connection_kwargs:
raise Exception("read_callback and close_callback are not allowed "
"to be used here.")
self.connection_kwargs = connection_kwargs
self.autoconnect = autoconnect
self.password = password
self.db = db
self.__connection = None
self.subscribed = False
self.__connection = None
self.__reader = None
# Used for normal clients
self.__callback_queue = None
# Used for subscribed clients
self._condition = tornado.locks.Condition()
self._reply_list = None
@property
def title(self):
return self.__connection._redis_server()
def is_connected(self):
"""Returns True is the client is connected to redis.
Returns:
True if the client if connected to redis.
"""
return (self.__connection is not None) and \
(self.__connection.is_connected())
@tornado.gen.coroutine
def connect(self):
"""Connects the client object to redis.
It's safe to use this method even if you are already connected.
Note: this method is useless with autoconnect mode (default).
Returns:
a Future object with True as result if the connection was ok.
"""
if self.is_connected():
raise tornado.gen.Return(True)
cb1 = self._read_callback
cb2 = self._close_callback
self.__callback_queue = collections.deque()
self._reply_list = []
self.__reader = hiredis.Reader(replyError=ClientError)
kwargs = self.connection_kwargs
self.__connection = Connection(cb1, cb2, **kwargs)
connection_status = yield self.__connection.connect()
if connection_status is not True:
# nothing left to do here, return
raise tornado.gen.Return(False)
if self.password is not None:
authentication_status = yield self._call('AUTH', self.password)
if authentication_status != b'OK':
# incorrect password, return back the result
LOG.warning("impossible to connect: bad password")
self.__connection.disconnect()
raise tornado.gen.Return(False)
if self.db != 0:
db_status = yield self._call('SELECT', self.db)
if db_status != b'OK':
LOG.warning("can't select db %s", self.db)
raise tornado.gen.Return(False)
raise tornado.gen.Return(True)
def disconnect(self):
"""Disconnects the client object from redis.
It's safe to use this method even if you are already disconnected.
"""
if not self.is_connected():
return
if self.__connection is not None:
self.__connection.disconnect()
def _close_callback(self):
"""Callback called when redis closed the connection.
The callback queue is emptied and we call each callback found
with None or with an exception object to wake up blocked client.
"""
while True:
try:
callback = self.__callback_queue.popleft()
callback(ConnectionError("closed connection"))
except IndexError:
break
if self.subscribed:
# pubsub clients
self._reply_list.append(ConnectionError("closed connection"))
self._condition.notify_all()
def _read_callback(self, data=None):
"""Callback called when some data are read on the socket.
The buffer is given to the hiredis parser. If a reply is complete,
we put the decoded reply to on the reply queue.
Args:
data (str): string (buffer) read on the socket.
"""
try:
if data is not None:
self.__reader.feed(data)
while True:
reply = self.__reader.gets()
if reply is not False:
try:
callback = self.__callback_queue.popleft()
# normal client (1 reply = 1 callback)
callback(reply)
except IndexError:
# pubsub clients
self._reply_list.append(reply)
self._condition.notify_all()
else:
break
except hiredis.ProtocolError:
# something nasty occured (corrupt stream => no way to recover)
LOG.warning("corrupted stream => disconnect")
self.disconnect()
def call(self, *args, **kwargs):
"""Calls a redis command and returns a Future of the reply.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: internal private options (do not use).
Returns:
a Future with the decoded redis reply as result (when available) or
a ConnectionError object in case of connection error.
Raises:
ClientError: your Pipeline object is empty.
Examples:
>>> @tornado.gen.coroutine
def foobar():
client = Client()
result = yield client.call("HSET", "key", "field", "val")
"""
if not self.is_connected():
if self.autoconnect:
# We use this method only when we are not contected
# to void performance penaly due to gen.coroutine decorator
return self._call_with_autoconnect(*args, **kwargs)
else:
error = ConnectionError("you are not connected and "
"autoconnect=False")
return tornado.gen.maybe_future(error)
return self._call(*args, **kwargs)
@tornado.gen.coroutine
def _call_with_autoconnect(self, *args, **kwargs):
yield self.connect()
if not self.is_connected():
raise tornado.gen.Return(ConnectionError("impossible to connect"))
res = yield self._call(*args, **kwargs)
raise tornado.gen.Return(res)
def _call(self, *args, **kwargs):
callback = False
if 'callback' in kwargs:
callback = True
if len(args) == 1 and isinstance(args[0], Pipeline):
fn = self._pipelined_call
pipeline = args[0]
if pipeline.number_of_stacked_calls == 0:
excep = ClientError("empty pipeline")
if callback:
kwargs['callback'](excep)
else:
return tornado.gen.maybe_future(excep)
arguments = (pipeline,)
else:
if "__multiple_replies" in kwargs:
fn = self._simple_call_with_multiple_replies
arguments = tuple([kwargs["__multiple_replies"]] + list(args))
else:
fn = self._simple_call
arguments = args
if callback:
fn(*arguments, **kwargs)
else:
return tornado.gen.Task(fn, *arguments, **kwargs)
def _reply_aggregator(self, callback, replies, reply):
self._reply_list.append(reply)
if len(self._reply_list) == replies:
callback(self._reply_list)
self._reply_list = []
def _simple_call(self, *args, **kwargs):
callback = kwargs['callback']
msg = format_args_in_redis_protocol(*args)
self.__callback_queue.append(callback)
self.__connection.write(msg)
def _simple_call_with_multiple_replies(self, replies, *args, **kwargs):
original_callback = kwargs['callback']
msg = format_args_in_redis_protocol(*args)
callback = functools.partial(self._reply_aggregator, original_callback,
replies)
for _ in range(0, replies):
self.__callback_queue.append(callback)
self.__connection.write(msg)
def _pipelined_call(self, pipeline, callback):
buf = WriteBuffer()
replies = len(pipeline.pipelined_args)
cb = functools.partial(self._reply_aggregator, callback, replies)
for args in pipeline.pipelined_args:
self.__callback_queue.append(cb)
tmp_buf = format_args_in_redis_protocol(*args)
buf.append(tmp_buf)
self.__connection.write(buf)
def get_last_state_change_timedelta(self):
return self.__connection._state.get_last_state_change_timedelta()
|
thefab/tornadis | tornadis/utils.py | format_args_in_redis_protocol | python | def format_args_in_redis_protocol(*args):
buf = WriteBuffer()
l = "*%d\r\n" % len(args) # noqa: E741
if six.PY2:
buf.append(l)
else: # pragma: no cover
buf.append(l.encode('utf-8'))
for arg in args:
if isinstance(arg, six.text_type):
# it's a unicode string in Python2 or a standard (unicode)
# string in Python3, let's encode it in utf-8 to get raw bytes
arg = arg.encode('utf-8')
elif isinstance(arg, six.string_types):
# it's a basestring in Python2 => nothing to do
pass
elif isinstance(arg, six.binary_type): # pragma: no cover
# it's a raw bytes string in Python3 => nothing to do
pass
elif isinstance(arg, six.integer_types):
tmp = "%d" % arg
if six.PY2:
arg = tmp
else: # pragma: no cover
arg = tmp.encode('utf-8')
elif isinstance(arg, WriteBuffer):
# it's a WriteBuffer object => nothing to do
pass
else:
raise Exception("don't know what to do with %s" % type(arg))
l = "$%d\r\n" % len(arg) # noqa: E741
if six.PY2:
buf.append(l)
else: # pragma: no cover
buf.append(l.encode('utf-8'))
buf.append(arg)
buf.append(b"\r\n")
return buf | Formats arguments into redis protocol...
This function makes and returns a string/buffer corresponding to
given arguments formated with the redis protocol.
integer, text, string or binary types are automatically converted
(using utf8 if necessary).
More informations about the protocol: http://redis.io/topics/protocol
Args:
*args: full redis command as variable length argument list
Returns:
binary string (arguments in redis protocol)
Examples:
>>> format_args_in_redis_protocol("HSET", "key", "field", "value")
'*4\r\n$4\r\nHSET\r\n$3\r\nkey\r\n$5\r\nfield\r\n$5\r\nvalue\r\n' | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/utils.py#L14-L70 | [
"def append(self, data):\n \"\"\"Appends some data to end of the buffer (right).\n\n No string copy is done during this operation.\n\n Args:\n data: data to put in the buffer (can be string, memoryview or\n another WriteBuffer).\n \"\"\"\n self._append(data, True)\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of tornadis library released under the MIT license.
# See the LICENSE file for more information.
import six
from tornado.concurrent import Future
import contextlib
from tornadis.write_buffer import WriteBuffer
class ContextManagerFuture(Future):
"""A Future that can be used with the "with" statement.
When a coroutine yields this Future, the return value is a context manager
that can be used like:
>>> with (yield future) as result:
pass
At the end of the block, the Future's exit callback is run.
This class is stolen from "toro" source:
https://github.com/ajdavis/toro/blob/master/toro/__init__.py
Original credits to jesse@mongodb.com
Modified to be able to return the future result
Attributes:
_exit_callback (callable): the exit callback to call at the end of
the block
_wrapped (Future): the wrapped future
"""
def __init__(self, wrapped, exit_callback):
"""Constructor.
Args:
wrapped (Future): the original Future object (to wrap)
exit_callback: the exit callback to call at the end of
the block
"""
Future.__init__(self)
wrapped.add_done_callback(self._done_callback)
self._exit_callback = exit_callback
self._wrapped = wrapped
def _done_callback(self, wrapped):
"""Internal "done callback" to set the result of the object.
The result of the object if forced by the wrapped future. So this
internal callback must be called when the wrapped future is ready.
Args:
wrapped (Future): the wrapped Future object
"""
if wrapped.exception():
self.set_exception(wrapped.exception())
else:
self.set_result(wrapped.result())
def result(self):
"""The result method which returns a context manager
Returns:
ContextManager: The corresponding context manager
"""
if self.exception():
raise self.exception()
# Otherwise return a context manager that cleans up after the block.
@contextlib.contextmanager
def f():
try:
yield self._wrapped.result()
finally:
self._exit_callback()
return f()
|
thefab/tornadis | tornadis/utils.py | ContextManagerFuture._done_callback | python | def _done_callback(self, wrapped):
if wrapped.exception():
self.set_exception(wrapped.exception())
else:
self.set_result(wrapped.result()) | Internal "done callback" to set the result of the object.
The result of the object if forced by the wrapped future. So this
internal callback must be called when the wrapped future is ready.
Args:
wrapped (Future): the wrapped Future object | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/utils.py#L108-L120 | null | class ContextManagerFuture(Future):
"""A Future that can be used with the "with" statement.
When a coroutine yields this Future, the return value is a context manager
that can be used like:
>>> with (yield future) as result:
pass
At the end of the block, the Future's exit callback is run.
This class is stolen from "toro" source:
https://github.com/ajdavis/toro/blob/master/toro/__init__.py
Original credits to jesse@mongodb.com
Modified to be able to return the future result
Attributes:
_exit_callback (callable): the exit callback to call at the end of
the block
_wrapped (Future): the wrapped future
"""
def __init__(self, wrapped, exit_callback):
"""Constructor.
Args:
wrapped (Future): the original Future object (to wrap)
exit_callback: the exit callback to call at the end of
the block
"""
Future.__init__(self)
wrapped.add_done_callback(self._done_callback)
self._exit_callback = exit_callback
self._wrapped = wrapped
def result(self):
"""The result method which returns a context manager
Returns:
ContextManager: The corresponding context manager
"""
if self.exception():
raise self.exception()
# Otherwise return a context manager that cleans up after the block.
@contextlib.contextmanager
def f():
try:
yield self._wrapped.result()
finally:
self._exit_callback()
return f()
|
thefab/tornadis | tornadis/utils.py | ContextManagerFuture.result | python | def result(self):
if self.exception():
raise self.exception()
# Otherwise return a context manager that cleans up after the block.
@contextlib.contextmanager
def f():
try:
yield self._wrapped.result()
finally:
self._exit_callback()
return f() | The result method which returns a context manager
Returns:
ContextManager: The corresponding context manager | train | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/utils.py#L122-L138 | null | class ContextManagerFuture(Future):
"""A Future that can be used with the "with" statement.
When a coroutine yields this Future, the return value is a context manager
that can be used like:
>>> with (yield future) as result:
pass
At the end of the block, the Future's exit callback is run.
This class is stolen from "toro" source:
https://github.com/ajdavis/toro/blob/master/toro/__init__.py
Original credits to jesse@mongodb.com
Modified to be able to return the future result
Attributes:
_exit_callback (callable): the exit callback to call at the end of
the block
_wrapped (Future): the wrapped future
"""
def __init__(self, wrapped, exit_callback):
"""Constructor.
Args:
wrapped (Future): the original Future object (to wrap)
exit_callback: the exit callback to call at the end of
the block
"""
Future.__init__(self)
wrapped.add_done_callback(self._done_callback)
self._exit_callback = exit_callback
self._wrapped = wrapped
def _done_callback(self, wrapped):
"""Internal "done callback" to set the result of the object.
The result of the object if forced by the wrapped future. So this
internal callback must be called when the wrapped future is ready.
Args:
wrapped (Future): the wrapped Future object
"""
if wrapped.exception():
self.set_exception(wrapped.exception())
else:
self.set_result(wrapped.result())
|
pysal/giddy | giddy/components.py | is_component | python | def is_component(w, ids):
components = 0
marks = dict([(node, 0) for node in ids])
q = []
for node in ids:
if marks[node] == 0:
components += 1
q.append(node)
if components > 1:
return False
while q:
node = q.pop()
marks[node] = components
others = [neighbor for neighbor in w.neighbors[node]
if neighbor in ids]
for other in others:
if marks[other] == 0 and other not in q:
q.append(other)
return True | Check if the set of ids form a single connected component
Parameters
----------
w : spatial weights boject
ids : list
identifiers of units that are tested to be a single connected
component
Returns
-------
True : if the list of ids represents a single connected component
False : if the list of ids forms more than a single connected component | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/components.py#L11-L50 | null | """
Checking for connected components in a graph.
"""
__author__ = "Sergio J. Rey <srey@asu.edu>"
__all__ = ["check_contiguity"]
from operator import lt
def check_contiguity(w, neighbors, leaver):
"""Check if contiguity is maintained if leaver is removed from neighbors
Parameters
----------
w : spatial weights object
simple contiguity based weights
neighbors : list
nodes that are to be checked if they form a single \
connected component
leaver : id
a member of neighbors to check for removal
Returns
-------
True : if removing leaver from neighbors does not break contiguity
of remaining set
in neighbors
False : if removing leaver from neighbors breaks contiguity
Example
-------
Setup imports and a 25x25 spatial weights matrix on a 5x5 square region.
>>> import libpysal as lps
>>> w = lps.weights.lat2W(5, 5)
Test removing various areas from a subset of the region's areas. In the
first case the subset is defined as observations 0, 1, 2, 3 and 4. The
test shows that observations 0, 1, 2 and 3 remain connected even if
observation 4 is removed.
>>> check_contiguity(w,[0,1,2,3,4],4)
True
>>> check_contiguity(w,[0,1,2,3,4],3)
False
>>> check_contiguity(w,[0,1,2,3,4],0)
True
>>> check_contiguity(w,[0,1,2,3,4],1)
False
>>>
"""
ids = neighbors[:]
ids.remove(leaver)
return is_component(w, ids)
class Graph(object):
def __init__(self, undirected=True):
self.nodes = set()
self.edges = {}
self.cluster_lookup = {}
self.no_link = {}
self.undirected = undirected
def add_edge(self, n1, n2, w):
self.nodes.add(n1)
self.nodes.add(n2)
self.edges.setdefault(n1, {}).update({n2: w})
if self.undirected:
self.edges.setdefault(n2, {}).update({n1: w})
def connected_components(self, threshold=0.9, op=lt):
if not self.undirected:
warn = "Warning, connected _components not "
warn += "defined for a directed graph"
print(warn)
return None
else:
nodes = set(self.nodes)
components, visited = [], set()
while len(nodes) > 0:
connected, visited = self.dfs(
nodes.pop(), visited, threshold, op)
connected = set(connected)
for node in connected:
if node in nodes:
nodes.remove(node)
subgraph = Graph()
subgraph.nodes = connected
subgraph.no_link = self.no_link
for s in subgraph.nodes:
for k, v in list(self.edges.get(s, {}).items()):
if k in subgraph.nodes:
subgraph.edges.setdefault(s, {}).update({k: v})
if s in self.cluster_lookup:
subgraph.cluster_lookup[s] = self.cluster_lookup[s]
components.append(subgraph)
return components
def dfs(self, v, visited, threshold, op=lt, first=None):
aux = [v]
visited.add(v)
if first is None:
first = v
for i in (n for n, w in list(self.edges.get(v, {}).items())
if op(w, threshold) and n not in visited):
x, y = self.dfs(i, visited, threshold, op, first)
aux.extend(x)
visited = visited.union(y)
return aux, visited
|
pysal/giddy | giddy/components.py | check_contiguity | python | def check_contiguity(w, neighbors, leaver):
ids = neighbors[:]
ids.remove(leaver)
return is_component(w, ids) | Check if contiguity is maintained if leaver is removed from neighbors
Parameters
----------
w : spatial weights object
simple contiguity based weights
neighbors : list
nodes that are to be checked if they form a single \
connected component
leaver : id
a member of neighbors to check for removal
Returns
-------
True : if removing leaver from neighbors does not break contiguity
of remaining set
in neighbors
False : if removing leaver from neighbors breaks contiguity
Example
-------
Setup imports and a 25x25 spatial weights matrix on a 5x5 square region.
>>> import libpysal as lps
>>> w = lps.weights.lat2W(5, 5)
Test removing various areas from a subset of the region's areas. In the
first case the subset is defined as observations 0, 1, 2, 3 and 4. The
test shows that observations 0, 1, 2 and 3 remain connected even if
observation 4 is removed.
>>> check_contiguity(w,[0,1,2,3,4],4)
True
>>> check_contiguity(w,[0,1,2,3,4],3)
False
>>> check_contiguity(w,[0,1,2,3,4],0)
True
>>> check_contiguity(w,[0,1,2,3,4],1)
False
>>> | train | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/components.py#L53-L103 | [
"def is_component(w, ids):\n \"\"\"Check if the set of ids form a single connected component\n\n Parameters\n ----------\n\n w : spatial weights boject\n\n ids : list\n identifiers of units that are tested to be a single connected\n component\n\n\n Returns\n -------\n\n T... | """
Checking for connected components in a graph.
"""
__author__ = "Sergio J. Rey <srey@asu.edu>"
__all__ = ["check_contiguity"]
from operator import lt
def is_component(w, ids):
"""Check if the set of ids form a single connected component
Parameters
----------
w : spatial weights boject
ids : list
identifiers of units that are tested to be a single connected
component
Returns
-------
True : if the list of ids represents a single connected component
False : if the list of ids forms more than a single connected component
"""
components = 0
marks = dict([(node, 0) for node in ids])
q = []
for node in ids:
if marks[node] == 0:
components += 1
q.append(node)
if components > 1:
return False
while q:
node = q.pop()
marks[node] = components
others = [neighbor for neighbor in w.neighbors[node]
if neighbor in ids]
for other in others:
if marks[other] == 0 and other not in q:
q.append(other)
return True
class Graph(object):
def __init__(self, undirected=True):
self.nodes = set()
self.edges = {}
self.cluster_lookup = {}
self.no_link = {}
self.undirected = undirected
def add_edge(self, n1, n2, w):
self.nodes.add(n1)
self.nodes.add(n2)
self.edges.setdefault(n1, {}).update({n2: w})
if self.undirected:
self.edges.setdefault(n2, {}).update({n1: w})
def connected_components(self, threshold=0.9, op=lt):
if not self.undirected:
warn = "Warning, connected _components not "
warn += "defined for a directed graph"
print(warn)
return None
else:
nodes = set(self.nodes)
components, visited = [], set()
while len(nodes) > 0:
connected, visited = self.dfs(
nodes.pop(), visited, threshold, op)
connected = set(connected)
for node in connected:
if node in nodes:
nodes.remove(node)
subgraph = Graph()
subgraph.nodes = connected
subgraph.no_link = self.no_link
for s in subgraph.nodes:
for k, v in list(self.edges.get(s, {}).items()):
if k in subgraph.nodes:
subgraph.edges.setdefault(s, {}).update({k: v})
if s in self.cluster_lookup:
subgraph.cluster_lookup[s] = self.cluster_lookup[s]
components.append(subgraph)
return components
def dfs(self, v, visited, threshold, op=lt, first=None):
aux = [v]
visited.add(v)
if first is None:
first = v
for i in (n for n, w in list(self.edges.get(v, {}).items())
if op(w, threshold) and n not in visited):
x, y = self.dfs(i, visited, threshold, op, first)
aux.extend(x)
visited = visited.union(y)
return aux, visited
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.