id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
13,700
inveniosoftware/invenio-records-rest
invenio_records_rest/links.py
default_links_factory_with_additional
def default_links_factory_with_additional(additional_links):
    """Generate a links generation factory with the specified additional links.

    :param additional_links: A dict of link names to links to be added to
        the returned object.
    :returns: A link generation factory.
    """
    def factory(pid, **kwargs):
        # Start from the default links and layer the extra ones on top.
        result = default_links_factory(pid)
        for name, template in additional_links.items():
            result[name] = template.format(
                pid=pid, scheme=request.scheme, host=request.host)
        return result

    return factory
python
def default_links_factory_with_additional(additional_links): def factory(pid, **kwargs): links = default_links_factory(pid) for link in additional_links: links[link] = additional_links[link].format(pid=pid, scheme=request.scheme, host=request.host) return links return factory
[ "def", "default_links_factory_with_additional", "(", "additional_links", ")", ":", "def", "factory", "(", "pid", ",", "*", "*", "kwargs", ")", ":", "links", "=", "default_links_factory", "(", "pid", ")", "for", "link", "in", "additional_links", ":", "links", "...
Generate a links generation factory with the specified additional links. :param additional_links: A dict of link names to links to be added to the returned object. :returns: A link generation factory.
[ "Generate", "a", "links", "generation", "factory", "with", "the", "specified", "additional", "links", "." ]
e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/links.py#L33-L48
13,701
inveniosoftware/invenio-records-rest
invenio_records_rest/sorter.py
geolocation_sort
def geolocation_sort(field_name, argument, unit, mode=None, distance_type=None):
    """Sort field factory for geo-location based sorting.

    :param argument: Name of URL query string field to parse pin location
        from. Multiple locations can be provided. Each location can be either
        a string "latitude,longitude" or a geohash.
    :param unit: Distance unit (e.g. km).
    :param mode: Sort mode (avg, min, max).
    :param distance_type: Distance calculation mode.
    :returns: Function that returns geolocation sort field.
    """
    def inner(asc):
        # Pin locations come straight from the request's query string.
        criteria = {
            field_name: request.values.getlist(argument, type=str),
            'order': 'asc' if asc else 'desc',
            'unit': unit,
        }
        if mode:
            criteria['mode'] = mode
        if distance_type:
            criteria['distance_type'] = distance_type
        return {'_geo_distance': criteria}

    return inner
python
def geolocation_sort(field_name, argument, unit, mode=None, distance_type=None): def inner(asc): locations = request.values.getlist(argument, type=str) field = { '_geo_distance': { field_name: locations, 'order': 'asc' if asc else 'desc', 'unit': unit, } } if mode: field['_geo_distance']['mode'] = mode if distance_type: field['_geo_distance']['distance_type'] = distance_type return field return inner
[ "def", "geolocation_sort", "(", "field_name", ",", "argument", ",", "unit", ",", "mode", "=", "None", ",", "distance_type", "=", "None", ")", ":", "def", "inner", "(", "asc", ")", ":", "locations", "=", "request", ".", "values", ".", "getlist", "(", "a...
Sort field factory for geo-location based sorting. :param argument: Name of URL query string field to parse pin location from. Multiple locations can be provided. Each location can be either a string "latitude,longitude" or a geohash. :param unit: Distance unit (e.g. km). :param mode: Sort mode (avg, min, max). :param distance_type: Distance calculation mode. :returns: Function that returns geolocation sort field.
[ "Sort", "field", "factory", "for", "geo", "-", "location", "based", "sorting", "." ]
e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/sorter.py#L29-L55
13,702
inveniosoftware/invenio-records-rest
invenio_records_rest/sorter.py
eval_field
def eval_field(field, asc):
    """Evaluate a field for sorting purpose.

    :param field: Field definition (string, dict or callable).
    :param asc: ``True`` if order is ascending, ``False`` if descending.
    :returns: Dictionary with the sort field query.
    """
    if callable(field):
        return field(asc)
    if isinstance(field, dict):
        if asc:
            return field
        # Field should only have one key and must have an order subkey;
        # deep-copy so the caller's definition is not mutated.
        flipped = copy.deepcopy(field)
        key = next(iter(flipped))
        flipped[key]['order'] = reverse_order(flipped[key]['order'])
        return flipped
    key, key_asc = parse_sort_field(field)
    effective_asc = key_asc if asc else not key_asc
    return {key: {'order': 'asc' if effective_asc else 'desc'}}
python
def eval_field(field, asc): if isinstance(field, dict): if asc: return field else: # Field should only have one key and must have an order subkey. field = copy.deepcopy(field) key = list(field.keys())[0] field[key]['order'] = reverse_order(field[key]['order']) return field elif callable(field): return field(asc) else: key, key_asc = parse_sort_field(field) if not asc: key_asc = not key_asc return {key: {'order': 'asc' if key_asc else 'desc'}}
[ "def", "eval_field", "(", "field", ",", "asc", ")", ":", "if", "isinstance", "(", "field", ",", "dict", ")", ":", "if", "asc", ":", "return", "field", "else", ":", "# Field should only have one key and must have an order subkey.", "field", "=", "copy", ".", "d...
Evaluate a field for sorting purpose. :param field: Field definition (string, dict or callable). :param asc: ``True`` if order is ascending, ``False`` if descending. :returns: Dictionary with the sort field query.
[ "Evaluate", "a", "field", "for", "sorting", "purpose", "." ]
e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/sorter.py#L82-L104
13,703
inveniosoftware/invenio-records-rest
invenio_records_rest/sorter.py
default_sorter_factory
def default_sorter_factory(search, index):
    """Default sort query factory.

    :param search: Search query.
    :param index: Index to search in.
    :returns: Tuple of (query, URL arguments).
    """
    sort_arg_name = 'sort'
    urlfield = request.values.get(sort_arg_name, '', type=str)

    if not urlfield:
        # No explicit sort requested: fall back to the configured default.
        # Which default applies depends on whether a query string is present.
        # cast to six.text_type to handle unicodes in Python 2
        has_query = request.values.get('q', type=six.text_type)
        default_kind = 'query' if has_query else 'noquery'
        urlfield = current_app.config['RECORDS_REST_DEFAULT_SORT'].get(
            index, {}).get(default_kind, '')

    # Parse sort argument
    key, asc = parse_sort_field(urlfield)

    # Look up the configured sort options for this index/key.
    sort_options = current_app.config['RECORDS_REST_SORT_OPTIONS'].get(
        index, {}).get(key)
    if sort_options is None:
        return (search, {})

    # Apply every configured sort field to the search.
    search = search.sort(*(eval_field(f, asc) for f in sort_options['fields']))
    return (search, {sort_arg_name: urlfield})
python
def default_sorter_factory(search, index): sort_arg_name = 'sort' urlfield = request.values.get(sort_arg_name, '', type=str) # Get default sorting if sort is not specified. if not urlfield: # cast to six.text_type to handle unicodes in Python 2 has_query = request.values.get('q', type=six.text_type) urlfield = current_app.config['RECORDS_REST_DEFAULT_SORT'].get( index, {}).get('query' if has_query else 'noquery', '') # Parse sort argument key, asc = parse_sort_field(urlfield) # Get sort options sort_options = current_app.config['RECORDS_REST_SORT_OPTIONS'].get( index, {}).get(key) if sort_options is None: return (search, {}) # Get fields to sort query by search = search.sort( *[eval_field(f, asc) for f in sort_options['fields']] ) return (search, {sort_arg_name: urlfield})
[ "def", "default_sorter_factory", "(", "search", ",", "index", ")", ":", "sort_arg_name", "=", "'sort'", "urlfield", "=", "request", ".", "values", ".", "get", "(", "sort_arg_name", ",", "''", ",", "type", "=", "str", ")", "# Get default sorting if sort is not sp...
Default sort query factory. :param query: Search query. :param index: Index to search in. :returns: Tuple of (query, URL arguments).
[ "Default", "sort", "query", "factory", "." ]
e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/sorter.py#L107-L137
13,704
inveniosoftware/invenio-records-rest
invenio_records_rest/schemas/json.py
RecordMetadataSchemaJSONV1.inject_pid
def inject_pid(self, data):
    """Inject context PID in the RECID field."""
    # Remove already deserialized "pid" field; only a truthy value is
    # injected, and only when the RECID field is not already set.
    pid_value = data.pop('pid', None)
    if pid_value:
        data.setdefault(
            current_app.config['PIDSTORE_RECID_FIELD'], pid_value)
    return data
python
def inject_pid(self, data): # Remove already deserialized "pid" field pid_value = data.pop('pid', None) if pid_value: pid_field = current_app.config['PIDSTORE_RECID_FIELD'] data.setdefault(pid_field, pid_value) return data
[ "def", "inject_pid", "(", "self", ",", "data", ")", ":", "# Remove already deserialized \"pid\" field", "pid_value", "=", "data", ".", "pop", "(", "'pid'", ",", "None", ")", "if", "pid_value", ":", "pid_field", "=", "current_app", ".", "config", "[", "'PIDSTOR...
Inject context PID in the RECID field.
[ "Inject", "context", "PID", "in", "the", "RECID", "field", "." ]
e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/schemas/json.py#L83-L90
13,705
pydanny-archive/django-uni-form
uni_form/templatetags/uni_form_tags.py
BasicNode.get_render
def get_render(self, context):
    """
    Returns a `Context` object with all the necesarry stuff for rendering the form

    :param context: `django.template.Context` variable holding the context
        for the node

    `self.form` and `self.helper` are resolved into real Python objects
    resolving them from the `context`. The `actual_form` can be a form or a
    formset. If it's a formset `is_formset` is set to True. If the helper has
    a layout we use it, for rendering the form or the formset's forms.
    """
    actual_form = self.form.resolve(context)
    attrs = {}
    helper = None
    if self.helper is not None:
        helper = self.helper.resolve(context)
        if not isinstance(helper, FormHelper):
            raise TypeError('helper object provided to uni_form tag must be a uni_form.helpers.FormHelper object.')
        attrs = helper.get_attributes()

    # Build the response dictionary for the template.
    is_formset = isinstance(actual_form, BaseFormSet)
    response_dict = self.get_response_dict(attrs, context, is_formset)

    # If the helper defines a layout, render it for the form (or for each
    # form of the formset, simulating Django's forloop variable).
    if helper and helper.layout:
        if is_formset:
            forloop = ForLoopSimulator(actual_form)
            for form in actual_form.forms:
                context.update({'forloop': forloop})
                form.form_html = helper.render_layout(form, context)
                forloop.iterate()
        else:
            actual_form.form_html = helper.render_layout(actual_form, context)

    response_dict.update(
        {'formset': actual_form} if is_formset else {'form': actual_form})
    return Context(response_dict)
python
def get_render(self, context): actual_form = self.form.resolve(context) attrs = {} if self.helper is not None: helper = self.helper.resolve(context) if not isinstance(helper, FormHelper): raise TypeError('helper object provided to uni_form tag must be a uni_form.helpers.FormHelper object.') attrs = helper.get_attributes() else: helper = None # We get the response dictionary is_formset = isinstance(actual_form, BaseFormSet) response_dict = self.get_response_dict(attrs, context, is_formset) # If we have a helper's layout we use it, for the form or the formset's forms if helper and helper.layout: if not is_formset: actual_form.form_html = helper.render_layout(actual_form, context) else: forloop = ForLoopSimulator(actual_form) for form in actual_form.forms: context.update({'forloop': forloop}) form.form_html = helper.render_layout(form, context) forloop.iterate() if is_formset: response_dict.update({'formset': actual_form}) else: response_dict.update({'form': actual_form}) return Context(response_dict)
[ "def", "get_render", "(", "self", ",", "context", ")", ":", "actual_form", "=", "self", ".", "form", ".", "resolve", "(", "context", ")", "attrs", "=", "{", "}", "if", "self", ".", "helper", "is", "not", "None", ":", "helper", "=", "self", ".", "he...
Returns a `Context` object with all the necesarry stuff for rendering the form :param context: `django.template.Context` variable holding the context for the node `self.form` and `self.helper` are resolved into real Python objects resolving them from the `context`. The `actual_form` can be a form or a formset. If it's a formset `is_formset` is set to True. If the helper has a layout we use it, for rendering the form or the formset's forms.
[ "Returns", "a", "Context", "object", "with", "all", "the", "necesarry", "stuff", "for", "rendering", "the", "form" ]
159f539e2fb98752b7964d75e955fc62881c28fb
https://github.com/pydanny-archive/django-uni-form/blob/159f539e2fb98752b7964d75e955fc62881c28fb/uni_form/templatetags/uni_form_tags.py#L66-L108
13,706
pydanny-archive/django-uni-form
uni_form/helper.py
FormHelper.get_attributes
def get_attributes(self):
    """Used by the uni_form_tags to get helper attributes."""
    # Always-present attributes.
    items = {
        'form_method': self.form_method.strip(),
        'form_tag': self.form_tag,
        'form_style': self.form_style.strip(),
    }
    # Optional attributes: included only when truthy; most are stripped.
    optional = (
        ('form_action', self.form_action, True),
        ('id', self.form_id, True),
        ('class', self.form_class, True),
        ('inputs', self.inputs, False),
        ('form_error_title', self.form_error_title, True),
        ('formset_error_title', self.formset_error_title, True),
    )
    for key, value, do_strip in optional:
        if value:
            items[key] = value.strip() if do_strip else value
    return items
python
def get_attributes(self): items = {} items['form_method'] = self.form_method.strip() items['form_tag'] = self.form_tag items['form_style'] = self.form_style.strip() if self.form_action: items['form_action'] = self.form_action.strip() if self.form_id: items['id'] = self.form_id.strip() if self.form_class: items['class'] = self.form_class.strip() if self.inputs: items['inputs'] = self.inputs if self.form_error_title: items['form_error_title'] = self.form_error_title.strip() if self.formset_error_title: items['formset_error_title'] = self.formset_error_title.strip() return items
[ "def", "get_attributes", "(", "self", ")", ":", "items", "=", "{", "}", "items", "[", "'form_method'", "]", "=", "self", ".", "form_method", ".", "strip", "(", ")", "items", "[", "'form_tag'", "]", "=", "self", ".", "form_tag", "items", "[", "'form_sty...
Used by the uni_form_tags to get helper attributes
[ "Used", "by", "the", "uni_form_tags", "to", "get", "helper", "attributes" ]
159f539e2fb98752b7964d75e955fc62881c28fb
https://github.com/pydanny-archive/django-uni-form/blob/159f539e2fb98752b7964d75e955fc62881c28fb/uni_form/helper.py#L170-L191
13,707
lpantano/seqcluster
seqcluster/libs/classes.py
sequence_unique.add_exp
def add_exp(self, gr, exp):
    """Store the counts for one sample and refresh the running total.

    :param gr: name of the sample
    :param exp: counts of sample **gr**

    Updates ``self.group[gr]`` and recomputes ``self.total`` as the sum of
    all samples' counts. Returns ``None`` (mutates the instance in place).
    """
    self.group[gr] = exp
    self.total = sum(self.group.values())
python
def add_exp(self,gr,exp): self.group[gr] = exp self.total = sum(self.group.values())
[ "def", "add_exp", "(", "self", ",", "gr", ",", "exp", ")", ":", "self", ".", "group", "[", "gr", "]", "=", "exp", "self", ".", "total", "=", "sum", "(", "self", ".", "group", ".", "values", "(", ")", ")" ]
Function to add the counts for each sample :param gr: name of the sample :param exp: counts of sample **gr** :returns: dict with key,values equally to name,counts.
[ "Function", "to", "add", "the", "counts", "for", "each", "sample" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/classes.py#L21-L30
13,708
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakeJoint
def MakeJoint(pmf1, pmf2):
    """Joint distribution of values from pmf1 and pmf2.

    Args:
        pmf1: Pmf object
        pmf2: Pmf object

    Returns:
        Joint pmf of value pairs
    """
    joint = Joint()
    # Probability of a pair is the product of the marginals (independence).
    for val1, prob1 in pmf1.Items():
        for val2, prob2 in pmf2.Items():
            joint.Set((val1, val2), prob1 * prob2)
    return joint
python
def MakeJoint(pmf1, pmf2): joint = Joint() for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): joint.Set((v1, v2), p1 * p2) return joint
[ "def", "MakeJoint", "(", "pmf1", ",", "pmf2", ")", ":", "joint", "=", "Joint", "(", ")", "for", "v1", ",", "p1", "in", "pmf1", ".", "Items", "(", ")", ":", "for", "v2", ",", "p2", "in", "pmf2", ".", "Items", "(", ")", ":", "joint", ".", "Set"...
Joint distribution of values from pmf1 and pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: Joint pmf of value pairs
[ "Joint", "distribution", "of", "values", "from", "pmf1", "and", "pmf2", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L702-L716
13,709
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakeHistFromList
def MakeHistFromList(t, name=''):
    """Makes a histogram from an unsorted sequence of values.

    Args:
        t: sequence of numbers
        name: string name for this histogram

    Returns:
        Hist object
    """
    hist = Hist(name=name)
    # Plain loop instead of a list comprehension evaluated only for its
    # side effects, which built and discarded a throwaway list.
    for x in t:
        hist.Incr(x)
    return hist
python
def MakeHistFromList(t, name=''): hist = Hist(name=name) [hist.Incr(x) for x in t] return hist
[ "def", "MakeHistFromList", "(", "t", ",", "name", "=", "''", ")", ":", "hist", "=", "Hist", "(", "name", "=", "name", ")", "[", "hist", ".", "Incr", "(", "x", ")", "for", "x", "in", "t", "]", "return", "hist" ]
Makes a histogram from an unsorted sequence of values. Args: t: sequence of numbers name: string name for this histogram Returns: Hist object
[ "Makes", "a", "histogram", "from", "an", "unsorted", "sequence", "of", "values", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L719-L731
13,710
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakePmfFromList
def MakePmfFromList(t, name=''):
    """Makes a PMF from an unsorted sequence of values.

    Args:
        t: sequence of numbers
        name: string name for this PMF

    Returns:
        Pmf object
    """
    # Count occurrences first, then normalize the counts into probabilities.
    counts = MakeHistFromList(t).GetDict()
    pmf = Pmf(counts, name)
    pmf.Normalize()
    return pmf
python
def MakePmfFromList(t, name=''): hist = MakeHistFromList(t) d = hist.GetDict() pmf = Pmf(d, name) pmf.Normalize() return pmf
[ "def", "MakePmfFromList", "(", "t", ",", "name", "=", "''", ")", ":", "hist", "=", "MakeHistFromList", "(", "t", ")", "d", "=", "hist", ".", "GetDict", "(", ")", "pmf", "=", "Pmf", "(", "d", ",", "name", ")", "pmf", ".", "Normalize", "(", ")", ...
Makes a PMF from an unsorted sequence of values. Args: t: sequence of numbers name: string name for this PMF Returns: Pmf object
[ "Makes", "a", "PMF", "from", "an", "unsorted", "sequence", "of", "values", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L747-L761
13,711
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakePmfFromDict
def MakePmfFromDict(d, name=''):
    """Makes a PMF from a map from values to probabilities.

    Args:
        d: dictionary that maps values to probabilities
        name: string name for this PMF

    Returns:
        Pmf object
    """
    result = Pmf(d, name)
    result.Normalize()
    return result
python
def MakePmfFromDict(d, name=''): pmf = Pmf(d, name) pmf.Normalize() return pmf
[ "def", "MakePmfFromDict", "(", "d", ",", "name", "=", "''", ")", ":", "pmf", "=", "Pmf", "(", "d", ",", "name", ")", "pmf", ".", "Normalize", "(", ")", "return", "pmf" ]
Makes a PMF from a map from values to probabilities. Args: d: dictionary that maps values to probabilities name: string name for this PMF Returns: Pmf object
[ "Makes", "a", "PMF", "from", "a", "map", "from", "values", "to", "probabilities", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L764-L776
13,712
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakePmfFromItems
def MakePmfFromItems(t, name=''):
    """Makes a PMF from a sequence of value-probability pairs

    Args:
        t: sequence of value-probability pairs
        name: string name for this PMF

    Returns:
        Pmf object
    """
    result = Pmf(dict(t), name)
    result.Normalize()
    return result
python
def MakePmfFromItems(t, name=''): pmf = Pmf(dict(t), name) pmf.Normalize() return pmf
[ "def", "MakePmfFromItems", "(", "t", ",", "name", "=", "''", ")", ":", "pmf", "=", "Pmf", "(", "dict", "(", "t", ")", ",", "name", ")", "pmf", ".", "Normalize", "(", ")", "return", "pmf" ]
Makes a PMF from a sequence of value-probability pairs Args: t: sequence of value-probability pairs name: string name for this PMF Returns: Pmf object
[ "Makes", "a", "PMF", "from", "a", "sequence", "of", "value", "-", "probability", "pairs" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L779-L791
13,713
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakePmfFromHist
def MakePmfFromHist(hist, name=None):
    """Makes a normalized PMF from a Hist object.

    Args:
        hist: Hist object
        name: string name

    Returns:
        Pmf object
    """
    # Copy the histogram's dictionary so the Pmf does not share state with
    # it; default the name to the histogram's own.
    pmf = Pmf(dict(hist.GetDict()), hist.name if name is None else name)
    pmf.Normalize()
    return pmf
python
def MakePmfFromHist(hist, name=None): if name is None: name = hist.name # make a copy of the dictionary d = dict(hist.GetDict()) pmf = Pmf(d, name) pmf.Normalize() return pmf
[ "def", "MakePmfFromHist", "(", "hist", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "hist", ".", "name", "# make a copy of the dictionary", "d", "=", "dict", "(", "hist", ".", "GetDict", "(", ")", ")", "pmf", "=", ...
Makes a normalized PMF from a Hist object. Args: hist: Hist object name: string name Returns: Pmf object
[ "Makes", "a", "normalized", "PMF", "from", "a", "Hist", "object", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L794-L811
13,714
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakePmfFromCdf
def MakePmfFromCdf(cdf, name=None):
    """Makes a normalized Pmf from a Cdf object.

    Args:
        cdf: Cdf object
        name: string name for the new Pmf

    Returns:
        Pmf object
    """
    if name is None:
        name = cdf.name
    pmf = Pmf(name=name)
    # Probability mass of each value is the step in cumulative probability.
    previous = 0.0
    for value, cumulative in cdf.Items():
        pmf.Incr(value, cumulative - previous)
        previous = cumulative
    return pmf
python
def MakePmfFromCdf(cdf, name=None): if name is None: name = cdf.name pmf = Pmf(name=name) prev = 0.0 for val, prob in cdf.Items(): pmf.Incr(val, prob - prev) prev = prob return pmf
[ "def", "MakePmfFromCdf", "(", "cdf", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "cdf", ".", "name", "pmf", "=", "Pmf", "(", "name", "=", "name", ")", "prev", "=", "0.0", "for", "val", ",", "prob", "in", "c...
Makes a normalized Pmf from a Cdf object. Args: cdf: Cdf object name: string name for the new Pmf Returns: Pmf object
[ "Makes", "a", "normalized", "Pmf", "from", "a", "Cdf", "object", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L814-L834
13,715
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakeMixture
def MakeMixture(metapmf, name='mix'):
    """Make a mixture distribution.

    Args:
        metapmf: Pmf that maps from Pmfs to probs.
        name: string name for the new Pmf.

    Returns:
        Pmf object.
    """
    mix = Pmf(name=name)
    # Each component Pmf contributes its values weighted by its own
    # probability in the meta-Pmf.
    for component, weight in metapmf.Items():
        for value, prob in component.Items():
            mix.Incr(value, weight * prob)
    return mix
python
def MakeMixture(metapmf, name='mix'): mix = Pmf(name=name) for pmf, p1 in metapmf.Items(): for x, p2 in pmf.Items(): mix.Incr(x, p1 * p2) return mix
[ "def", "MakeMixture", "(", "metapmf", ",", "name", "=", "'mix'", ")", ":", "mix", "=", "Pmf", "(", "name", "=", "name", ")", "for", "pmf", ",", "p1", "in", "metapmf", ".", "Items", "(", ")", ":", "for", "x", ",", "p2", "in", "pmf", ".", "Items"...
Make a mixture distribution. Args: metapmf: Pmf that maps from Pmfs to probs. name: string name for the new Pmf. Returns: Pmf object.
[ "Make", "a", "mixture", "distribution", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L837-L850
13,716
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakeUniformPmf
def MakeUniformPmf(low, high, n):
    """Make a uniform Pmf.

    low: lowest value (inclusive)
    high: highest value (inclusive)
    n: number of values
    """
    pmf = Pmf()
    # Equal weight at n evenly spaced points; Normalize turns the weights
    # into probabilities.
    for value in numpy.linspace(low, high, n):
        pmf.Set(value, 1)
    pmf.Normalize()
    return pmf
python
def MakeUniformPmf(low, high, n): pmf = Pmf() for x in numpy.linspace(low, high, n): pmf.Set(x, 1) pmf.Normalize() return pmf
[ "def", "MakeUniformPmf", "(", "low", ",", "high", ",", "n", ")", ":", "pmf", "=", "Pmf", "(", ")", "for", "x", "in", "numpy", ".", "linspace", "(", "low", ",", "high", ",", "n", ")", ":", "pmf", ".", "Set", "(", "x", ",", "1", ")", "pmf", "...
Make a uniform Pmf. low: lowest value (inclusive) high: highest value (inclusize) n: number of values
[ "Make", "a", "uniform", "Pmf", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L853-L864
13,717
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakeCdfFromPmf
def MakeCdfFromPmf(pmf, name=None):
    """Makes a CDF from a Pmf object.

    Args:
        pmf: Pmf.Pmf object
        name: string name for the data.

    Returns:
        Cdf object
    """
    # `is None` instead of `== None`: identity is the idiomatic (and
    # override-proof) way to test for None.
    if name is None:
        name = pmf.name
    return MakeCdfFromItems(pmf.Items(), name)
python
def MakeCdfFromPmf(pmf, name=None): if name == None: name = pmf.name return MakeCdfFromItems(pmf.Items(), name)
[ "def", "MakeCdfFromPmf", "(", "pmf", ",", "name", "=", "None", ")", ":", "if", "name", "==", "None", ":", "name", "=", "pmf", ".", "name", "return", "MakeCdfFromItems", "(", "pmf", ".", "Items", "(", ")", ",", "name", ")" ]
Makes a CDF from a Pmf object. Args: pmf: Pmf.Pmf object name: string name for the data. Returns: Cdf object
[ "Makes", "a", "CDF", "from", "a", "Pmf", "object", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1117-L1129
13,718
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakeSuiteFromList
def MakeSuiteFromList(t, name=''):
    """Makes a suite from an unsorted sequence of values.

    Args:
        t: sequence of numbers
        name: string name for this suite

    Returns:
        Suite object
    """
    hist = MakeHistFromList(t)
    # Bug fix: forward the caller-supplied name; it was previously dropped
    # so the suite always got MakeSuiteFromDict's default ''.
    return MakeSuiteFromDict(hist.GetDict(), name)
python
def MakeSuiteFromList(t, name=''): hist = MakeHistFromList(t) d = hist.GetDict() return MakeSuiteFromDict(d)
[ "def", "MakeSuiteFromList", "(", "t", ",", "name", "=", "''", ")", ":", "hist", "=", "MakeHistFromList", "(", "t", ")", "d", "=", "hist", ".", "GetDict", "(", ")", "return", "MakeSuiteFromDict", "(", "d", ")" ]
Makes a suite from an unsorted sequence of values. Args: t: sequence of numbers name: string name for this suite Returns: Suite object
[ "Makes", "a", "suite", "from", "an", "unsorted", "sequence", "of", "values", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1250-L1262
13,719
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakeSuiteFromHist
def MakeSuiteFromHist(hist, name=None):
    """Makes a normalized suite from a Hist object.

    Args:
        hist: Hist object
        name: string name

    Returns:
        Suite object
    """
    if name is None:
        name = hist.name
    # Copy the dictionary so the suite does not share state with the Hist.
    return MakeSuiteFromDict(dict(hist.GetDict()), name)
python
def MakeSuiteFromHist(hist, name=None): if name is None: name = hist.name # make a copy of the dictionary d = dict(hist.GetDict()) return MakeSuiteFromDict(d, name)
[ "def", "MakeSuiteFromHist", "(", "hist", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "hist", ".", "name", "# make a copy of the dictionary", "d", "=", "dict", "(", "hist", ".", "GetDict", "(", ")", ")", "return", "...
Makes a normalized suite from a Hist object. Args: hist: Hist object name: string name Returns: Suite object
[ "Makes", "a", "normalized", "suite", "from", "a", "Hist", "object", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1265-L1280
13,720
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakeSuiteFromDict
def MakeSuiteFromDict(d, name=''):
    """Makes a suite from a map from values to probabilities.

    Args:
        d: dictionary that maps values to probabilities
        name: string name for this suite

    Returns:
        Suite object
    """
    result = Suite(name=name)
    result.SetDict(d)
    result.Normalize()
    return result
python
def MakeSuiteFromDict(d, name=''): suite = Suite(name=name) suite.SetDict(d) suite.Normalize() return suite
[ "def", "MakeSuiteFromDict", "(", "d", ",", "name", "=", "''", ")", ":", "suite", "=", "Suite", "(", "name", "=", "name", ")", "suite", ".", "SetDict", "(", "d", ")", "suite", ".", "Normalize", "(", ")", "return", "suite" ]
Makes a suite from a map from values to probabilities. Args: d: dictionary that maps values to probabilities name: string name for this suite Returns: Suite object
[ "Makes", "a", "suite", "from", "a", "map", "from", "values", "to", "probabilities", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1283-L1296
13,721
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakeSuiteFromCdf
def MakeSuiteFromCdf(cdf, name=None):
    """Makes a normalized Suite from a Cdf object.

    Args:
        cdf: Cdf object
        name: string name for the new Suite

    Returns:
        Suite object
    """
    suite = Suite(name=cdf.name if name is None else name)
    # Each value's mass is the step in cumulative probability.
    previous = 0.0
    for value, cumulative in cdf.Items():
        suite.Incr(value, cumulative - previous)
        previous = cumulative
    return suite
python
def MakeSuiteFromCdf(cdf, name=None): if name is None: name = cdf.name suite = Suite(name=name) prev = 0.0 for val, prob in cdf.Items(): suite.Incr(val, prob - prev) prev = prob return suite
[ "def", "MakeSuiteFromCdf", "(", "cdf", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "cdf", ".", "name", "suite", "=", "Suite", "(", "name", "=", "name", ")", "prev", "=", "0.0", "for", "val", ",", "prob", "in"...
Makes a normalized Suite from a Cdf object. Args: cdf: Cdf object name: string name for the new Suite Returns: Suite object
[ "Makes", "a", "normalized", "Suite", "from", "a", "Cdf", "object", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1299-L1319
13,722
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Percentile
def Percentile(pmf, percentage):
    """Computes a percentile of a given Pmf.

    percentage: float 0-100
    """
    threshold = percentage / 100.0
    cumulative = 0
    # Walk the distribution until the accumulated probability reaches the
    # requested fraction; that value is the percentile.
    for value, prob in pmf.Items():
        cumulative += prob
        if cumulative >= threshold:
            return value
python
def Percentile(pmf, percentage): p = percentage / 100.0 total = 0 for val, prob in pmf.Items(): total += prob if total >= p: return val
[ "def", "Percentile", "(", "pmf", ",", "percentage", ")", ":", "p", "=", "percentage", "/", "100.0", "total", "=", "0", "for", "val", ",", "prob", "in", "pmf", ".", "Items", "(", ")", ":", "total", "+=", "prob", "if", "total", ">=", "p", ":", "ret...
Computes a percentile of a given Pmf. percentage: float 0-100
[ "Computes", "a", "percentile", "of", "a", "given", "Pmf", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1389-L1399
13,723
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
CredibleInterval
def CredibleInterval(pmf, percentage=90): """Computes a credible interval for a given distribution. If percentage=90, computes the 90% CI. Args: pmf: Pmf object representing a posterior distribution percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ cdf = pmf.MakeCdf() prob = (1 - percentage / 100.0) / 2 interval = cdf.Value(prob), cdf.Value(1 - prob) return interval
python
def CredibleInterval(pmf, percentage=90): cdf = pmf.MakeCdf() prob = (1 - percentage / 100.0) / 2 interval = cdf.Value(prob), cdf.Value(1 - prob) return interval
[ "def", "CredibleInterval", "(", "pmf", ",", "percentage", "=", "90", ")", ":", "cdf", "=", "pmf", ".", "MakeCdf", "(", ")", "prob", "=", "(", "1", "-", "percentage", "/", "100.0", ")", "/", "2", "interval", "=", "cdf", ".", "Value", "(", "prob", ...
Computes a credible interval for a given distribution. If percentage=90, computes the 90% CI. Args: pmf: Pmf object representing a posterior distribution percentage: float between 0 and 100 Returns: sequence of two floats, low and high
[ "Computes", "a", "credible", "interval", "for", "a", "given", "distribution", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1402-L1417
13,724
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
PmfProbLess
def PmfProbLess(pmf1, pmf2): """Probability that a value from pmf1 is less than a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability """ total = 0.0 for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): if v1 < v2: total += p1 * p2 return total
python
def PmfProbLess(pmf1, pmf2): total = 0.0 for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): if v1 < v2: total += p1 * p2 return total
[ "def", "PmfProbLess", "(", "pmf1", ",", "pmf2", ")", ":", "total", "=", "0.0", "for", "v1", ",", "p1", "in", "pmf1", ".", "Items", "(", ")", ":", "for", "v2", ",", "p2", "in", "pmf2", ".", "Items", "(", ")", ":", "if", "v1", "<", "v2", ":", ...
Probability that a value from pmf1 is less than a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability
[ "Probability", "that", "a", "value", "from", "pmf1", "is", "less", "than", "a", "value", "from", "pmf2", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1420-L1435
13,725
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
SampleSum
def SampleSum(dists, n): """Draws a sample of sums from a list of distributions. dists: sequence of Pmf or Cdf objects n: sample size returns: new Pmf of sums """ pmf = MakePmfFromList(RandomSum(dists) for i in xrange(n)) return pmf
python
def SampleSum(dists, n): pmf = MakePmfFromList(RandomSum(dists) for i in xrange(n)) return pmf
[ "def", "SampleSum", "(", "dists", ",", "n", ")", ":", "pmf", "=", "MakePmfFromList", "(", "RandomSum", "(", "dists", ")", "for", "i", "in", "xrange", "(", "n", ")", ")", "return", "pmf" ]
Draws a sample of sums from a list of distributions. dists: sequence of Pmf or Cdf objects n: sample size returns: new Pmf of sums
[ "Draws", "a", "sample", "of", "sums", "from", "a", "list", "of", "distributions", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1485-L1494
13,726
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
EvalGaussianPdf
def EvalGaussianPdf(x, mu, sigma): """Computes the unnormalized PDF of the normal distribution. x: value mu: mean sigma: standard deviation returns: float probability density """ return scipy.stats.norm.pdf(x, mu, sigma)
python
def EvalGaussianPdf(x, mu, sigma): return scipy.stats.norm.pdf(x, mu, sigma)
[ "def", "EvalGaussianPdf", "(", "x", ",", "mu", ",", "sigma", ")", ":", "return", "scipy", ".", "stats", ".", "norm", ".", "pdf", "(", "x", ",", "mu", ",", "sigma", ")" ]
Computes the unnormalized PDF of the normal distribution. x: value mu: mean sigma: standard deviation returns: float probability density
[ "Computes", "the", "unnormalized", "PDF", "of", "the", "normal", "distribution", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1497-L1506
13,727
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
EvalBinomialPmf
def EvalBinomialPmf(k, n, p): """Evaluates the binomial pmf. Returns the probabily of k successes in n trials with probability p. """ return scipy.stats.binom.pmf(k, n, p)
python
def EvalBinomialPmf(k, n, p): return scipy.stats.binom.pmf(k, n, p)
[ "def", "EvalBinomialPmf", "(", "k", ",", "n", ",", "p", ")", ":", "return", "scipy", ".", "stats", ".", "binom", ".", "pmf", "(", "k", ",", "n", ",", "p", ")" ]
Evaluates the binomial pmf. Returns the probabily of k successes in n trials with probability p.
[ "Evaluates", "the", "binomial", "pmf", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1530-L1535
13,728
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
EvalPoissonPmf
def EvalPoissonPmf(k, lam): """Computes the Poisson PMF. k: number of events lam: parameter lambda in events per unit time returns: float probability """ # don't use the scipy function (yet). for lam=0 it returns NaN; # should be 0.0 # return scipy.stats.poisson.pmf(k, lam) return lam ** k * math.exp(-lam) / math.factorial(k)
python
def EvalPoissonPmf(k, lam): # don't use the scipy function (yet). for lam=0 it returns NaN; # should be 0.0 # return scipy.stats.poisson.pmf(k, lam) return lam ** k * math.exp(-lam) / math.factorial(k)
[ "def", "EvalPoissonPmf", "(", "k", ",", "lam", ")", ":", "# don't use the scipy function (yet). for lam=0 it returns NaN;", "# should be 0.0", "# return scipy.stats.poisson.pmf(k, lam)", "return", "lam", "**", "k", "*", "math", ".", "exp", "(", "-", "lam", ")", "/", ...
Computes the Poisson PMF. k: number of events lam: parameter lambda in events per unit time returns: float probability
[ "Computes", "the", "Poisson", "PMF", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1538-L1550
13,729
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakePoissonPmf
def MakePoissonPmf(lam, high, step=1): """Makes a PMF discrete approx to a Poisson distribution. lam: parameter lambda in events per unit time high: upper bound of the Pmf returns: normalized Pmf """ pmf = Pmf() for k in xrange(0, high + 1, step): p = EvalPoissonPmf(k, lam) pmf.Set(k, p) pmf.Normalize() return pmf
python
def MakePoissonPmf(lam, high, step=1): pmf = Pmf() for k in xrange(0, high + 1, step): p = EvalPoissonPmf(k, lam) pmf.Set(k, p) pmf.Normalize() return pmf
[ "def", "MakePoissonPmf", "(", "lam", ",", "high", ",", "step", "=", "1", ")", ":", "pmf", "=", "Pmf", "(", ")", "for", "k", "in", "xrange", "(", "0", ",", "high", "+", "1", ",", "step", ")", ":", "p", "=", "EvalPoissonPmf", "(", "k", ",", "la...
Makes a PMF discrete approx to a Poisson distribution. lam: parameter lambda in events per unit time high: upper bound of the Pmf returns: normalized Pmf
[ "Makes", "a", "PMF", "discrete", "approx", "to", "a", "Poisson", "distribution", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1553-L1566
13,730
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
MakeExponentialPmf
def MakeExponentialPmf(lam, high, n=200): """Makes a PMF discrete approx to an exponential distribution. lam: parameter lambda in events per unit time high: upper bound n: number of values in the Pmf returns: normalized Pmf """ pmf = Pmf() for x in numpy.linspace(0, high, n): p = EvalExponentialPdf(x, lam) pmf.Set(x, p) pmf.Normalize() return pmf
python
def MakeExponentialPmf(lam, high, n=200): pmf = Pmf() for x in numpy.linspace(0, high, n): p = EvalExponentialPdf(x, lam) pmf.Set(x, p) pmf.Normalize() return pmf
[ "def", "MakeExponentialPmf", "(", "lam", ",", "high", ",", "n", "=", "200", ")", ":", "pmf", "=", "Pmf", "(", ")", "for", "x", "in", "numpy", ".", "linspace", "(", "0", ",", "high", ",", "n", ")", ":", "p", "=", "EvalExponentialPdf", "(", "x", ...
Makes a PMF discrete approx to an exponential distribution. lam: parameter lambda in events per unit time high: upper bound n: number of values in the Pmf returns: normalized Pmf
[ "Makes", "a", "PMF", "discrete", "approx", "to", "an", "exponential", "distribution", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1585-L1599
13,731
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
GaussianCdfInverse
def GaussianCdfInverse(p, mu=0, sigma=1): """Evaluates the inverse CDF of the gaussian distribution. See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function Args: p: float mu: mean parameter sigma: standard deviation parameter Returns: float """ x = ROOT2 * erfinv(2 * p - 1) return mu + x * sigma
python
def GaussianCdfInverse(p, mu=0, sigma=1): x = ROOT2 * erfinv(2 * p - 1) return mu + x * sigma
[ "def", "GaussianCdfInverse", "(", "p", ",", "mu", "=", "0", ",", "sigma", "=", "1", ")", ":", "x", "=", "ROOT2", "*", "erfinv", "(", "2", "*", "p", "-", "1", ")", "return", "mu", "+", "x", "*", "sigma" ]
Evaluates the inverse CDF of the gaussian distribution. See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function Args: p: float mu: mean parameter sigma: standard deviation parameter Returns: float
[ "Evaluates", "the", "inverse", "CDF", "of", "the", "gaussian", "distribution", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1633-L1649
13,732
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
LogBinomialCoef
def LogBinomialCoef(n, k): """Computes the log of the binomial coefficient. http://math.stackexchange.com/questions/64716/ approximating-the-logarithm-of-the-binomial-coefficient n: number of trials k: number of successes Returns: float """ return n * log(n) - k * log(k) - (n - k) * log(n - k)
python
def LogBinomialCoef(n, k): return n * log(n) - k * log(k) - (n - k) * log(n - k)
[ "def", "LogBinomialCoef", "(", "n", ",", "k", ")", ":", "return", "n", "*", "log", "(", "n", ")", "-", "k", "*", "log", "(", "k", ")", "-", "(", "n", "-", "k", ")", "*", "log", "(", "n", "-", "k", ")" ]
Computes the log of the binomial coefficient. http://math.stackexchange.com/questions/64716/ approximating-the-logarithm-of-the-binomial-coefficient n: number of trials k: number of successes Returns: float
[ "Computes", "the", "log", "of", "the", "binomial", "coefficient", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1827-L1838
13,733
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Interpolator.Lookup
def Lookup(self, x): """Looks up x and returns the corresponding value of y.""" return self._Bisect(x, self.xs, self.ys)
python
def Lookup(self, x): return self._Bisect(x, self.xs, self.ys)
[ "def", "Lookup", "(", "self", ",", "x", ")", ":", "return", "self", ".", "_Bisect", "(", "x", ",", "self", ".", "xs", ",", "self", ".", "ys", ")" ]
Looks up x and returns the corresponding value of y.
[ "Looks", "up", "x", "and", "returns", "the", "corresponding", "value", "of", "y", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L100-L102
13,734
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Interpolator.Reverse
def Reverse(self, y): """Looks up y and returns the corresponding value of x.""" return self._Bisect(y, self.ys, self.xs)
python
def Reverse(self, y): return self._Bisect(y, self.ys, self.xs)
[ "def", "Reverse", "(", "self", ",", "y", ")", ":", "return", "self", ".", "_Bisect", "(", "y", ",", "self", ".", "ys", ",", "self", ".", "xs", ")" ]
Looks up y and returns the corresponding value of x.
[ "Looks", "up", "y", "and", "returns", "the", "corresponding", "value", "of", "x", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L104-L106
13,735
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
_DictWrapper.InitMapping
def InitMapping(self, values): """Initializes with a map from value to probability. values: map from value to probability """ for value, prob in values.iteritems(): self.Set(value, prob)
python
def InitMapping(self, values): for value, prob in values.iteritems(): self.Set(value, prob)
[ "def", "InitMapping", "(", "self", ",", "values", ")", ":", "for", "value", ",", "prob", "in", "values", ".", "iteritems", "(", ")", ":", "self", ".", "Set", "(", "value", ",", "prob", ")" ]
Initializes with a map from value to probability. values: map from value to probability
[ "Initializes", "with", "a", "map", "from", "value", "to", "probability", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L162-L168
13,736
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
_DictWrapper.InitPmf
def InitPmf(self, values): """Initializes with a Pmf. values: Pmf object """ for value, prob in values.Items(): self.Set(value, prob)
python
def InitPmf(self, values): for value, prob in values.Items(): self.Set(value, prob)
[ "def", "InitPmf", "(", "self", ",", "values", ")", ":", "for", "value", ",", "prob", "in", "values", ".", "Items", "(", ")", ":", "self", ".", "Set", "(", "value", ",", "prob", ")" ]
Initializes with a Pmf. values: Pmf object
[ "Initializes", "with", "a", "Pmf", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L170-L176
13,737
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
_DictWrapper.Copy
def Copy(self, name=None): """Returns a copy. Make a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object. Args: name: string name for the new Hist """ new = copy.copy(self) new.d = copy.copy(self.d) new.name = name if name is not None else self.name return new
python
def Copy(self, name=None): new = copy.copy(self) new.d = copy.copy(self.d) new.name = name if name is not None else self.name return new
[ "def", "Copy", "(", "self", ",", "name", "=", "None", ")", ":", "new", "=", "copy", ".", "copy", "(", "self", ")", "new", ".", "d", "=", "copy", ".", "copy", "(", "self", ".", "d", ")", "new", ".", "name", "=", "name", "if", "name", "is", "...
Returns a copy. Make a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object. Args: name: string name for the new Hist
[ "Returns", "a", "copy", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L194-L206
13,738
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
_DictWrapper.Scale
def Scale(self, factor): """Multiplies the values by a factor. factor: what to multiply by Returns: new object """ new = self.Copy() new.d.clear() for val, prob in self.Items(): new.Set(val * factor, prob) return new
python
def Scale(self, factor): new = self.Copy() new.d.clear() for val, prob in self.Items(): new.Set(val * factor, prob) return new
[ "def", "Scale", "(", "self", ",", "factor", ")", ":", "new", "=", "self", ".", "Copy", "(", ")", "new", ".", "d", ".", "clear", "(", ")", "for", "val", ",", "prob", "in", "self", ".", "Items", "(", ")", ":", "new", ".", "Set", "(", "val", "...
Multiplies the values by a factor. factor: what to multiply by Returns: new object
[ "Multiplies", "the", "values", "by", "a", "factor", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L208-L220
13,739
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
_DictWrapper.Log
def Log(self, m=None): """Log transforms the probabilities. Removes values with probability 0. Normalizes so that the largest logprob is 0. """ if self.log: raise ValueError("Pmf/Hist already under a log transform") self.log = True if m is None: m = self.MaxLike() for x, p in self.d.iteritems(): if p: self.Set(x, math.log(p / m)) else: self.Remove(x)
python
def Log(self, m=None): if self.log: raise ValueError("Pmf/Hist already under a log transform") self.log = True if m is None: m = self.MaxLike() for x, p in self.d.iteritems(): if p: self.Set(x, math.log(p / m)) else: self.Remove(x)
[ "def", "Log", "(", "self", ",", "m", "=", "None", ")", ":", "if", "self", ".", "log", ":", "raise", "ValueError", "(", "\"Pmf/Hist already under a log transform\"", ")", "self", ".", "log", "=", "True", "if", "m", "is", "None", ":", "m", "=", "self", ...
Log transforms the probabilities. Removes values with probability 0. Normalizes so that the largest logprob is 0.
[ "Log", "transforms", "the", "probabilities", ".", "Removes", "values", "with", "probability", "0", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L222-L240
13,740
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
_DictWrapper.Exp
def Exp(self, m=None): """Exponentiates the probabilities. m: how much to shift the ps before exponentiating If m is None, normalizes so that the largest prob is 1. """ if not self.log: raise ValueError("Pmf/Hist not under a log transform") self.log = False if m is None: m = self.MaxLike() for x, p in self.d.iteritems(): self.Set(x, math.exp(p - m))
python
def Exp(self, m=None): if not self.log: raise ValueError("Pmf/Hist not under a log transform") self.log = False if m is None: m = self.MaxLike() for x, p in self.d.iteritems(): self.Set(x, math.exp(p - m))
[ "def", "Exp", "(", "self", ",", "m", "=", "None", ")", ":", "if", "not", "self", ".", "log", ":", "raise", "ValueError", "(", "\"Pmf/Hist not under a log transform\"", ")", "self", ".", "log", "=", "False", "if", "m", "is", "None", ":", "m", "=", "se...
Exponentiates the probabilities. m: how much to shift the ps before exponentiating If m is None, normalizes so that the largest prob is 1.
[ "Exponentiates", "the", "probabilities", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L242-L257
13,741
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Hist.IsSubset
def IsSubset(self, other): """Checks whether the values in this histogram are a subset of the values in the given histogram.""" for val, freq in self.Items(): if freq > other.Freq(val): return False return True
python
def IsSubset(self, other): for val, freq in self.Items(): if freq > other.Freq(val): return False return True
[ "def", "IsSubset", "(", "self", ",", "other", ")", ":", "for", "val", ",", "freq", "in", "self", ".", "Items", "(", ")", ":", "if", "freq", ">", "other", ".", "Freq", "(", "val", ")", ":", "return", "False", "return", "True" ]
Checks whether the values in this histogram are a subset of the values in the given histogram.
[ "Checks", "whether", "the", "values", "in", "this", "histogram", "are", "a", "subset", "of", "the", "values", "in", "the", "given", "histogram", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L361-L367
13,742
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Hist.Subtract
def Subtract(self, other): """Subtracts the values in the given histogram from this histogram.""" for val, freq in other.Items(): self.Incr(val, -freq)
python
def Subtract(self, other): for val, freq in other.Items(): self.Incr(val, -freq)
[ "def", "Subtract", "(", "self", ",", "other", ")", ":", "for", "val", ",", "freq", "in", "other", ".", "Items", "(", ")", ":", "self", ".", "Incr", "(", "val", ",", "-", "freq", ")" ]
Subtracts the values in the given histogram from this histogram.
[ "Subtracts", "the", "values", "in", "the", "given", "histogram", "from", "this", "histogram", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L369-L372
13,743
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Pmf.ProbGreater
def ProbGreater(self, x): """Probability that a sample from this Pmf exceeds x. x: number returns: float probability """ t = [prob for (val, prob) in self.d.iteritems() if val > x] return sum(t)
python
def ProbGreater(self, x): t = [prob for (val, prob) in self.d.iteritems() if val > x] return sum(t)
[ "def", "ProbGreater", "(", "self", ",", "x", ")", ":", "t", "=", "[", "prob", "for", "(", "val", ",", "prob", ")", "in", "self", ".", "d", ".", "iteritems", "(", ")", "if", "val", ">", "x", "]", "return", "sum", "(", "t", ")" ]
Probability that a sample from this Pmf exceeds x. x: number returns: float probability
[ "Probability", "that", "a", "sample", "from", "this", "Pmf", "exceeds", "x", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L402-L410
13,744
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Pmf.Normalize
def Normalize(self, fraction=1.0): """Normalizes this PMF so the sum of all probs is fraction. Args: fraction: what the total should be after normalization Returns: the total probability before normalizing """ if self.log: raise ValueError("Pmf is under a log transform") total = self.Total() if total == 0.0: raise ValueError('total probability is zero.') logging.warning('Normalize: total probability is zero.') return total factor = float(fraction) / total for x in self.d: self.d[x] *= factor return total
python
def Normalize(self, fraction=1.0): if self.log: raise ValueError("Pmf is under a log transform") total = self.Total() if total == 0.0: raise ValueError('total probability is zero.') logging.warning('Normalize: total probability is zero.') return total factor = float(fraction) / total for x in self.d: self.d[x] *= factor return total
[ "def", "Normalize", "(", "self", ",", "fraction", "=", "1.0", ")", ":", "if", "self", ".", "log", ":", "raise", "ValueError", "(", "\"Pmf is under a log transform\"", ")", "total", "=", "self", ".", "Total", "(", ")", "if", "total", "==", "0.0", ":", "...
Normalizes this PMF so the sum of all probs is fraction. Args: fraction: what the total should be after normalization Returns: the total probability before normalizing
[ "Normalizes", "this", "PMF", "so", "the", "sum", "of", "all", "probs", "is", "fraction", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L485-L506
13,745
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Pmf.Random
def Random(self): """Chooses a random element from this PMF. Returns: float value from the Pmf """ if len(self.d) == 0: raise ValueError('Pmf contains no values.') target = random.random() total = 0.0 for x, p in self.d.iteritems(): total += p if total >= target: return x # we shouldn't get here assert False
python
def Random(self): if len(self.d) == 0: raise ValueError('Pmf contains no values.') target = random.random() total = 0.0 for x, p in self.d.iteritems(): total += p if total >= target: return x # we shouldn't get here assert False
[ "def", "Random", "(", "self", ")", ":", "if", "len", "(", "self", ".", "d", ")", "==", "0", ":", "raise", "ValueError", "(", "'Pmf contains no values.'", ")", "target", "=", "random", ".", "random", "(", ")", "total", "=", "0.0", "for", "x", ",", "...
Chooses a random element from this PMF. Returns: float value from the Pmf
[ "Chooses", "a", "random", "element", "from", "this", "PMF", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L508-L525
13,746
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Pmf.Mean
def Mean(self): """Computes the mean of a PMF. Returns: float mean """ mu = 0.0 for x, p in self.d.iteritems(): mu += p * x return mu
python
def Mean(self): mu = 0.0 for x, p in self.d.iteritems(): mu += p * x return mu
[ "def", "Mean", "(", "self", ")", ":", "mu", "=", "0.0", "for", "x", ",", "p", "in", "self", ".", "d", ".", "iteritems", "(", ")", ":", "mu", "+=", "p", "*", "x", "return", "mu" ]
Computes the mean of a PMF. Returns: float mean
[ "Computes", "the", "mean", "of", "a", "PMF", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L527-L536
13,747
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Pmf.Var
def Var(self, mu=None): """Computes the variance of a PMF. Args: mu: the point around which the variance is computed; if omitted, computes the mean Returns: float variance """ if mu is None: mu = self.Mean() var = 0.0 for x, p in self.d.iteritems(): var += p * (x - mu) ** 2 return var
python
def Var(self, mu=None): if mu is None: mu = self.Mean() var = 0.0 for x, p in self.d.iteritems(): var += p * (x - mu) ** 2 return var
[ "def", "Var", "(", "self", ",", "mu", "=", "None", ")", ":", "if", "mu", "is", "None", ":", "mu", "=", "self", ".", "Mean", "(", ")", "var", "=", "0.0", "for", "x", ",", "p", "in", "self", ".", "d", ".", "iteritems", "(", ")", ":", "var", ...
Computes the variance of a PMF. Args: mu: the point around which the variance is computed; if omitted, computes the mean Returns: float variance
[ "Computes", "the", "variance", "of", "a", "PMF", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L538-L554
13,748
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Pmf.MaximumLikelihood
def MaximumLikelihood(self): """Returns the value with the highest probability. Returns: float probability """ prob, val = max((prob, val) for val, prob in self.Items()) return val
python
def MaximumLikelihood(self): prob, val = max((prob, val) for val, prob in self.Items()) return val
[ "def", "MaximumLikelihood", "(", "self", ")", ":", "prob", ",", "val", "=", "max", "(", "(", "prob", ",", "val", ")", "for", "val", ",", "prob", "in", "self", ".", "Items", "(", ")", ")", "return", "val" ]
Returns the value with the highest probability. Returns: float probability
[ "Returns", "the", "value", "with", "the", "highest", "probability", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L556-L562
13,749
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Pmf.AddPmf
def AddPmf(self, other): """Computes the Pmf of the sum of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 + v2, p1 * p2) return pmf
python
def AddPmf(self, other): pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 + v2, p1 * p2) return pmf
[ "def", "AddPmf", "(", "self", ",", "other", ")", ":", "pmf", "=", "Pmf", "(", ")", "for", "v1", ",", "p1", "in", "self", ".", "Items", "(", ")", ":", "for", "v2", ",", "p2", "in", "other", ".", "Items", "(", ")", ":", "pmf", ".", "Incr", "(...
Computes the Pmf of the sum of values drawn from self and other. other: another Pmf returns: new Pmf
[ "Computes", "the", "Pmf", "of", "the", "sum", "of", "values", "drawn", "from", "self", "and", "other", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L590-L601
13,750
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Pmf.AddConstant
def AddConstant(self, other): """Computes the Pmf of the sum a constant and values from self. other: a number returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): pmf.Set(v1 + other, p1) return pmf
python
def AddConstant(self, other): pmf = Pmf() for v1, p1 in self.Items(): pmf.Set(v1 + other, p1) return pmf
[ "def", "AddConstant", "(", "self", ",", "other", ")", ":", "pmf", "=", "Pmf", "(", ")", "for", "v1", ",", "p1", "in", "self", ".", "Items", "(", ")", ":", "pmf", ".", "Set", "(", "v1", "+", "other", ",", "p1", ")", "return", "pmf" ]
Computes the Pmf of the sum a constant and values from self. other: a number returns: new Pmf
[ "Computes", "the", "Pmf", "of", "the", "sum", "a", "constant", "and", "values", "from", "self", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L603-L613
13,751
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Joint.Marginal
def Marginal(self, i, name=''): """Gets the marginal distribution of the indicated variable. i: index of the variable we want Returns: Pmf """ pmf = Pmf(name=name) for vs, prob in self.Items(): pmf.Incr(vs[i], prob) return pmf
python
def Marginal(self, i, name=''): pmf = Pmf(name=name) for vs, prob in self.Items(): pmf.Incr(vs[i], prob) return pmf
[ "def", "Marginal", "(", "self", ",", "i", ",", "name", "=", "''", ")", ":", "pmf", "=", "Pmf", "(", "name", "=", "name", ")", "for", "vs", ",", "prob", "in", "self", ".", "Items", "(", ")", ":", "pmf", ".", "Incr", "(", "vs", "[", "i", "]",...
Gets the marginal distribution of the indicated variable. i: index of the variable we want Returns: Pmf
[ "Gets", "the", "marginal", "distribution", "of", "the", "indicated", "variable", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L646-L656
13,752
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Joint.Conditional
def Conditional(self, i, j, val, name=''): """Gets the conditional distribution of the indicated variable. Distribution of vs[i], conditioned on vs[j] = val. i: index of the variable we want j: which variable is conditioned on val: the value the jth variable has to have Returns: Pmf """ pmf = Pmf(name=name) for vs, prob in self.Items(): if vs[j] != val: continue pmf.Incr(vs[i], prob) pmf.Normalize() return pmf
python
def Conditional(self, i, j, val, name=''): pmf = Pmf(name=name) for vs, prob in self.Items(): if vs[j] != val: continue pmf.Incr(vs[i], prob) pmf.Normalize() return pmf
[ "def", "Conditional", "(", "self", ",", "i", ",", "j", ",", "val", ",", "name", "=", "''", ")", ":", "pmf", "=", "Pmf", "(", "name", "=", "name", ")", "for", "vs", ",", "prob", "in", "self", ".", "Items", "(", ")", ":", "if", "vs", "[", "j"...
Gets the conditional distribution of the indicated variable. Distribution of vs[i], conditioned on vs[j] = val. i: index of the variable we want j: which variable is conditioned on val: the value the jth variable has to have Returns: Pmf
[ "Gets", "the", "conditional", "distribution", "of", "the", "indicated", "variable", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L658-L675
13,753
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Joint.MaxLikeInterval
def MaxLikeInterval(self, percentage=90): """Returns the maximum-likelihood credible interval. If percentage=90, computes a 90% CI containing the values with the highest likelihoods. percentage: float between 0 and 100 Returns: list of values from the suite """ interval = [] total = 0 t = [(prob, val) for val, prob in self.Items()] t.sort(reverse=True) for prob, val in t: interval.append(val) total += prob if total >= percentage / 100.0: break return interval
python
def MaxLikeInterval(self, percentage=90): interval = [] total = 0 t = [(prob, val) for val, prob in self.Items()] t.sort(reverse=True) for prob, val in t: interval.append(val) total += prob if total >= percentage / 100.0: break return interval
[ "def", "MaxLikeInterval", "(", "self", ",", "percentage", "=", "90", ")", ":", "interval", "=", "[", "]", "total", "=", "0", "t", "=", "[", "(", "prob", ",", "val", ")", "for", "val", ",", "prob", "in", "self", ".", "Items", "(", ")", "]", "t",...
Returns the maximum-likelihood credible interval. If percentage=90, computes a 90% CI containing the values with the highest likelihoods. percentage: float between 0 and 100 Returns: list of values from the suite
[ "Returns", "the", "maximum", "-", "likelihood", "credible", "interval", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L677-L699
13,754
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Cdf.Copy
def Copy(self, name=None): """Returns a copy of this Cdf. Args: name: string name for the new Cdf """ if name is None: name = self.name return Cdf(list(self.xs), list(self.ps), name)
python
def Copy(self, name=None): if name is None: name = self.name return Cdf(list(self.xs), list(self.ps), name)
[ "def", "Copy", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "self", ".", "name", "return", "Cdf", "(", "list", "(", "self", ".", "xs", ")", ",", "list", "(", "self", ".", "ps", ")", ",", "name...
Returns a copy of this Cdf. Args: name: string name for the new Cdf
[ "Returns", "a", "copy", "of", "this", "Cdf", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L881-L889
13,755
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Cdf.Shift
def Shift(self, term): """Adds a term to the xs. term: how much to add """ new = self.Copy() new.xs = [x + term for x in self.xs] return new
python
def Shift(self, term): new = self.Copy() new.xs = [x + term for x in self.xs] return new
[ "def", "Shift", "(", "self", ",", "term", ")", ":", "new", "=", "self", ".", "Copy", "(", ")", "new", ".", "xs", "=", "[", "x", "+", "term", "for", "x", "in", "self", ".", "xs", "]", "return", "new" ]
Adds a term to the xs. term: how much to add
[ "Adds", "a", "term", "to", "the", "xs", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L917-L924
13,756
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Cdf.Scale
def Scale(self, factor): """Multiplies the xs by a factor. factor: what to multiply by """ new = self.Copy() new.xs = [x * factor for x in self.xs] return new
python
def Scale(self, factor): new = self.Copy() new.xs = [x * factor for x in self.xs] return new
[ "def", "Scale", "(", "self", ",", "factor", ")", ":", "new", "=", "self", ".", "Copy", "(", ")", "new", ".", "xs", "=", "[", "x", "*", "factor", "for", "x", "in", "self", ".", "xs", "]", "return", "new" ]
Multiplies the xs by a factor. factor: what to multiply by
[ "Multiplies", "the", "xs", "by", "a", "factor", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L926-L933
13,757
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Cdf.Mean
def Mean(self): """Computes the mean of a CDF. Returns: float mean """ old_p = 0 total = 0.0 for x, new_p in zip(self.xs, self.ps): p = new_p - old_p total += p * x old_p = new_p return total
python
def Mean(self): old_p = 0 total = 0.0 for x, new_p in zip(self.xs, self.ps): p = new_p - old_p total += p * x old_p = new_p return total
[ "def", "Mean", "(", "self", ")", ":", "old_p", "=", "0", "total", "=", "0.0", "for", "x", ",", "new_p", "in", "zip", "(", "self", ".", "xs", ",", "self", ".", "ps", ")", ":", "p", "=", "new_p", "-", "old_p", "total", "+=", "p", "*", "x", "o...
Computes the mean of a CDF. Returns: float mean
[ "Computes", "the", "mean", "of", "a", "CDF", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L992-L1004
13,758
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Cdf.CredibleInterval
def CredibleInterval(self, percentage=90): """Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ prob = (1 - percentage / 100.0) / 2 interval = self.Value(prob), self.Value(1 - prob) return interval
python
def CredibleInterval(self, percentage=90): prob = (1 - percentage / 100.0) / 2 interval = self.Value(prob), self.Value(1 - prob) return interval
[ "def", "CredibleInterval", "(", "self", ",", "percentage", "=", "90", ")", ":", "prob", "=", "(", "1", "-", "percentage", "/", "100.0", ")", "/", "2", "interval", "=", "self", ".", "Value", "(", "prob", ")", ",", "self", ".", "Value", "(", "1", "...
Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high
[ "Computes", "the", "central", "credible", "interval", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1006-L1019
13,759
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Cdf.Render
def Render(self): """Generates a sequence of points suitable for plotting. An empirical CDF is a step function; linear interpolation can be misleading. Returns: tuple of (xs, ps) """ xs = [self.xs[0]] ps = [0.0] for i, p in enumerate(self.ps): xs.append(self.xs[i]) ps.append(p) try: xs.append(self.xs[i + 1]) ps.append(p) except IndexError: pass return xs, ps
python
def Render(self): xs = [self.xs[0]] ps = [0.0] for i, p in enumerate(self.ps): xs.append(self.xs[i]) ps.append(p) try: xs.append(self.xs[i + 1]) ps.append(p) except IndexError: pass return xs, ps
[ "def", "Render", "(", "self", ")", ":", "xs", "=", "[", "self", ".", "xs", "[", "0", "]", "]", "ps", "=", "[", "0.0", "]", "for", "i", ",", "p", "in", "enumerate", "(", "self", ".", "ps", ")", ":", "xs", ".", "append", "(", "self", ".", "...
Generates a sequence of points suitable for plotting. An empirical CDF is a step function; linear interpolation can be misleading. Returns: tuple of (xs, ps)
[ "Generates", "a", "sequence", "of", "points", "suitable", "for", "plotting", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1031-L1051
13,760
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Suite.LogUpdate
def LogUpdate(self, data): """Updates a suite of hypotheses based on new data. Modifies the suite directly; if you want to keep the original, make a copy. Note: unlike Update, LogUpdate does not normalize. Args: data: any representation of the data """ for hypo in self.Values(): like = self.LogLikelihood(data, hypo) self.Incr(hypo, like)
python
def LogUpdate(self, data): for hypo in self.Values(): like = self.LogLikelihood(data, hypo) self.Incr(hypo, like)
[ "def", "LogUpdate", "(", "self", ",", "data", ")", ":", "for", "hypo", "in", "self", ".", "Values", "(", ")", ":", "like", "=", "self", ".", "LogLikelihood", "(", "data", ",", "hypo", ")", "self", ".", "Incr", "(", "hypo", ",", "like", ")" ]
Updates a suite of hypotheses based on new data. Modifies the suite directly; if you want to keep the original, make a copy. Note: unlike Update, LogUpdate does not normalize. Args: data: any representation of the data
[ "Updates", "a", "suite", "of", "hypotheses", "based", "on", "new", "data", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1165-L1178
13,761
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Suite.UpdateSet
def UpdateSet(self, dataset): """Updates each hypothesis based on the dataset. This is more efficient than calling Update repeatedly because it waits until the end to Normalize. Modifies the suite directly; if you want to keep the original, make a copy. dataset: a sequence of data returns: the normalizing constant """ for data in dataset: for hypo in self.Values(): like = self.Likelihood(data, hypo) self.Mult(hypo, like) return self.Normalize()
python
def UpdateSet(self, dataset): for data in dataset: for hypo in self.Values(): like = self.Likelihood(data, hypo) self.Mult(hypo, like) return self.Normalize()
[ "def", "UpdateSet", "(", "self", ",", "dataset", ")", ":", "for", "data", "in", "dataset", ":", "for", "hypo", "in", "self", ".", "Values", "(", ")", ":", "like", "=", "self", ".", "Likelihood", "(", "data", ",", "hypo", ")", "self", ".", "Mult", ...
Updates each hypothesis based on the dataset. This is more efficient than calling Update repeatedly because it waits until the end to Normalize. Modifies the suite directly; if you want to keep the original, make a copy. dataset: a sequence of data returns: the normalizing constant
[ "Updates", "each", "hypothesis", "based", "on", "the", "dataset", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1180-L1197
13,762
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Suite.Print
def Print(self): """Prints the hypotheses and their probabilities.""" for hypo, prob in sorted(self.Items()): print(hypo, prob)
python
def Print(self): for hypo, prob in sorted(self.Items()): print(hypo, prob)
[ "def", "Print", "(", "self", ")", ":", "for", "hypo", ",", "prob", "in", "sorted", "(", "self", ".", "Items", "(", ")", ")", ":", "print", "(", "hypo", ",", "prob", ")" ]
Prints the hypotheses and their probabilities.
[ "Prints", "the", "hypotheses", "and", "their", "probabilities", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1228-L1231
13,763
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Suite.MakeOdds
def MakeOdds(self): """Transforms from probabilities to odds. Values with prob=0 are removed. """ for hypo, prob in self.Items(): if prob: self.Set(hypo, Odds(prob)) else: self.Remove(hypo)
python
def MakeOdds(self): for hypo, prob in self.Items(): if prob: self.Set(hypo, Odds(prob)) else: self.Remove(hypo)
[ "def", "MakeOdds", "(", "self", ")", ":", "for", "hypo", ",", "prob", "in", "self", ".", "Items", "(", ")", ":", "if", "prob", ":", "self", ".", "Set", "(", "hypo", ",", "Odds", "(", "prob", ")", ")", "else", ":", "self", ".", "Remove", "(", ...
Transforms from probabilities to odds. Values with prob=0 are removed.
[ "Transforms", "from", "probabilities", "to", "odds", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1233-L1242
13,764
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Suite.MakeProbs
def MakeProbs(self): """Transforms from odds to probabilities.""" for hypo, odds in self.Items(): self.Set(hypo, Probability(odds))
python
def MakeProbs(self): for hypo, odds in self.Items(): self.Set(hypo, Probability(odds))
[ "def", "MakeProbs", "(", "self", ")", ":", "for", "hypo", ",", "odds", "in", "self", ".", "Items", "(", ")", ":", "self", ".", "Set", "(", "hypo", ",", "Probability", "(", "odds", ")", ")" ]
Transforms from odds to probabilities.
[ "Transforms", "from", "odds", "to", "probabilities", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1244-L1247
13,765
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Pdf.MakePmf
def MakePmf(self, xs, name=''): """Makes a discrete version of this Pdf, evaluated at xs. xs: equally-spaced sequence of values Returns: new Pmf """ pmf = Pmf(name=name) for x in xs: pmf.Set(x, self.Density(x)) pmf.Normalize() return pmf
python
def MakePmf(self, xs, name=''): pmf = Pmf(name=name) for x in xs: pmf.Set(x, self.Density(x)) pmf.Normalize() return pmf
[ "def", "MakePmf", "(", "self", ",", "xs", ",", "name", "=", "''", ")", ":", "pmf", "=", "Pmf", "(", "name", "=", "name", ")", "for", "x", "in", "xs", ":", "pmf", ".", "Set", "(", "x", ",", "self", ".", "Density", "(", "x", ")", ")", "pmf", ...
Makes a discrete version of this Pdf, evaluated at xs. xs: equally-spaced sequence of values Returns: new Pmf
[ "Makes", "a", "discrete", "version", "of", "this", "Pdf", "evaluated", "at", "xs", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1332-L1343
13,766
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Beta.Update
def Update(self, data): """Updates a Beta distribution. data: pair of int (heads, tails) """ heads, tails = data self.alpha += heads self.beta += tails
python
def Update(self, data): heads, tails = data self.alpha += heads self.beta += tails
[ "def", "Update", "(", "self", ",", "data", ")", ":", "heads", ",", "tails", "=", "data", "self", ".", "alpha", "+=", "heads", "self", ".", "beta", "+=", "tails" ]
Updates a Beta distribution. data: pair of int (heads, tails)
[ "Updates", "a", "Beta", "distribution", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1663-L1670
13,767
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Beta.Sample
def Sample(self, n): """Generates a random sample from this distribution. n: int sample size """ size = n, return numpy.random.beta(self.alpha, self.beta, size)
python
def Sample(self, n): size = n, return numpy.random.beta(self.alpha, self.beta, size)
[ "def", "Sample", "(", "self", ",", "n", ")", ":", "size", "=", "n", ",", "return", "numpy", ".", "random", ".", "beta", "(", "self", ".", "alpha", ",", "self", ".", "beta", ",", "size", ")" ]
Generates a random sample from this distribution. n: int sample size
[ "Generates", "a", "random", "sample", "from", "this", "distribution", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1680-L1686
13,768
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Beta.MakePmf
def MakePmf(self, steps=101, name=''): """Returns a Pmf of this distribution. Note: Normally, we just evaluate the PDF at a sequence of points and treat the probability density as a probability mass. But if alpha or beta is less than one, we have to be more careful because the PDF goes to infinity at x=0 and x=1. In that case we evaluate the CDF and compute differences. """ if self.alpha < 1 or self.beta < 1: cdf = self.MakeCdf() pmf = cdf.MakePmf() return pmf xs = [i / (steps - 1.0) for i in xrange(steps)] probs = [self.EvalPdf(x) for x in xs] pmf = MakePmfFromDict(dict(zip(xs, probs)), name) return pmf
python
def MakePmf(self, steps=101, name=''): if self.alpha < 1 or self.beta < 1: cdf = self.MakeCdf() pmf = cdf.MakePmf() return pmf xs = [i / (steps - 1.0) for i in xrange(steps)] probs = [self.EvalPdf(x) for x in xs] pmf = MakePmfFromDict(dict(zip(xs, probs)), name) return pmf
[ "def", "MakePmf", "(", "self", ",", "steps", "=", "101", ",", "name", "=", "''", ")", ":", "if", "self", ".", "alpha", "<", "1", "or", "self", ".", "beta", "<", "1", ":", "cdf", "=", "self", ".", "MakeCdf", "(", ")", "pmf", "=", "cdf", ".", ...
Returns a Pmf of this distribution. Note: Normally, we just evaluate the PDF at a sequence of points and treat the probability density as a probability mass. But if alpha or beta is less than one, we have to be more careful because the PDF goes to infinity at x=0 and x=1. In that case we evaluate the CDF and compute differences.
[ "Returns", "a", "Pmf", "of", "this", "distribution", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1692-L1712
13,769
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Beta.MakeCdf
def MakeCdf(self, steps=101): """Returns the CDF of this distribution.""" xs = [i / (steps - 1.0) for i in xrange(steps)] ps = [scipy.special.betainc(self.alpha, self.beta, x) for x in xs] cdf = Cdf(xs, ps) return cdf
python
def MakeCdf(self, steps=101): xs = [i / (steps - 1.0) for i in xrange(steps)] ps = [scipy.special.betainc(self.alpha, self.beta, x) for x in xs] cdf = Cdf(xs, ps) return cdf
[ "def", "MakeCdf", "(", "self", ",", "steps", "=", "101", ")", ":", "xs", "=", "[", "i", "/", "(", "steps", "-", "1.0", ")", "for", "i", "in", "xrange", "(", "steps", ")", "]", "ps", "=", "[", "scipy", ".", "special", ".", "betainc", "(", "sel...
Returns the CDF of this distribution.
[ "Returns", "the", "CDF", "of", "this", "distribution", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1714-L1719
13,770
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Dirichlet.Update
def Update(self, data): """Updates a Dirichlet distribution. data: sequence of observations, in order corresponding to params """ m = len(data) self.params[:m] += data
python
def Update(self, data): m = len(data) self.params[:m] += data
[ "def", "Update", "(", "self", ",", "data", ")", ":", "m", "=", "len", "(", "data", ")", "self", ".", "params", "[", ":", "m", "]", "+=", "data" ]
Updates a Dirichlet distribution. data: sequence of observations, in order corresponding to params
[ "Updates", "a", "Dirichlet", "distribution", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1743-L1749
13,771
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Dirichlet.Random
def Random(self): """Generates a random variate from this distribution. Returns: normalized vector of fractions """ p = numpy.random.gamma(self.params) return p / p.sum()
python
def Random(self): p = numpy.random.gamma(self.params) return p / p.sum()
[ "def", "Random", "(", "self", ")", ":", "p", "=", "numpy", ".", "random", ".", "gamma", "(", "self", ".", "params", ")", "return", "p", "/", "p", ".", "sum", "(", ")" ]
Generates a random variate from this distribution. Returns: normalized vector of fractions
[ "Generates", "a", "random", "variate", "from", "this", "distribution", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1751-L1757
13,772
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Dirichlet.Likelihood
def Likelihood(self, data): """Computes the likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float probability """ m = len(data) if self.n < m: return 0 x = data p = self.Random() q = p[:m] ** x return q.prod()
python
def Likelihood(self, data): m = len(data) if self.n < m: return 0 x = data p = self.Random() q = p[:m] ** x return q.prod()
[ "def", "Likelihood", "(", "self", ",", "data", ")", ":", "m", "=", "len", "(", "data", ")", "if", "self", ".", "n", "<", "m", ":", "return", "0", "x", "=", "data", "p", "=", "self", ".", "Random", "(", ")", "q", "=", "p", "[", ":", "m", "...
Computes the likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float probability
[ "Computes", "the", "likelihood", "of", "the", "data", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1759-L1773
13,773
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Dirichlet.LogLikelihood
def LogLikelihood(self, data): """Computes the log likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float log probability """ m = len(data) if self.n < m: return float('-inf') x = self.Random() y = numpy.log(x[:m]) * data return y.sum()
python
def LogLikelihood(self, data): m = len(data) if self.n < m: return float('-inf') x = self.Random() y = numpy.log(x[:m]) * data return y.sum()
[ "def", "LogLikelihood", "(", "self", ",", "data", ")", ":", "m", "=", "len", "(", "data", ")", "if", "self", ".", "n", "<", "m", ":", "return", "float", "(", "'-inf'", ")", "x", "=", "self", ".", "Random", "(", ")", "y", "=", "numpy", ".", "l...
Computes the log likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float log probability
[ "Computes", "the", "log", "likelihood", "of", "the", "data", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1775-L1788
13,774
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Dirichlet.MarginalBeta
def MarginalBeta(self, i): """Computes the marginal distribution of the ith element. See http://en.wikipedia.org/wiki/Dirichlet_distribution #Marginal_distributions i: int Returns: Beta object """ alpha0 = self.params.sum() alpha = self.params[i] return Beta(alpha, alpha0 - alpha)
python
def MarginalBeta(self, i): alpha0 = self.params.sum() alpha = self.params[i] return Beta(alpha, alpha0 - alpha)
[ "def", "MarginalBeta", "(", "self", ",", "i", ")", ":", "alpha0", "=", "self", ".", "params", ".", "sum", "(", ")", "alpha", "=", "self", ".", "params", "[", "i", "]", "return", "Beta", "(", "alpha", ",", "alpha0", "-", "alpha", ")" ]
Computes the marginal distribution of the ith element. See http://en.wikipedia.org/wiki/Dirichlet_distribution #Marginal_distributions i: int Returns: Beta object
[ "Computes", "the", "marginal", "distribution", "of", "the", "ith", "element", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1790-L1802
13,775
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
Dirichlet.PredictivePmf
def PredictivePmf(self, xs, name=''): """Makes a predictive distribution. xs: values to go into the Pmf Returns: Pmf that maps from x to the mean prevalence of x """ alpha0 = self.params.sum() ps = self.params / alpha0 return MakePmfFromItems(zip(xs, ps), name=name)
python
def PredictivePmf(self, xs, name=''): alpha0 = self.params.sum() ps = self.params / alpha0 return MakePmfFromItems(zip(xs, ps), name=name)
[ "def", "PredictivePmf", "(", "self", ",", "xs", ",", "name", "=", "''", ")", ":", "alpha0", "=", "self", ".", "params", ".", "sum", "(", ")", "ps", "=", "self", ".", "params", "/", "alpha0", "return", "MakePmfFromItems", "(", "zip", "(", "xs", ",",...
Makes a predictive distribution. xs: values to go into the Pmf Returns: Pmf that maps from x to the mean prevalence of x
[ "Makes", "a", "predictive", "distribution", "." ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1804-L1813
13,776
lpantano/seqcluster
seqcluster/libs/report.py
_get_ann
def _get_ann(dbs, features): """ Gives format to annotation for html table output """ value = "" for db, feature in zip(dbs, features): value += db + ":" + feature return value
python
def _get_ann(dbs, features): value = "" for db, feature in zip(dbs, features): value += db + ":" + feature return value
[ "def", "_get_ann", "(", "dbs", ",", "features", ")", ":", "value", "=", "\"\"", "for", "db", ",", "feature", "in", "zip", "(", "dbs", ",", "features", ")", ":", "value", "+=", "db", "+", "\":\"", "+", "feature", "return", "value" ]
Gives format to annotation for html table output
[ "Gives", "format", "to", "annotation", "for", "html", "table", "output" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/report.py#L21-L28
13,777
lpantano/seqcluster
seqcluster/libs/report.py
make_profile
def make_profile(data, out_dir, args): """ Make data report for each cluster """ safe_dirs(out_dir) main_table = [] header = ['id', 'ann'] n = len(data[0]) bar = ProgressBar(maxval=n) bar.start() bar.update(0) for itern, c in enumerate(data[0]): bar.update(itern) logger.debug("creating cluser: {}".format(c)) safe_dirs(os.path.join(out_dir, c)) valid, ann, pos_structure = _single_cluster(c, data, os.path.join(out_dir, c, "maps.tsv"), args) data[0][c].update({'profile': pos_structure}) loci = data[0][c]['loci'] data[0][c]['precursor'] = {"seq": precursor_sequence(loci[0][0:5], args.ref)} data[0][c]['precursor']["colors"] = _parse(data[0][c]['profile'], data[0][c]['precursor']["seq"]) data[0][c]['precursor'].update(run_rnafold(data[0][c]['precursor']['seq'])) return data
python
def make_profile(data, out_dir, args): safe_dirs(out_dir) main_table = [] header = ['id', 'ann'] n = len(data[0]) bar = ProgressBar(maxval=n) bar.start() bar.update(0) for itern, c in enumerate(data[0]): bar.update(itern) logger.debug("creating cluser: {}".format(c)) safe_dirs(os.path.join(out_dir, c)) valid, ann, pos_structure = _single_cluster(c, data, os.path.join(out_dir, c, "maps.tsv"), args) data[0][c].update({'profile': pos_structure}) loci = data[0][c]['loci'] data[0][c]['precursor'] = {"seq": precursor_sequence(loci[0][0:5], args.ref)} data[0][c]['precursor']["colors"] = _parse(data[0][c]['profile'], data[0][c]['precursor']["seq"]) data[0][c]['precursor'].update(run_rnafold(data[0][c]['precursor']['seq'])) return data
[ "def", "make_profile", "(", "data", ",", "out_dir", ",", "args", ")", ":", "safe_dirs", "(", "out_dir", ")", "main_table", "=", "[", "]", "header", "=", "[", "'id'", ",", "'ann'", "]", "n", "=", "len", "(", "data", "[", "0", "]", ")", "bar", "=",...
Make data report for each cluster
[ "Make", "data", "report", "for", "each", "cluster" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/report.py#L39-L61
13,778
lpantano/seqcluster
seqcluster/libs/report.py
_expand
def _expand(dat, counts, start, end): """ expand the same counts from start to end """ for pos in range(start, end): for s in counts: dat[s][pos] += counts[s] return dat
python
def _expand(dat, counts, start, end): for pos in range(start, end): for s in counts: dat[s][pos] += counts[s] return dat
[ "def", "_expand", "(", "dat", ",", "counts", ",", "start", ",", "end", ")", ":", "for", "pos", "in", "range", "(", "start", ",", "end", ")", ":", "for", "s", "in", "counts", ":", "dat", "[", "s", "]", "[", "pos", "]", "+=", "counts", "[", "s"...
expand the same counts from start to end
[ "expand", "the", "same", "counts", "from", "start", "to", "end" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/report.py#L64-L71
13,779
lpantano/seqcluster
seqcluster/libs/report.py
_convert_to_df
def _convert_to_df(in_file, freq, raw_file): """ convert data frame into table with pandas """ dat = defaultdict(Counter) if isinstance(in_file, (str, unicode)): with open(in_file) as in_handle: for line in in_handle: cols = line.strip().split("\t") counts = freq[cols[3]] dat = _expand(dat, counts, int(cols[1]), int(cols[2])) else: if raw_file: out_handle = open(raw_file, "w") for name in in_file: counts = freq[name] if raw_file: print("%s\t%s\t%s\t%s\t%s\t%s" % ("chr", in_file[name][0], in_file[name][1], name, sum(counts.values()), "+"), file=out_handle, end="") dat = _expand(dat, counts, in_file[name][0], in_file[name][1]) for s in dat: for p in dat[s]: dat[s][p] = mlog2(dat[s][p] + 1) return dat
python
def _convert_to_df(in_file, freq, raw_file): dat = defaultdict(Counter) if isinstance(in_file, (str, unicode)): with open(in_file) as in_handle: for line in in_handle: cols = line.strip().split("\t") counts = freq[cols[3]] dat = _expand(dat, counts, int(cols[1]), int(cols[2])) else: if raw_file: out_handle = open(raw_file, "w") for name in in_file: counts = freq[name] if raw_file: print("%s\t%s\t%s\t%s\t%s\t%s" % ("chr", in_file[name][0], in_file[name][1], name, sum(counts.values()), "+"), file=out_handle, end="") dat = _expand(dat, counts, in_file[name][0], in_file[name][1]) for s in dat: for p in dat[s]: dat[s][p] = mlog2(dat[s][p] + 1) return dat
[ "def", "_convert_to_df", "(", "in_file", ",", "freq", ",", "raw_file", ")", ":", "dat", "=", "defaultdict", "(", "Counter", ")", "if", "isinstance", "(", "in_file", ",", "(", "str", ",", "unicode", ")", ")", ":", "with", "open", "(", "in_file", ")", ...
convert data frame into table with pandas
[ "convert", "data", "frame", "into", "table", "with", "pandas" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/report.py#L74-L97
13,780
lpantano/seqcluster
seqcluster/libs/report.py
_make
def _make(c): """ create html from template, adding figure, annotation and sequences counts """ ann = defaultdict(list) for pos in c['ann']: for db in pos: ann[db] += list(pos[db]) logger.debug(ann) valid = [l for l in c['valid']] ann_list = [", ".join(list(set(ann[feature]))) for feature in ann if feature in valid] return valid, ann_list
python
def _make(c): ann = defaultdict(list) for pos in c['ann']: for db in pos: ann[db] += list(pos[db]) logger.debug(ann) valid = [l for l in c['valid']] ann_list = [", ".join(list(set(ann[feature]))) for feature in ann if feature in valid] return valid, ann_list
[ "def", "_make", "(", "c", ")", ":", "ann", "=", "defaultdict", "(", "list", ")", "for", "pos", "in", "c", "[", "'ann'", "]", ":", "for", "db", "in", "pos", ":", "ann", "[", "db", "]", "+=", "list", "(", "pos", "[", "db", "]", ")", "logger", ...
create html from template, adding figure, annotation and sequences counts
[ "create", "html", "from", "template", "adding", "figure", "annotation", "and", "sequences", "counts" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/report.py#L100-L115
13,781
lpantano/seqcluster
seqcluster/libs/report.py
_single_cluster
def _single_cluster(c, data, out_file, args): """ Map sequences on precursors and create expression profile """ valid, ann = 0, 0 raw_file = None freq = defaultdict() [freq.update({s.keys()[0]: s.values()[0]}) for s in data[0][c]['freq']] names = [s.keys()[0] for s in data[0][c]['seqs']] seqs = [s.values()[0] for s in data[0][c]['seqs']] loci = data[0][c]['loci'] if loci[0][3] - loci[0][2] > 500: logger.info("locus bigger > 500 nt, skipping: %s" % loci) return valid, ann, {} if not file_exists(out_file): if args.razer: logger.debug("map with razer all sequences to all loci %s " % loci) map_to_precursors(seqs, names, {loci[0][0]: [loci[0][0:5]]}, out_file, args) else: logger.debug("map with biopython fn all sequences to all loci %s " % loci) if args.debug: raw_file = out_file out_file = map_to_precursor_biopython(seqs, names, loci[0][0:5], args) logger.debug("plot sequences on loci") df = _convert_to_df(out_file, freq, raw_file) if df: valid, ann = _make(data[0][c]) return valid, ann, df
python
def _single_cluster(c, data, out_file, args): valid, ann = 0, 0 raw_file = None freq = defaultdict() [freq.update({s.keys()[0]: s.values()[0]}) for s in data[0][c]['freq']] names = [s.keys()[0] for s in data[0][c]['seqs']] seqs = [s.values()[0] for s in data[0][c]['seqs']] loci = data[0][c]['loci'] if loci[0][3] - loci[0][2] > 500: logger.info("locus bigger > 500 nt, skipping: %s" % loci) return valid, ann, {} if not file_exists(out_file): if args.razer: logger.debug("map with razer all sequences to all loci %s " % loci) map_to_precursors(seqs, names, {loci[0][0]: [loci[0][0:5]]}, out_file, args) else: logger.debug("map with biopython fn all sequences to all loci %s " % loci) if args.debug: raw_file = out_file out_file = map_to_precursor_biopython(seqs, names, loci[0][0:5], args) logger.debug("plot sequences on loci") df = _convert_to_df(out_file, freq, raw_file) if df: valid, ann = _make(data[0][c]) return valid, ann, df
[ "def", "_single_cluster", "(", "c", ",", "data", ",", "out_file", ",", "args", ")", ":", "valid", ",", "ann", "=", "0", ",", "0", "raw_file", "=", "None", "freq", "=", "defaultdict", "(", ")", "[", "freq", ".", "update", "(", "{", "s", ".", "keys...
Map sequences on precursors and create expression profile
[ "Map", "sequences", "on", "precursors", "and", "create", "expression", "profile" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/report.py#L118-L149
13,782
lpantano/seqcluster
seqcluster/methods/__init__.py
read_cluster
def read_cluster(data, id=1): """Read json cluster and populate as cluster class""" cl = cluster(1) # seqs = [s.values()[0] for s in data['seqs']] names = [s.keys()[0] for s in data['seqs']] cl.add_id_member(names, 1) freq = defaultdict() [freq.update({s.keys()[0]: s.values()[0]}) for s in data['freq']]
python
def read_cluster(data, id=1): cl = cluster(1) # seqs = [s.values()[0] for s in data['seqs']] names = [s.keys()[0] for s in data['seqs']] cl.add_id_member(names, 1) freq = defaultdict() [freq.update({s.keys()[0]: s.values()[0]}) for s in data['freq']]
[ "def", "read_cluster", "(", "data", ",", "id", "=", "1", ")", ":", "cl", "=", "cluster", "(", "1", ")", "# seqs = [s.values()[0] for s in data['seqs']]", "names", "=", "[", "s", ".", "keys", "(", ")", "[", "0", "]", "for", "s", "in", "data", "[", "'s...
Read json cluster and populate as cluster class
[ "Read", "json", "cluster", "and", "populate", "as", "cluster", "class" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/methods/__init__.py#L6-L14
13,783
lpantano/seqcluster
seqcluster/libs/read.py
write_data
def write_data(data, out_file): """write json file from seqcluster cluster""" with open(out_file, 'w') as handle_out: handle_out.write(json.dumps([data], skipkeys=True, indent=2))
python
def write_data(data, out_file): with open(out_file, 'w') as handle_out: handle_out.write(json.dumps([data], skipkeys=True, indent=2))
[ "def", "write_data", "(", "data", ",", "out_file", ")", ":", "with", "open", "(", "out_file", ",", "'w'", ")", "as", "handle_out", ":", "handle_out", ".", "write", "(", "json", ".", "dumps", "(", "[", "data", "]", ",", "skipkeys", "=", "True", ",", ...
write json file from seqcluster cluster
[ "write", "json", "file", "from", "seqcluster", "cluster" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/read.py#L33-L36
13,784
lpantano/seqcluster
seqcluster/libs/read.py
get_sequences_from_cluster
def get_sequences_from_cluster(c1, c2, data): """get all sequences from on cluster""" seqs1 = data[c1]['seqs'] seqs2 = data[c2]['seqs'] seqs = list(set(seqs1 + seqs2)) names = [] for s in seqs: if s in seqs1 and s in seqs2: names.append("both") elif s in seqs1: names.append(c1) else: names.append(c2) return seqs, names
python
def get_sequences_from_cluster(c1, c2, data): seqs1 = data[c1]['seqs'] seqs2 = data[c2]['seqs'] seqs = list(set(seqs1 + seqs2)) names = [] for s in seqs: if s in seqs1 and s in seqs2: names.append("both") elif s in seqs1: names.append(c1) else: names.append(c2) return seqs, names
[ "def", "get_sequences_from_cluster", "(", "c1", ",", "c2", ",", "data", ")", ":", "seqs1", "=", "data", "[", "c1", "]", "[", "'seqs'", "]", "seqs2", "=", "data", "[", "c2", "]", "[", "'seqs'", "]", "seqs", "=", "list", "(", "set", "(", "seqs1", "...
get all sequences from on cluster
[ "get", "all", "sequences", "from", "on", "cluster" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/read.py#L38-L51
13,785
lpantano/seqcluster
seqcluster/libs/read.py
map_to_precursors
def map_to_precursors(seqs, names, loci, out_file, args): """map sequences to precursors with razers3""" with make_temp_directory() as temp: pre_fasta = os.path.join(temp, "pre.fa") seqs_fasta = os.path.join(temp, "seqs.fa") out_sam = os.path.join(temp, "out.sam") pre_fasta = get_loci_fasta(loci, pre_fasta, args.ref) out_precursor_file = out_file.replace("tsv", "fa") seqs_fasta = get_seqs_fasta(seqs, names, seqs_fasta) # print(open(pre_fasta).read().split("\n")[1]) if find_cmd("razers3"): cmd = "razers3 -dr 2 -i 80 -rr 90 -f -o {out_sam} {temp}/pre.fa {seqs_fasta}" run(cmd.format(**locals())) out_file = read_alignment(out_sam, loci, seqs, out_file) shutil.copy(pre_fasta, out_precursor_file) return out_file
python
def map_to_precursors(seqs, names, loci, out_file, args): with make_temp_directory() as temp: pre_fasta = os.path.join(temp, "pre.fa") seqs_fasta = os.path.join(temp, "seqs.fa") out_sam = os.path.join(temp, "out.sam") pre_fasta = get_loci_fasta(loci, pre_fasta, args.ref) out_precursor_file = out_file.replace("tsv", "fa") seqs_fasta = get_seqs_fasta(seqs, names, seqs_fasta) # print(open(pre_fasta).read().split("\n")[1]) if find_cmd("razers3"): cmd = "razers3 -dr 2 -i 80 -rr 90 -f -o {out_sam} {temp}/pre.fa {seqs_fasta}" run(cmd.format(**locals())) out_file = read_alignment(out_sam, loci, seqs, out_file) shutil.copy(pre_fasta, out_precursor_file) return out_file
[ "def", "map_to_precursors", "(", "seqs", ",", "names", ",", "loci", ",", "out_file", ",", "args", ")", ":", "with", "make_temp_directory", "(", ")", "as", "temp", ":", "pre_fasta", "=", "os", ".", "path", ".", "join", "(", "temp", ",", "\"pre.fa\"", ")...
map sequences to precursors with razers3
[ "map", "sequences", "to", "precursors", "with", "razers3" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/read.py#L58-L74
13,786
lpantano/seqcluster
seqcluster/libs/read.py
precursor_sequence
def precursor_sequence(loci, reference): """Get sequence from genome""" region = "%s\t%s\t%s\t.\t.\t%s" % (loci[1], loci[2], loci[3], loci[4]) precursor = pybedtools.BedTool(str(region), from_string=True).sequence(fi=reference, s=True) return open(precursor.seqfn).read().split("\n")[1]
python
def precursor_sequence(loci, reference): region = "%s\t%s\t%s\t.\t.\t%s" % (loci[1], loci[2], loci[3], loci[4]) precursor = pybedtools.BedTool(str(region), from_string=True).sequence(fi=reference, s=True) return open(precursor.seqfn).read().split("\n")[1]
[ "def", "precursor_sequence", "(", "loci", ",", "reference", ")", ":", "region", "=", "\"%s\\t%s\\t%s\\t.\\t.\\t%s\"", "%", "(", "loci", "[", "1", "]", ",", "loci", "[", "2", "]", ",", "loci", "[", "3", "]", ",", "loci", "[", "4", "]", ")", "precursor...
Get sequence from genome
[ "Get", "sequence", "from", "genome" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/read.py#L76-L80
13,787
lpantano/seqcluster
seqcluster/libs/read.py
map_to_precursors_on_fly
def map_to_precursors_on_fly(seqs, names, loci, args): """map sequences to precursors with franpr algorithm to avoid writting on disk""" precursor = precursor_sequence(loci, args.ref).upper() dat = dict() for s, n in itertools.izip(seqs, names): res = pyMatch.Match(precursor, str(s), 1, 3) if res > -1: dat[n] = [res, res + len(s)] logger.debug("mapped in %s: %s out of %s" % (loci, len(dat), len(seqs))) return dat
python
def map_to_precursors_on_fly(seqs, names, loci, args): precursor = precursor_sequence(loci, args.ref).upper() dat = dict() for s, n in itertools.izip(seqs, names): res = pyMatch.Match(precursor, str(s), 1, 3) if res > -1: dat[n] = [res, res + len(s)] logger.debug("mapped in %s: %s out of %s" % (loci, len(dat), len(seqs))) return dat
[ "def", "map_to_precursors_on_fly", "(", "seqs", ",", "names", ",", "loci", ",", "args", ")", ":", "precursor", "=", "precursor_sequence", "(", "loci", ",", "args", ".", "ref", ")", ".", "upper", "(", ")", "dat", "=", "dict", "(", ")", "for", "s", ","...
map sequences to precursors with franpr algorithm to avoid writting on disk
[ "map", "sequences", "to", "precursors", "with", "franpr", "algorithm", "to", "avoid", "writting", "on", "disk" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/read.py#L82-L91
13,788
lpantano/seqcluster
seqcluster/libs/read.py
map_to_precursor_biopython
def map_to_precursor_biopython(seqs, names, loci, args): """map the sequences using biopython package""" precursor = precursor_sequence(loci, args.ref).upper() dat = dict() for s, n in itertools.izip(seqs, names): res = _align(str(s), precursor) if res: dat[n] = res logger.debug("mapped in %s: %s out of %s" % (loci, len(dat), len(seqs))) return dat
python
def map_to_precursor_biopython(seqs, names, loci, args): precursor = precursor_sequence(loci, args.ref).upper() dat = dict() for s, n in itertools.izip(seqs, names): res = _align(str(s), precursor) if res: dat[n] = res logger.debug("mapped in %s: %s out of %s" % (loci, len(dat), len(seqs))) return dat
[ "def", "map_to_precursor_biopython", "(", "seqs", ",", "names", ",", "loci", ",", "args", ")", ":", "precursor", "=", "precursor_sequence", "(", "loci", ",", "args", ".", "ref", ")", ".", "upper", "(", ")", "dat", "=", "dict", "(", ")", "for", "s", "...
map the sequences using biopython package
[ "map", "the", "sequences", "using", "biopython", "package" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/read.py#L108-L117
13,789
lpantano/seqcluster
seqcluster/libs/read.py
get_seqs_fasta
def get_seqs_fasta(seqs, names, out_fa): """get fasta from sequences""" with open(out_fa, 'w') as fa_handle: for s, n in itertools.izip(seqs, names): print(">cx{1}-{0}\n{0}".format(s, n), file=fa_handle) return out_fa
python
def get_seqs_fasta(seqs, names, out_fa): with open(out_fa, 'w') as fa_handle: for s, n in itertools.izip(seqs, names): print(">cx{1}-{0}\n{0}".format(s, n), file=fa_handle) return out_fa
[ "def", "get_seqs_fasta", "(", "seqs", ",", "names", ",", "out_fa", ")", ":", "with", "open", "(", "out_fa", ",", "'w'", ")", "as", "fa_handle", ":", "for", "s", ",", "n", "in", "itertools", ".", "izip", "(", "seqs", ",", "names", ")", ":", "print",...
get fasta from sequences
[ "get", "fasta", "from", "sequences" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/read.py#L138-L143
13,790
lpantano/seqcluster
seqcluster/libs/read.py
get_loci_fasta
def get_loci_fasta(loci, out_fa, ref): """get fasta from precursor""" if not find_cmd("bedtools"): raise ValueError("Not bedtools installed") with make_temp_directory() as temp: bed_file = os.path.join(temp, "file.bed") for nc, loci in loci.iteritems(): for l in loci: with open(bed_file, 'w') as bed_handle: logger.debug("get_fasta: loci %s" % l) nc, c, s, e, st = l print("{0}\t{1}\t{2}\t{3}\t{3}\t{4}".format(c, s, e, nc, st), file=bed_handle) get_fasta(bed_file, ref, out_fa) return out_fa
python
def get_loci_fasta(loci, out_fa, ref): if not find_cmd("bedtools"): raise ValueError("Not bedtools installed") with make_temp_directory() as temp: bed_file = os.path.join(temp, "file.bed") for nc, loci in loci.iteritems(): for l in loci: with open(bed_file, 'w') as bed_handle: logger.debug("get_fasta: loci %s" % l) nc, c, s, e, st = l print("{0}\t{1}\t{2}\t{3}\t{3}\t{4}".format(c, s, e, nc, st), file=bed_handle) get_fasta(bed_file, ref, out_fa) return out_fa
[ "def", "get_loci_fasta", "(", "loci", ",", "out_fa", ",", "ref", ")", ":", "if", "not", "find_cmd", "(", "\"bedtools\"", ")", ":", "raise", "ValueError", "(", "\"Not bedtools installed\"", ")", "with", "make_temp_directory", "(", ")", "as", "temp", ":", "bed...
get fasta from precursor
[ "get", "fasta", "from", "precursor" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/read.py#L150-L163
13,791
lpantano/seqcluster
seqcluster/libs/read.py
read_alignment
def read_alignment(out_sam, loci, seqs, out_file): """read which seqs map to which loci and return a tab separated file""" hits = defaultdict(list) with open(out_file, "w") as out_handle: samfile = pysam.Samfile(out_sam, "r") for a in samfile.fetch(): if not a.is_unmapped: nm = int([t[1] for t in a.tags if t[0] == "NM"][0]) a = makeBED(a) if not a: continue ref, locus = get_loci(samfile.getrname(int(a.chr)), loci) hits[a.name].append((nm, "%s %s %s %s %s %s" % (a.name, a.name.split("-")[0], locus, ref, a.start, a.end))) for hit in hits.values(): nm = hit[0][0] for l in hit: if nm == l[0]: print(l[1], file=out_handle) return out_file
python
def read_alignment(out_sam, loci, seqs, out_file): hits = defaultdict(list) with open(out_file, "w") as out_handle: samfile = pysam.Samfile(out_sam, "r") for a in samfile.fetch(): if not a.is_unmapped: nm = int([t[1] for t in a.tags if t[0] == "NM"][0]) a = makeBED(a) if not a: continue ref, locus = get_loci(samfile.getrname(int(a.chr)), loci) hits[a.name].append((nm, "%s %s %s %s %s %s" % (a.name, a.name.split("-")[0], locus, ref, a.start, a.end))) for hit in hits.values(): nm = hit[0][0] for l in hit: if nm == l[0]: print(l[1], file=out_handle) return out_file
[ "def", "read_alignment", "(", "out_sam", ",", "loci", ",", "seqs", ",", "out_file", ")", ":", "hits", "=", "defaultdict", "(", "list", ")", "with", "open", "(", "out_file", ",", "\"w\"", ")", "as", "out_handle", ":", "samfile", "=", "pysam", ".", "Samf...
read which seqs map to which loci and return a tab separated file
[ "read", "which", "seqs", "map", "to", "which", "loci", "and", "return", "a", "tab", "separated", "file" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/read.py#L165-L184
13,792
lpantano/seqcluster
seqcluster/seqbuster/__init__.py
_download_mirbase
def _download_mirbase(args, version="CURRENT"): """ Download files from mirbase """ if not args.hairpin or not args.mirna: logger.info("Working with version %s" % version) hairpin_fn = op.join(op.abspath(args.out), "hairpin.fa.gz") mirna_fn = op.join(op.abspath(args.out), "miRNA.str.gz") if not file_exists(hairpin_fn): cmd_h = "wget ftp://mirbase.org/pub/mirbase/%s/hairpin.fa.gz -O %s && gunzip -f !$" % (version, hairpin_fn) do.run(cmd_h, "download hairpin") if not file_exists(mirna_fn): cmd_m = "wget ftp://mirbase.org/pub/mirbase/%s/miRNA.str.gz -O %s && gunzip -f !$" % (version, mirna_fn) do.run(cmd_m, "download mirna") else: return args.hairpin, args.mirna
python
def _download_mirbase(args, version="CURRENT"): if not args.hairpin or not args.mirna: logger.info("Working with version %s" % version) hairpin_fn = op.join(op.abspath(args.out), "hairpin.fa.gz") mirna_fn = op.join(op.abspath(args.out), "miRNA.str.gz") if not file_exists(hairpin_fn): cmd_h = "wget ftp://mirbase.org/pub/mirbase/%s/hairpin.fa.gz -O %s && gunzip -f !$" % (version, hairpin_fn) do.run(cmd_h, "download hairpin") if not file_exists(mirna_fn): cmd_m = "wget ftp://mirbase.org/pub/mirbase/%s/miRNA.str.gz -O %s && gunzip -f !$" % (version, mirna_fn) do.run(cmd_m, "download mirna") else: return args.hairpin, args.mirna
[ "def", "_download_mirbase", "(", "args", ",", "version", "=", "\"CURRENT\"", ")", ":", "if", "not", "args", ".", "hairpin", "or", "not", "args", ".", "mirna", ":", "logger", ".", "info", "(", "\"Working with version %s\"", "%", "version", ")", "hairpin_fn", ...
Download files from mirbase
[ "Download", "files", "from", "mirbase" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/seqbuster/__init__.py#L23-L38
13,793
lpantano/seqcluster
seqcluster/seqbuster/__init__.py
_make_unique
def _make_unique(name, idx): """Make name unique in case only counts there""" p = re.compile(".[aA-zZ]+_x[0-9]+") if p.match(name): tags = name[1:].split("_x") return ">%s_%s_x%s" % (tags[0], idx, tags[1]) return name.replace("@", ">")
python
def _make_unique(name, idx): p = re.compile(".[aA-zZ]+_x[0-9]+") if p.match(name): tags = name[1:].split("_x") return ">%s_%s_x%s" % (tags[0], idx, tags[1]) return name.replace("@", ">")
[ "def", "_make_unique", "(", "name", ",", "idx", ")", ":", "p", "=", "re", ".", "compile", "(", "\".[aA-zZ]+_x[0-9]+\"", ")", "if", "p", ".", "match", "(", "name", ")", ":", "tags", "=", "name", "[", "1", ":", "]", ".", "split", "(", "\"_x\"", ")"...
Make name unique in case only counts there
[ "Make", "name", "unique", "in", "case", "only", "counts", "there" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/seqbuster/__init__.py#L41-L47
13,794
lpantano/seqcluster
seqcluster/seqbuster/__init__.py
_filter_seqs
def _filter_seqs(fn): """Convert names of sequences to unique ids""" out_file = op.splitext(fn)[0] + "_unique.fa" idx = 0 if not file_exists(out_file): with open(out_file, 'w') as out_handle: with open(fn) as in_handle: for line in in_handle: if line.startswith("@") or line.startswith(">"): fixed_name = _make_unique(line.strip(), idx) seq = in_handle.next().strip() counts = _get_freq(fixed_name) if len(seq) < 26 and (counts > 1 or counts == 0): idx += 1 print(fixed_name, file=out_handle, end="\n") print(seq, file=out_handle, end="\n") if line.startswith("@"): in_handle.next() in_handle.next() return out_file
python
def _filter_seqs(fn): out_file = op.splitext(fn)[0] + "_unique.fa" idx = 0 if not file_exists(out_file): with open(out_file, 'w') as out_handle: with open(fn) as in_handle: for line in in_handle: if line.startswith("@") or line.startswith(">"): fixed_name = _make_unique(line.strip(), idx) seq = in_handle.next().strip() counts = _get_freq(fixed_name) if len(seq) < 26 and (counts > 1 or counts == 0): idx += 1 print(fixed_name, file=out_handle, end="\n") print(seq, file=out_handle, end="\n") if line.startswith("@"): in_handle.next() in_handle.next() return out_file
[ "def", "_filter_seqs", "(", "fn", ")", ":", "out_file", "=", "op", ".", "splitext", "(", "fn", ")", "[", "0", "]", "+", "\"_unique.fa\"", "idx", "=", "0", "if", "not", "file_exists", "(", "out_file", ")", ":", "with", "open", "(", "out_file", ",", ...
Convert names of sequences to unique ids
[ "Convert", "names", "of", "sequences", "to", "unique", "ids" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/seqbuster/__init__.py#L50-L69
13,795
lpantano/seqcluster
seqcluster/seqbuster/__init__.py
_read_precursor
def _read_precursor(precursor, sps): """ Load precursor file for that species """ hairpin = defaultdict(str) name = None with open(precursor) as in_handle: for line in in_handle: if line.startswith(">"): if hairpin[name]: hairpin[name] = hairpin[name] + "NNNNNNNNNNNN" name = line.strip().replace(">", " ").split()[0] else: hairpin[name] += line.strip() hairpin[name] = hairpin[name] + "NNNNNNNNNNNN" return hairpin
python
def _read_precursor(precursor, sps): hairpin = defaultdict(str) name = None with open(precursor) as in_handle: for line in in_handle: if line.startswith(">"): if hairpin[name]: hairpin[name] = hairpin[name] + "NNNNNNNNNNNN" name = line.strip().replace(">", " ").split()[0] else: hairpin[name] += line.strip() hairpin[name] = hairpin[name] + "NNNNNNNNNNNN" return hairpin
[ "def", "_read_precursor", "(", "precursor", ",", "sps", ")", ":", "hairpin", "=", "defaultdict", "(", "str", ")", "name", "=", "None", "with", "open", "(", "precursor", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", "...
Load precursor file for that species
[ "Load", "precursor", "file", "for", "that", "species" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/seqbuster/__init__.py#L112-L127
13,796
lpantano/seqcluster
seqcluster/seqbuster/__init__.py
_read_gtf
def _read_gtf(gtf): """ Load GTF file with precursor positions on genome """ if not gtf: return gtf db = defaultdict(list) with open(gtf) as in_handle: for line in in_handle: if line.startswith("#"): continue cols = line.strip().split("\t") name = [n.split("=")[1] for n in cols[-1].split(";") if n.startswith("Name")] chrom, start, end, strand = cols[0], cols[3], cols[4], cols[6] if cols[2] == "miRNA_primary_transcript": db[name[0]].append([chrom, int(start), int(end), strand]) return db
python
def _read_gtf(gtf): if not gtf: return gtf db = defaultdict(list) with open(gtf) as in_handle: for line in in_handle: if line.startswith("#"): continue cols = line.strip().split("\t") name = [n.split("=")[1] for n in cols[-1].split(";") if n.startswith("Name")] chrom, start, end, strand = cols[0], cols[3], cols[4], cols[6] if cols[2] == "miRNA_primary_transcript": db[name[0]].append([chrom, int(start), int(end), strand]) return db
[ "def", "_read_gtf", "(", "gtf", ")", ":", "if", "not", "gtf", ":", "return", "gtf", "db", "=", "defaultdict", "(", "list", ")", "with", "open", "(", "gtf", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "star...
Load GTF file with precursor positions on genome
[ "Load", "GTF", "file", "with", "precursor", "positions", "on", "genome" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/seqbuster/__init__.py#L130-L146
13,797
lpantano/seqcluster
seqcluster/seqbuster/__init__.py
_coord
def _coord(sequence, start, mirna, precursor, iso): """ Define t5 and t3 isomirs """ dif = abs(mirna[0] - start) if start < mirna[0]: iso.t5 = sequence[:dif].upper() elif start > mirna[0]: iso.t5 = precursor[mirna[0] - 1:mirna[0] - 1 + dif].lower() elif start == mirna[0]: iso.t5 = "NA" if dif > 4: logger.debug("start > 3 %s %s %s %s %s" % (start, len(sequence), dif, mirna, iso.format())) return None end = start + (len(sequence) - len(iso.add)) - 1 dif = abs(mirna[1] - end) if iso.add: sequence = sequence[:-len(iso.add)] # if dif > 3: # return None if end > mirna[1]: iso.t3 = sequence[-dif:].upper() elif end < mirna[1]: iso.t3 = precursor[mirna[1] - dif:mirna[1]].lower() elif end == mirna[1]: iso.t3 = "NA" if dif > 4: logger.debug("end > 3 %s %s %s %s %s" % (len(sequence), end, dif, mirna, iso.format())) return None logger.debug("%s %s %s %s %s %s" % (start, len(sequence), end, dif, mirna, iso.format())) return True
python
def _coord(sequence, start, mirna, precursor, iso): dif = abs(mirna[0] - start) if start < mirna[0]: iso.t5 = sequence[:dif].upper() elif start > mirna[0]: iso.t5 = precursor[mirna[0] - 1:mirna[0] - 1 + dif].lower() elif start == mirna[0]: iso.t5 = "NA" if dif > 4: logger.debug("start > 3 %s %s %s %s %s" % (start, len(sequence), dif, mirna, iso.format())) return None end = start + (len(sequence) - len(iso.add)) - 1 dif = abs(mirna[1] - end) if iso.add: sequence = sequence[:-len(iso.add)] # if dif > 3: # return None if end > mirna[1]: iso.t3 = sequence[-dif:].upper() elif end < mirna[1]: iso.t3 = precursor[mirna[1] - dif:mirna[1]].lower() elif end == mirna[1]: iso.t3 = "NA" if dif > 4: logger.debug("end > 3 %s %s %s %s %s" % (len(sequence), end, dif, mirna, iso.format())) return None logger.debug("%s %s %s %s %s %s" % (start, len(sequence), end, dif, mirna, iso.format())) return True
[ "def", "_coord", "(", "sequence", ",", "start", ",", "mirna", ",", "precursor", ",", "iso", ")", ":", "dif", "=", "abs", "(", "mirna", "[", "0", "]", "-", "start", ")", "if", "start", "<", "mirna", "[", "0", "]", ":", "iso", ".", "t5", "=", "...
Define t5 and t3 isomirs
[ "Define", "t5", "and", "t3", "isomirs" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/seqbuster/__init__.py#L149-L180
13,798
lpantano/seqcluster
seqcluster/seqbuster/__init__.py
_realign
def _realign(seq, precursor, start): """ The actual fn that will realign the sequence """ error = set() pattern_addition = [[1, 1, 0], [1, 0, 1], [0, 1, 0], [0, 1, 1], [0, 0, 1], [1, 1, 1]] for pos in range(0, len(seq)): if seq[pos] != precursor[(start + pos)]: error.add(pos) subs, add = [], [] for e in error: if e < len(seq) - 3: subs.append([e, seq[e], precursor[start + e]]) pattern, error_add = [], [] for e in range(len(seq) - 3, len(seq)): if e in error: pattern.append(1) error_add.append(e) else: pattern.append(0) for p in pattern_addition: if pattern == p: add = seq[error_add[0]:] break if not add and error_add: for e in error_add: subs.append([e, seq[e], precursor[start + e]]) return subs, add
python
def _realign(seq, precursor, start): error = set() pattern_addition = [[1, 1, 0], [1, 0, 1], [0, 1, 0], [0, 1, 1], [0, 0, 1], [1, 1, 1]] for pos in range(0, len(seq)): if seq[pos] != precursor[(start + pos)]: error.add(pos) subs, add = [], [] for e in error: if e < len(seq) - 3: subs.append([e, seq[e], precursor[start + e]]) pattern, error_add = [], [] for e in range(len(seq) - 3, len(seq)): if e in error: pattern.append(1) error_add.append(e) else: pattern.append(0) for p in pattern_addition: if pattern == p: add = seq[error_add[0]:] break if not add and error_add: for e in error_add: subs.append([e, seq[e], precursor[start + e]]) return subs, add
[ "def", "_realign", "(", "seq", ",", "precursor", ",", "start", ")", ":", "error", "=", "set", "(", ")", "pattern_addition", "=", "[", "[", "1", ",", "1", ",", "0", "]", ",", "[", "1", ",", "0", ",", "1", "]", ",", "[", "0", ",", "1", ",", ...
The actual fn that will realign the sequence
[ "The", "actual", "fn", "that", "will", "realign", "the", "sequence" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/seqbuster/__init__.py#L201-L231
13,799
lpantano/seqcluster
seqcluster/seqbuster/__init__.py
_clean_hits
def _clean_hits(reads): """ Select only best matches """ new_reads = defaultdict(realign) for r in reads: world = {} sc = 0 for p in reads[r].precursors: world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence)) if sc < world[p]: sc = world[p] new_reads[r] = reads[r] for p in world: logger.debug("score %s %s %s" % (r, p, world[p])) if sc != world[p]: logger.debug("remove %s %s %s" % (r, p, world[p])) new_reads[r].remove_precursor(p) return new_reads
python
def _clean_hits(reads): new_reads = defaultdict(realign) for r in reads: world = {} sc = 0 for p in reads[r].precursors: world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence)) if sc < world[p]: sc = world[p] new_reads[r] = reads[r] for p in world: logger.debug("score %s %s %s" % (r, p, world[p])) if sc != world[p]: logger.debug("remove %s %s %s" % (r, p, world[p])) new_reads[r].remove_precursor(p) return new_reads
[ "def", "_clean_hits", "(", "reads", ")", ":", "new_reads", "=", "defaultdict", "(", "realign", ")", "for", "r", "in", "reads", ":", "world", "=", "{", "}", "sc", "=", "0", "for", "p", "in", "reads", "[", "r", "]", ".", "precursors", ":", "world", ...
Select only best matches
[ "Select", "only", "best", "matches" ]
774e23add8cd4fdc83d626cea3bd1f458e7d060d
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/seqbuster/__init__.py#L234-L253