repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
biolink/ontobio
ontobio/assocmodel.py
AssociationSet.query_intersections
def query_intersections(self, x_terms=None, y_terms=None, symmetric=False):
    """
    Query for intersections of terms in two lists

    Return a list of intersection result objects with keys:

      - x : term from x
      - y : term from y
      - c : count of intersection
      - j : jaccard score
    """
    x_terms = [] if x_terms is None else x_terms
    y_terms = [] if y_terms is None else y_terms
    all_terms = set(x_terms) | set(y_terms)

    # Build term -> set of subjects whose inferred closure contains that term.
    # This could be precomputed ahead of time for all subjects, but that may
    # be space-expensive. TODO: benchmark
    subjects_by_term = {t: set() for t in all_terms}
    for subj in self.subjects:
        closure = self.inferred_types(subj)
        for term in closure.intersection(all_terms):
            subjects_by_term[term].add(subj)

    results = []
    for x in x_terms:
        for y in y_terms:
            # In symmetric mode only emit each unordered pair once.
            if symmetric and not x < y:
                continue
            shared = subjects_by_term[x] & subjects_by_term[y]
            union = subjects_by_term[x] | subjects_by_term[y]
            jaccard = len(shared) / len(union) if union else 0
            results.append({'x': x, 'y': y, 'shared': shared,
                            'c': len(shared), 'j': jaccard})
    return results
python
def query_intersections(self, x_terms=None, y_terms=None, symmetric=False): """ Query for intersections of terms in two lists Return a list of intersection result objects with keys: - x : term from x - y : term from y - c : count of intersection - j : jaccard score """ if x_terms is None: x_terms = [] if y_terms is None: y_terms = [] xset = set(x_terms) yset = set(y_terms) zset = xset.union(yset) # first built map of gene->termClosure. # this could be calculated ahead of time for all g, # but this may be space-expensive. TODO: benchmark gmap={} for z in zset: gmap[z] = [] for subj in self.subjects: ancs = self.inferred_types(subj) for a in ancs.intersection(zset): gmap[a].append(subj) for z in zset: gmap[z] = set(gmap[z]) ilist = [] for x in x_terms: for y in y_terms: if not symmetric or x<y: shared = gmap[x].intersection(gmap[y]) union = gmap[x].union(gmap[y]) j = 0 if len(union)>0: j = len(shared) / len(union) ilist.append({'x':x,'y':y,'shared':shared, 'c':len(shared), 'j':j}) return ilist
[ "def", "query_intersections", "(", "self", ",", "x_terms", "=", "None", ",", "y_terms", "=", "None", ",", "symmetric", "=", "False", ")", ":", "if", "x_terms", "is", "None", ":", "x_terms", "=", "[", "]", "if", "y_terms", "is", "None", ":", "y_terms", ...
Query for intersections of terms in two lists Return a list of intersection result objects with keys: - x : term from x - y : term from y - c : count of intersection - j : jaccard score
[ "Query", "for", "intersections", "of", "terms", "in", "two", "lists" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assocmodel.py#L226-L266
train
204,900
biolink/ontobio
ontobio/assocmodel.py
AssociationSet.intersectionlist_to_matrix
def intersectionlist_to_matrix(ilist, xterms, yterms):
    """
    WILL BE DEPRECATED

    Replace with method to return pandas dataframe
    """
    # Map each term to its column/row position once, then fill the matrix.
    col_index = {term: i for i, term in enumerate(xterms)}
    row_index = {term: i for i, term in enumerate(yterms)}
    matrix = [[0 for _ in xterms] for _ in yterms]
    for entry in ilist:
        matrix[row_index[entry['y']]][col_index[entry['x']]] = entry['j']
    logging.debug("Z={}".format(matrix))
    return (matrix, xterms, yterms)
python
def intersectionlist_to_matrix(ilist, xterms, yterms): """ WILL BE DEPRECATED Replace with method to return pandas dataframe """ z = [ [0] * len(xterms) for i1 in range(len(yterms)) ] xmap = {} xi = 0 for x in xterms: xmap[x] = xi xi = xi+1 ymap = {} yi = 0 for y in yterms: ymap[y] = yi yi = yi+1 for i in ilist: z[ymap[i['y']]][xmap[i['x']]] = i['j'] logging.debug("Z={}".format(z)) return (z,xterms,yterms)
[ "def", "intersectionlist_to_matrix", "(", "ilist", ",", "xterms", ",", "yterms", ")", ":", "z", "=", "[", "[", "0", "]", "*", "len", "(", "xterms", ")", "for", "i1", "in", "range", "(", "len", "(", "yterms", ")", ")", "]", "xmap", "=", "{", "}", ...
WILL BE DEPRECATED Replace with method to return pandas dataframe
[ "WILL", "BE", "DEPRECATED" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assocmodel.py#L269-L293
train
204,901
biolink/ontobio
ontobio/assocmodel.py
AssociationSet.as_dataframe
def as_dataframe(self, fillna=True, subjects=None):
    """
    Return association set as pandas DataFrame

    Each row is a subject (e.g. gene)
    Each column is the inferred class used to describe the subject
    """
    selected_subjects = subjects if subjects is not None else self.subjects
    # One row per subject: inferred class -> 1 (absent classes become NaN).
    entries = [{c: 1 for c in self.inferred_types(s)}
               for s in selected_subjects]
    logging.debug("Creating DataFrame")
    df = pd.DataFrame(entries, index=selected_subjects)
    if fillna:
        logging.debug("Performing fillna...")
        df = df.fillna(0)
    return df
python
def as_dataframe(self, fillna=True, subjects=None): """ Return association set as pandas DataFrame Each row is a subject (e.g. gene) Each column is the inferred class used to describe the subject """ entries = [] selected_subjects = self.subjects if subjects is not None: selected_subjects = subjects for s in selected_subjects: vmap = {} for c in self.inferred_types(s): vmap[c] = 1 entries.append(vmap) logging.debug("Creating DataFrame") df = pd.DataFrame(entries, index=selected_subjects) if fillna: logging.debug("Performing fillna...") df = df.fillna(0) return df
[ "def", "as_dataframe", "(", "self", ",", "fillna", "=", "True", ",", "subjects", "=", "None", ")", ":", "entries", "=", "[", "]", "selected_subjects", "=", "self", ".", "subjects", "if", "subjects", "is", "not", "None", ":", "selected_subjects", "=", "su...
Return association set as pandas DataFrame Each row is a subject (e.g. gene) Each column is the inferred class used to describe the subject
[ "Return", "association", "set", "as", "pandas", "DataFrame" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assocmodel.py#L295-L317
train
204,902
biolink/ontobio
ontobio/assocmodel.py
AssociationSet.label
def label(self, id):
    """
    return label for a subject id

    Will make use of both the ontology and the association set
    """
    # Prefer the ontology's label when one is available.
    if self.ontology is not None:
        ontology_label = self.ontology.label(id)
        if ontology_label is not None:
            return ontology_label
    # Fall back on labels carried by the association set itself.
    lmap = self.subject_label_map
    if lmap is not None and id in lmap:
        return lmap[id]
    return None
python
def label(self, id): """ return label for a subject id Will make use of both the ontology and the association set """ if self.ontology is not None: label = self.ontology.label(id) if label is not None: return label if self.subject_label_map is not None and id in self.subject_label_map: return self.subject_label_map[id] return None
[ "def", "label", "(", "self", ",", "id", ")", ":", "if", "self", ".", "ontology", "is", "not", "None", ":", "label", "=", "self", ".", "ontology", ".", "label", "(", "id", ")", "if", "label", "is", "not", "None", ":", "return", "label", "if", "sel...
return label for a subject id Will make use of both the ontology and the association set
[ "return", "label", "for", "a", "subject", "id" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assocmodel.py#L319-L331
train
204,903
biolink/ontobio
ontobio/assocmodel.py
AssociationSet.subontology
def subontology(self, minimal=False):
    """
    Generates a sub-ontology based on associations
    """
    # Delegate to the ontology, restricted to this set's annotated objects.
    annotated_objects = self.objects
    return self.ontology.subontology(annotated_objects, minimal=minimal)
python
def subontology(self, minimal=False): """ Generates a sub-ontology based on associations """ return self.ontology.subontology(self.objects, minimal=minimal)
[ "def", "subontology", "(", "self", ",", "minimal", "=", "False", ")", ":", "return", "self", ".", "ontology", ".", "subontology", "(", "self", ".", "objects", ",", "minimal", "=", "minimal", ")" ]
Generates a sub-ontology based on associations
[ "Generates", "a", "sub", "-", "ontology", "based", "on", "associations" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assocmodel.py#L333-L337
train
204,904
biolink/ontobio
ontobio/assocmodel.py
AssociationSet.similarity_matrix
def similarity_matrix(self, x_subjects=None, y_subjects=None, symmetric=False):
    """
    Query for similarity matrix between groups of subjects

    Return a list of intersection result objects with keys:

      - x : term from x
      - y : term from y
      - c : count of intersection
      - j : jaccard score
    """
    x_subjects = [] if x_subjects is None else x_subjects
    y_subjects = [] if y_subjects is None else y_subjects
    # Cache the inferred term closure for every subject involved.
    # This could be precomputed for all subjects ahead of time, but that may
    # be space-expensive. TODO: benchmark
    closures = {s: self.inferred_types(s)
                for s in set(x_subjects) | set(y_subjects)}
    pairs = []
    for x in x_subjects:
        for y in y_subjects:
            # In symmetric mode only emit each unordered pair once.
            if symmetric and not x < y:
                continue
            shared = closures[x] & closures[y]
            union = closures[x] | closures[y]
            jaccard = len(shared) / len(union) if union else 0
            pairs.append({'x': x, 'y': y, 'shared': shared,
                          'c': len(shared), 'j': jaccard})
    return self.intersectionlist_to_matrix(pairs, x_subjects, y_subjects)
python
def similarity_matrix(self, x_subjects=None, y_subjects=None, symmetric=False): """ Query for similarity matrix between groups of subjects Return a list of intersection result objects with keys: - x : term from x - y : term from y - c : count of intersection - j : jaccard score """ if x_subjects is None: x_subjects = [] if y_subjects is None: y_subjects = [] xset = set(x_subjects) yset = set(y_subjects) zset = xset.union(yset) # first built map of gene->termClosure. # this could be calculated ahead of time for all g, # but this may be space-expensive. TODO: benchmark gmap={} for z in zset: gmap[z] = self.inferred_types(z) ilist = [] for x in x_subjects: for y in y_subjects: if not symmetric or x<y: shared = gmap[x].intersection(gmap[y]) union = gmap[x].union(gmap[y]) j = 0 if len(union)>0: j = len(shared) / len(union) ilist.append({'x':x,'y':y,'shared':shared, 'c':len(shared), 'j':j}) return self.intersectionlist_to_matrix(ilist, x_subjects, y_subjects)
[ "def", "similarity_matrix", "(", "self", ",", "x_subjects", "=", "None", ",", "y_subjects", "=", "None", ",", "symmetric", "=", "False", ")", ":", "if", "x_subjects", "is", "None", ":", "x_subjects", "=", "[", "]", "if", "y_subjects", "is", "None", ":", ...
Query for similarity matrix between groups of subjects Return a list of intersection result objects with keys: - x : term from x - y : term from y - c : count of intersection - j : jaccard score
[ "Query", "for", "similarity", "matrix", "between", "groups", "of", "subjects" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assocmodel.py#L476-L511
train
204,905
biolink/ontobio
ontobio/sparql/sparql_ontol_utils.py
get_edges
def get_edges(ont):
    """
    Fetches all basic edges from a remote ontology

    Combines is_a links, someValuesFrom restrictions and subPropertyOf
    links into one list of (subject, predicate, object) edges.
    """
    logging.info("QUERYING:"+ont)
    edges = [(c, SUBCLASS_OF, d) for (c, d) in fetchall_isa(ont)]
    edges += fetchall_svf(ont)
    edges += [(c, SUBPROPERTY_OF, d) for (c, d) in fetchall_subPropertyOf(ont)]
    if not edges:
        # logging.warn is a deprecated alias of logging.warning
        logging.warning("No edges for {}".format(ont))
    return edges
python
def get_edges(ont): """ Fetches all basic edges from a remote ontology """ logging.info("QUERYING:"+ont) edges = [(c,SUBCLASS_OF, d) for (c,d) in fetchall_isa(ont)] edges += fetchall_svf(ont) edges += [(c,SUBPROPERTY_OF, d) for (c,d) in fetchall_subPropertyOf(ont)] if len(edges) == 0: logging.warn("No edges for {}".format(ont)) return edges
[ "def", "get_edges", "(", "ont", ")", ":", "logging", ".", "info", "(", "\"QUERYING:\"", "+", "ont", ")", "edges", "=", "[", "(", "c", ",", "SUBCLASS_OF", ",", "d", ")", "for", "(", "c", ",", "d", ")", "in", "fetchall_isa", "(", "ont", ")", "]", ...
Fetches all basic edges from a remote ontology
[ "Fetches", "all", "basic", "edges", "from", "a", "remote", "ontology" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sparql/sparql_ontol_utils.py#L91-L101
train
204,906
biolink/ontobio
ontobio/sparql/sparql_ontol_utils.py
transformArray
def transformArray(data, keysToSplit=None):
    """
    Transform a SPARQL json array based on the rules of transform

    Arguments
    ---------
    data:
        iterable of SPARQL json result items
    keysToSplit:
        optional list of keys whose values should be split; defaults to an
        empty list. (A None default avoids the shared-mutable-default pitfall
        of the previous `keysToSplit=[]` signature.)
    """
    if keysToSplit is None:
        keysToSplit = []
    return [transform(item, keysToSplit) for item in data]
python
def transformArray(data, keysToSplit=[]): """ Transform a SPARQL json array based on the rules of transform """ transformed = [ ] for item in data: transformed.append(transform(item, keysToSplit)) return transformed
[ "def", "transformArray", "(", "data", ",", "keysToSplit", "=", "[", "]", ")", ":", "transformed", "=", "[", "]", "for", "item", "in", "data", ":", "transformed", ".", "append", "(", "transform", "(", "item", ",", "keysToSplit", ")", ")", "return", "tra...
Transform a SPARQL json array based on the rules of transform
[ "Transform", "a", "SPARQL", "json", "array", "based", "on", "the", "rules", "of", "transform" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sparql/sparql_ontol_utils.py#L405-L412
train
204,907
biolink/ontobio
ontobio/ecomap.py
EcoMap.coderef_to_ecoclass
def coderef_to_ecoclass(self, code, reference=None):
    """
    Map a GAF code to an ECO class

    Arguments
    ---------
    code : str
        GAF evidence code, e.g. ISS, IDA
    reference: str
        CURIE for a reference for the evidence instance. E.g. GO_REF:0000001.
        Optional - If provided can give a mapping to a more specific ECO class

    Return
    ------
    str
        ECO class CURIE/ID
    """
    fallback = None
    code_str = str(code)
    for (mapped_code, mapped_ref, eco_cls) in self.mappings():
        if str(mapped_code) != code_str:
            continue
        if mapped_ref == reference:
            # Exact (code, reference) match wins immediately.
            return eco_cls
        if mapped_ref is None:
            # Remember the generic, reference-less mapping as a fallback.
            fallback = eco_cls
    return fallback
python
def coderef_to_ecoclass(self, code, reference=None): """ Map a GAF code to an ECO class Arguments --------- code : str GAF evidence code, e.g. ISS, IDA reference: str CURIE for a reference for the evidence instance. E.g. GO_REF:0000001. Optional - If provided can give a mapping to a more specific ECO class Return ------ str ECO class CURIE/ID """ mcls = None for (this_code,this_ref,cls) in self.mappings(): if str(this_code) == str(code): if this_ref == reference: return cls if this_ref is None: mcls = cls return mcls
[ "def", "coderef_to_ecoclass", "(", "self", ",", "code", ",", "reference", "=", "None", ")", ":", "mcls", "=", "None", "for", "(", "this_code", ",", "this_ref", ",", "cls", ")", "in", "self", ".", "mappings", "(", ")", ":", "if", "str", "(", "this_cod...
Map a GAF code to an ECO class Arguments --------- code : str GAF evidence code, e.g. ISS, IDA reference: str CURIE for a reference for the evidence instance. E.g. GO_REF:0000001. Optional - If provided can give a mapping to a more specific ECO class Return ------ str ECO class CURIE/ID
[ "Map", "a", "GAF", "code", "to", "an", "ECO", "class" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ecomap.py#L57-L82
train
204,908
biolink/ontobio
ontobio/ecomap.py
EcoMap.ecoclass_to_coderef
def ecoclass_to_coderef(self, cls):
    """
    Map an ECO class to a GAF code

    This is the reciprocal to :ref:`coderef_to_ecoclass`

    Arguments
    ---------
    cls : str
        ECO class CURIE/ID

    Return
    ------
    (str, str)
        code, reference tuple; (None, None) when no mapping exists
    """
    # (The previous version initialized dead locals `code`/`ref` before the
    # loop and mis-documented `cls` as a GAF code; behavior is unchanged.)
    for (code, ref, this_cls) in self.mappings():
        if cls == this_cls:
            return code, ref
    return None, None
python
def ecoclass_to_coderef(self, cls): """ Map an ECO class to a GAF code This is the reciprocal to :ref:`coderef_to_ecoclass` Arguments --------- cls : str GAF evidence code, e.g. ISS, IDA reference: str ECO class CURIE/ID Return ------ (str, str) code, reference tuple """ code = '' ref = None for (code,ref,this_cls) in self.mappings(): if cls == this_cls: return code, ref return None, None
[ "def", "ecoclass_to_coderef", "(", "self", ",", "cls", ")", ":", "code", "=", "''", "ref", "=", "None", "for", "(", "code", ",", "ref", ",", "this_cls", ")", "in", "self", ".", "mappings", "(", ")", ":", "if", "cls", "==", "this_cls", ":", "return"...
Map an ECO class to a GAF code This is the reciprocal to :ref:`coderef_to_ecoclass` Arguments --------- cls : str GAF evidence code, e.g. ISS, IDA reference: str ECO class CURIE/ID Return ------ (str, str) code, reference tuple
[ "Map", "an", "ECO", "class", "to", "a", "GAF", "code" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ecomap.py#L84-L107
train
204,909
biolink/ontobio
ontobio/ontol_factory.py
get_checksum
def get_checksum(file):
    """
    Get SHA256 hash from the contents of a given file
    """
    digest = hashlib.sha256()
    with open(file, 'rb') as fh:
        digest.update(fh.read())
    return digest.hexdigest()
python
def get_checksum(file): """ Get SHA256 hash from the contents of a given file """ with open(file, 'rb') as FH: contents = FH.read() return hashlib.sha256(contents).hexdigest()
[ "def", "get_checksum", "(", "file", ")", ":", "with", "open", "(", "file", ",", "'rb'", ")", "as", "FH", ":", "contents", "=", "FH", ".", "read", "(", ")", "return", "hashlib", ".", "sha256", "(", "contents", ")", ".", "hexdigest", "(", ")" ]
Get SHA256 hash from the contents of a given file
[ "Get", "SHA256", "hash", "from", "the", "contents", "of", "a", "given", "file" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol_factory.py#L177-L183
train
204,910
biolink/ontobio
ontobio/ontol_factory.py
OntologyFactory.create
def create(self, handle=None, handle_type=None, **args):
    """
    Creates an ontology based on a handle

    Handle is one of the following

    - `FILENAME.json` : creates an ontology from an obographs json file
    - `obo:ONTID` : E.g. obo:pato - creates an ontology from obolibrary PURL (requires owltools)
    - `ONTID` : E.g. 'pato' - creates an ontology from a remote SPARQL query

    Arguments
    ---------
    handle : str
        specifies how to retrieve the ontology info
    handle_type : str
        not referenced in this body; accepted for interface compatibility
    **args
        passed through to create_ontology
    """
    if handle is None:
        # No handle given: fall back to a lazily-created, module-level
        # default ontology shared by every factory instance.
        self.test = self.test+1  # NOTE(review): looks like leftover debug instrumentation — confirm
        logging.info("T: "+str(self.test))
        global default_ontology
        if default_ontology is None:
            logging.info("Creating new instance of default ontology")
            default_ontology = create_ontology(default_ontology_handle)
        logging.info("Using default_ontology")
        return default_ontology
    return create_ontology(handle, **args)
python
def create(self, handle=None, handle_type=None, **args): """ Creates an ontology based on a handle Handle is one of the following - `FILENAME.json` : creates an ontology from an obographs json file - `obo:ONTID` : E.g. obo:pato - creates an ontology from obolibrary PURL (requires owltools) - `ONTID` : E.g. 'pato' - creates an ontology from a remote SPARQL query Arguments --------- handle : str specifies how to retrieve the ontology info """ if handle is None: self.test = self.test+1 logging.info("T: "+str(self.test)) global default_ontology if default_ontology is None: logging.info("Creating new instance of default ontology") default_ontology = create_ontology(default_ontology_handle) logging.info("Using default_ontology") return default_ontology return create_ontology(handle, **args)
[ "def", "create", "(", "self", ",", "handle", "=", "None", ",", "handle_type", "=", "None", ",", "*", "*", "args", ")", ":", "if", "handle", "is", "None", ":", "self", ".", "test", "=", "self", ".", "test", "+", "1", "logging", ".", "info", "(", ...
Creates an ontology based on a handle Handle is one of the following - `FILENAME.json` : creates an ontology from an obographs json file - `obo:ONTID` : E.g. obo:pato - creates an ontology from obolibrary PURL (requires owltools) - `ONTID` : E.g. 'pato' - creates an ontology from a remote SPARQL query Arguments --------- handle : str specifies how to retrieve the ontology info
[ "Creates", "an", "ontology", "based", "on", "a", "handle" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol_factory.py#L51-L76
train
204,911
biolink/ontobio
ontobio/io/parsereport.py
Report._rule_id
def _rule_id(self, id: int) -> str: """ Convert an integer into a gorule key id. """ if id is None or id == 0 or id >= 10000000: return "other" return "gorule-{:0>7}".format(id)
python
def _rule_id(self, id: int) -> str: """ Convert an integer into a gorule key id. """ if id is None or id == 0 or id >= 10000000: return "other" return "gorule-{:0>7}".format(id)
[ "def", "_rule_id", "(", "self", ",", "id", ":", "int", ")", "->", "str", ":", "if", "id", "is", "None", "or", "id", "==", "0", "or", "id", ">=", "10000000", ":", "return", "\"other\"", "return", "\"gorule-{:0>7}\"", ".", "format", "(", "id", ")" ]
Convert an integer into a gorule key id.
[ "Convert", "an", "integer", "into", "a", "gorule", "key", "id", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/parsereport.py#L17-L24
train
204,912
biolink/ontobio
ontobio/obograph_util.py
convert_json_file
def convert_json_file(obographfile, **args):
    """
    Return a networkx MultiDiGraph of the ontologies
    serialized as a json string

    Arguments
    ---------
    obographfile : str
        path to an obographs json file
    **args
        passed through to convert_json_object
    """
    # 'with' guarantees the handle is closed even if reading or parsing
    # raises; the previous open/read/close sequence leaked on error.
    with open(obographfile, 'r') as f:
        jsonstr = f.read()
    return convert_json_object(json.loads(jsonstr), **args)
python
def convert_json_file(obographfile, **args): """ Return a networkx MultiDiGraph of the ontologies serialized as a json string """ f = open(obographfile, 'r') jsonstr = f.read() f.close() return convert_json_object(json.loads(jsonstr), **args)
[ "def", "convert_json_file", "(", "obographfile", ",", "*", "*", "args", ")", ":", "f", "=", "open", "(", "obographfile", ",", "'r'", ")", "jsonstr", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "return", "convert_json_object", "(", "...
Return a networkx MultiDiGraph of the ontologies serialized as a json string
[ "Return", "a", "networkx", "MultiDiGraph", "of", "the", "ontologies", "serialized", "as", "a", "json", "string" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/obograph_util.py#L107-L116
train
204,913
biolink/ontobio
ontobio/obograph_util.py
convert_json_object
def convert_json_object(obographdoc, **args):
    """
    Return a networkx MultiDiGraph of the ontologies
    serialized as a json object
    """
    xref_graph = networkx.MultiGraph()
    logical_definitions = []
    property_chain_axioms = []
    context = obographdoc.get('@context', {})
    logging.info("CONTEXT: {}".format(context))
    mapper = OboJsonMapper(digraph=networkx.MultiDiGraph(), context=context)
    ogs = obographdoc['graphs']
    base_og = ogs[0]
    for og in ogs:
        # TODO: refactor this
        mapper.add_obograph_digraph(og, xref_graph=xref_graph,
                                    logical_definitions=logical_definitions,
                                    property_chain_axioms=property_chain_axioms,
                                    **args)
    return {
        'id': base_og.get('id'),
        'meta': base_og.get('meta'),
        'graph': mapper.digraph,
        'xref_graph': xref_graph,
        'graphdoc': obographdoc,
        'logical_definitions': logical_definitions,
        'property_chain_axioms': property_chain_axioms
    }
python
def convert_json_object(obographdoc, **args): """ Return a networkx MultiDiGraph of the ontologies serialized as a json object """ digraph = networkx.MultiDiGraph() xref_graph = networkx.MultiGraph() logical_definitions = [] property_chain_axioms = [] context = obographdoc.get('@context',{}) logging.info("CONTEXT: {}".format(context)) mapper = OboJsonMapper(digraph=digraph, context=context) ogs = obographdoc['graphs'] base_og = ogs[0] for og in ogs: # TODO: refactor this mapper.add_obograph_digraph(og, xref_graph=xref_graph, logical_definitions=logical_definitions, property_chain_axioms=property_chain_axioms, **args) return { 'id': base_og.get('id'), 'meta': base_og.get('meta'), 'graph': mapper.digraph, 'xref_graph': xref_graph, 'graphdoc': obographdoc, 'logical_definitions': logical_definitions, 'property_chain_axioms': property_chain_axioms }
[ "def", "convert_json_object", "(", "obographdoc", ",", "*", "*", "args", ")", ":", "digraph", "=", "networkx", ".", "MultiDiGraph", "(", ")", "xref_graph", "=", "networkx", ".", "MultiGraph", "(", ")", "logical_definitions", "=", "[", "]", "property_chain_axio...
Return a networkx MultiDiGraph of the ontologies serialized as a json object
[ "Return", "a", "networkx", "MultiDiGraph", "of", "the", "ontologies", "serialized", "as", "a", "json", "object" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/obograph_util.py#L118-L147
train
204,914
google/importlab
importlab/resolve.py
infer_module_name
def infer_module_name(filename, fspath):
    """Convert a python filename to a module relative to pythonpath."""
    stem, _ = os.path.splitext(filename)
    init_suffix = os.path.sep + "__init__"
    for fs in fspath:
        short_name = fs.relative_path(stem)
        if not short_name:
            continue
        # A package's __init__.py takes the name of its directory.
        if short_name.endswith(init_suffix):
            short_name = short_name[:short_name.rfind(os.path.sep)]
        return short_name.replace(os.path.sep, '.')
    # filename is not relative to anything on the pythonpath.
    return ''
python
def infer_module_name(filename, fspath): """Convert a python filename to a module relative to pythonpath.""" filename, _ = os.path.splitext(filename) for f in fspath: short_name = f.relative_path(filename) if short_name: # The module name for __init__.py files is the directory. if short_name.endswith(os.path.sep + "__init__"): short_name = short_name[:short_name.rfind(os.path.sep)] return short_name.replace(os.path.sep, '.') # We have not found filename relative to anywhere in pythonpath. return ''
[ "def", "infer_module_name", "(", "filename", ",", "fspath", ")", ":", "filename", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "for", "f", "in", "fspath", ":", "short_name", "=", "f", ".", "relative_path", "(", "filename", "...
Convert a python filename to a module relative to pythonpath.
[ "Convert", "a", "python", "filename", "to", "a", "module", "relative", "to", "pythonpath", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/resolve.py#L97-L108
train
204,915
google/importlab
importlab/resolve.py
get_absolute_name
def get_absolute_name(package, relative_name):
    """Joins a package name and a relative name.

    Args:
      package: A dotted name, e.g. foo.bar.baz
      relative_name: A dotted name with possibly some leading dots, e.g. ..x.y

    Returns:
      The relative name appended to the parent's package, after going up one
      level for each leading dot.
        e.g. foo.bar.baz + ..hello.world -> foo.hello.world
      The unchanged relative_name if it does not start with a dot or has too
      many leading dots.
    """
    parts = package.split('.') if package else []
    stripped = relative_name.lstrip('.')
    ndots = len(relative_name) - len(stripped)
    if ndots > len(parts):
        # More leading dots than package depth: give up, return unchanged.
        return relative_name
    prefix = parts[:len(parts) + 1 - ndots]
    return '.'.join(prefix + [stripped]) if stripped else '.'.join(prefix)
python
def get_absolute_name(package, relative_name): """Joins a package name and a relative name. Args: package: A dotted name, e.g. foo.bar.baz relative_name: A dotted name with possibly some leading dots, e.g. ..x.y Returns: The relative name appended to the parent's package, after going up one level for each leading dot. e.g. foo.bar.baz + ..hello.world -> foo.hello.world The unchanged relative_name if it does not start with a dot or has too many leading dots. """ path = package.split('.') if package else [] name = relative_name.lstrip('.') ndots = len(relative_name) - len(name) if ndots > len(path): return relative_name absolute_path = path[:len(path) + 1 - ndots] if name: absolute_path.append(name) return '.'.join(absolute_path)
[ "def", "get_absolute_name", "(", "package", ",", "relative_name", ")", ":", "path", "=", "package", ".", "split", "(", "'.'", ")", "if", "package", "else", "[", "]", "name", "=", "relative_name", ".", "lstrip", "(", "'.'", ")", "ndots", "=", "len", "("...
Joins a package name and a relative name. Args: package: A dotted name, e.g. foo.bar.baz relative_name: A dotted name with possibly some leading dots, e.g. ..x.y Returns: The relative name appended to the parent's package, after going up one level for each leading dot. e.g. foo.bar.baz + ..hello.world -> foo.hello.world The unchanged relative_name if it does not start with a dot or has too many leading dots.
[ "Joins", "a", "package", "name", "and", "a", "relative", "name", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/resolve.py#L111-L133
train
204,916
google/importlab
importlab/resolve.py
Resolver.resolve_import
def resolve_import(self, item):
    """Simulate how Python resolves imports.

    Returns the filename of the source file Python would load
    when processing a statement like 'import name' in the module
    we're currently under.

    Args:
      item: An instance of ImportItem

    Returns:
      A filename

    Raises:
      ImportException: If the module doesn't exist.
    """
    name = item.name
    # The last part in `from a.b.c import d` might be a symbol rather than a
    # module, so we try a.b.c and a.b.c.d as names.
    short_name = None
    if item.is_from and not item.is_star:
        if '.' in name.lstrip('.'):
            # The name is something like `a.b.c`, so strip off `.c`.
            rindex = name.rfind('.')
        else:
            # The name is something like `..c`, so strip off just `c`.
            rindex = name.rfind('.') + 1
        short_name = name[:rindex]
    # Builtins short-circuit everything: no file on disk to find.
    if import_finder.is_builtin(name):
        filename = name + '.so'
        return Builtin(filename, name)
    filename, level = convert_to_path(name)
    if level:
        # This is a relative import; we need to resolve the filename
        # relative to the importing file path.
        filename = os.path.normpath(
            os.path.join(self.current_directory, filename))
    # Candidate (module_name, path) pairs: the full name first, then the
    # shortened `from`-form name (its path is the containing directory).
    files = [(name, filename)]
    if short_name:
        short_filename = os.path.dirname(filename)
        files.append((short_name, short_filename))
    # Try each candidate against every filesystem on the search path.
    for module_name, path in files:
        for fs in self.fs_path:
            f = self._find_file(fs, path)
            if not f or f == self.current_module.path:
                # We cannot import a file from itself.
                continue
            if item.is_relative():
                package_name = self.current_module.package_name
                if package_name is None:
                    # Relative import in non-package
                    raise ImportException(name)
                module_name = get_absolute_name(package_name, module_name)
            # Modules found via a System current module stay System;
            # otherwise they are Local to the filesystem that matched.
            if isinstance(self.current_module, System):
                return System(f, module_name)
            return Local(f, module_name, fs)
    # If the module isn't found in the explicit pythonpath, see if python
    # itself resolved it.
    if item.source:
        prefix, ext = os.path.splitext(item.source)
        mod_name = name
        # We need to check for importing a symbol here too.
        if short_name:
            mod = prefix.replace(os.path.sep, '.')
            mod = utils.strip_suffix(mod, '.__init__')
            if not mod.endswith(name) and mod.endswith(short_name):
                mod_name = short_name
        if ext == '.pyc':
            # Prefer the .py source next to a compiled .pyc when it exists.
            pyfile = prefix + '.py'
            if os.path.exists(pyfile):
                return System(pyfile, mod_name)
        elif not ext:
            # An extensionless source presumably names a package directory;
            # point at its __init__.py when present.
            pyfile = os.path.join(prefix, "__init__.py")
            if os.path.exists(pyfile):
                return System(pyfile, mod_name)
        return System(item.source, mod_name)
    raise ImportException(name)
python
def resolve_import(self, item): """Simulate how Python resolves imports. Returns the filename of the source file Python would load when processing a statement like 'import name' in the module we're currently under. Args: item: An instance of ImportItem Returns: A filename Raises: ImportException: If the module doesn't exist. """ name = item.name # The last part in `from a.b.c import d` might be a symbol rather than a # module, so we try a.b.c and a.b.c.d as names. short_name = None if item.is_from and not item.is_star: if '.' in name.lstrip('.'): # The name is something like `a.b.c`, so strip off `.c`. rindex = name.rfind('.') else: # The name is something like `..c`, so strip off just `c`. rindex = name.rfind('.') + 1 short_name = name[:rindex] if import_finder.is_builtin(name): filename = name + '.so' return Builtin(filename, name) filename, level = convert_to_path(name) if level: # This is a relative import; we need to resolve the filename # relative to the importing file path. filename = os.path.normpath( os.path.join(self.current_directory, filename)) files = [(name, filename)] if short_name: short_filename = os.path.dirname(filename) files.append((short_name, short_filename)) for module_name, path in files: for fs in self.fs_path: f = self._find_file(fs, path) if not f or f == self.current_module.path: # We cannot import a file from itself. continue if item.is_relative(): package_name = self.current_module.package_name if package_name is None: # Relative import in non-package raise ImportException(name) module_name = get_absolute_name(package_name, module_name) if isinstance(self.current_module, System): return System(f, module_name) return Local(f, module_name, fs) # If the module isn't found in the explicit pythonpath, see if python # itself resolved it. if item.source: prefix, ext = os.path.splitext(item.source) mod_name = name # We need to check for importing a symbol here too. 
if short_name: mod = prefix.replace(os.path.sep, '.') mod = utils.strip_suffix(mod, '.__init__') if not mod.endswith(name) and mod.endswith(short_name): mod_name = short_name if ext == '.pyc': pyfile = prefix + '.py' if os.path.exists(pyfile): return System(pyfile, mod_name) elif not ext: pyfile = os.path.join(prefix, "__init__.py") if os.path.exists(pyfile): return System(pyfile, mod_name) return System(item.source, mod_name) raise ImportException(name)
[ "def", "resolve_import", "(", "self", ",", "item", ")", ":", "name", "=", "item", ".", "name", "# The last part in `from a.b.c import d` might be a symbol rather than a", "# module, so we try a.b.c and a.b.c.d as names.", "short_name", "=", "None", "if", "item", ".", "is_fr...
Simulate how Python resolves imports. Returns the filename of the source file Python would load when processing a statement like 'import name' in the module we're currently under. Args: item: An instance of ImportItem Returns: A filename Raises: ImportException: If the module doesn't exist.
[ "Simulate", "how", "Python", "resolves", "imports", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/resolve.py#L150-L233
train
204,917
google/importlab
importlab/resolve.py
Resolver.resolve_all
def resolve_all(self, import_items): """Resolves a list of imports. Yields filenames. """ for import_item in import_items: try: yield self.resolve_import(import_item) except ImportException as err: logging.info('unknown module %s', err.module_name)
python
def resolve_all(self, import_items): """Resolves a list of imports. Yields filenames. """ for import_item in import_items: try: yield self.resolve_import(import_item) except ImportException as err: logging.info('unknown module %s', err.module_name)
[ "def", "resolve_all", "(", "self", ",", "import_items", ")", ":", "for", "import_item", "in", "import_items", ":", "try", ":", "yield", "self", ".", "resolve_import", "(", "import_item", ")", "except", "ImportException", "as", "err", ":", "logging", ".", "in...
Resolves a list of imports. Yields filenames.
[ "Resolves", "a", "list", "of", "imports", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/resolve.py#L235-L244
train
204,918
google/importlab
importlab/environment.py
path_from_pythonpath
def path_from_pythonpath(pythonpath): """Create an fs.Path object from a pythonpath string.""" path = fs.Path() for p in pythonpath.split(os.pathsep): path.add_path(utils.expand_path(p), 'os') return path
python
def path_from_pythonpath(pythonpath): """Create an fs.Path object from a pythonpath string.""" path = fs.Path() for p in pythonpath.split(os.pathsep): path.add_path(utils.expand_path(p), 'os') return path
[ "def", "path_from_pythonpath", "(", "pythonpath", ")", ":", "path", "=", "fs", ".", "Path", "(", ")", "for", "p", "in", "pythonpath", ".", "split", "(", "os", ".", "pathsep", ")", ":", "path", ".", "add_path", "(", "utils", ".", "expand_path", "(", "...
Create an fs.Path object from a pythonpath string.
[ "Create", "an", "fs", ".", "Path", "object", "from", "a", "pythonpath", "string", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/environment.py#L13-L18
train
204,919
google/importlab
importlab/output.py
format_file_node
def format_file_node(import_graph, node, indent): """Prettyprint nodes based on their provenance.""" f = import_graph.provenance[node] if isinstance(f, resolve.Direct): out = '+ ' + f.short_path elif isinstance(f, resolve.Local): out = ' ' + f.short_path elif isinstance(f, resolve.System): out = ':: ' + f.short_path elif isinstance(f, resolve.Builtin): out = '(%s)' % f.module_name else: out = '%r' % node return ' '*indent + out
python
def format_file_node(import_graph, node, indent): """Prettyprint nodes based on their provenance.""" f = import_graph.provenance[node] if isinstance(f, resolve.Direct): out = '+ ' + f.short_path elif isinstance(f, resolve.Local): out = ' ' + f.short_path elif isinstance(f, resolve.System): out = ':: ' + f.short_path elif isinstance(f, resolve.Builtin): out = '(%s)' % f.module_name else: out = '%r' % node return ' '*indent + out
[ "def", "format_file_node", "(", "import_graph", ",", "node", ",", "indent", ")", ":", "f", "=", "import_graph", ".", "provenance", "[", "node", "]", "if", "isinstance", "(", "f", ",", "resolve", ".", "Direct", ")", ":", "out", "=", "'+ '", "+", "f", ...
Prettyprint nodes based on their provenance.
[ "Prettyprint", "nodes", "based", "on", "their", "provenance", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/output.py#L20-L33
train
204,920
google/importlab
importlab/output.py
format_node
def format_node(import_graph, node, indent): """Helper function for print_tree""" if isinstance(node, graph.NodeSet): ind = ' ' * indent out = [ind + 'cycle {'] + [ format_file_node(import_graph, n, indent + 1) for n in node.nodes ] + [ind + '}'] return '\n'.join(out) else: return format_file_node(import_graph, node, indent)
python
def format_node(import_graph, node, indent): """Helper function for print_tree""" if isinstance(node, graph.NodeSet): ind = ' ' * indent out = [ind + 'cycle {'] + [ format_file_node(import_graph, n, indent + 1) for n in node.nodes ] + [ind + '}'] return '\n'.join(out) else: return format_file_node(import_graph, node, indent)
[ "def", "format_node", "(", "import_graph", ",", "node", ",", "indent", ")", ":", "if", "isinstance", "(", "node", ",", "graph", ".", "NodeSet", ")", ":", "ind", "=", "' '", "*", "indent", "out", "=", "[", "ind", "+", "'cycle {'", "]", "+", "[", "f...
Helper function for print_tree
[ "Helper", "function", "for", "print_tree" ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/output.py#L36-L46
train
204,921
google/importlab
importlab/import_finder.py
_find_package
def _find_package(parts): """Helper function for _resolve_import_versioned.""" for i in range(len(parts), 0, -1): prefix = '.'.join(parts[0:i]) if prefix in sys.modules: return i, sys.modules[prefix] return 0, None
python
def _find_package(parts): """Helper function for _resolve_import_versioned.""" for i in range(len(parts), 0, -1): prefix = '.'.join(parts[0:i]) if prefix in sys.modules: return i, sys.modules[prefix] return 0, None
[ "def", "_find_package", "(", "parts", ")", ":", "for", "i", "in", "range", "(", "len", "(", "parts", ")", ",", "0", ",", "-", "1", ")", ":", "prefix", "=", "'.'", ".", "join", "(", "parts", "[", "0", ":", "i", "]", ")", "if", "prefix", "in", ...
Helper function for _resolve_import_versioned.
[ "Helper", "function", "for", "_resolve_import_versioned", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/import_finder.py#L44-L50
train
204,922
google/importlab
importlab/import_finder.py
_resolve_import
def _resolve_import(name): """Helper function for resolve_import.""" if name in sys.modules: return getattr(sys.modules[name], '__file__', name + '.so') return _resolve_import_versioned(name)
python
def _resolve_import(name): """Helper function for resolve_import.""" if name in sys.modules: return getattr(sys.modules[name], '__file__', name + '.so') return _resolve_import_versioned(name)
[ "def", "_resolve_import", "(", "name", ")", ":", "if", "name", "in", "sys", ".", "modules", ":", "return", "getattr", "(", "sys", ".", "modules", "[", "name", "]", ",", "'__file__'", ",", "name", "+", "'.so'", ")", "return", "_resolve_import_versioned", ...
Helper function for resolve_import.
[ "Helper", "function", "for", "resolve_import", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/import_finder.py#L96-L100
train
204,923
google/importlab
importlab/import_finder.py
resolve_import
def resolve_import(name, is_from, is_star): """Use python to resolve an import. Args: name: The fully qualified module name. Returns: The path to the module source file or None. """ # Don't try to resolve relative imports or builtins here; they will be # handled by resolve.Resolver if name.startswith('.') or is_builtin(name): return None ret = _resolve_import(name) if ret is None and is_from and not is_star: package, _ = name.rsplit('.', 1) ret = _resolve_import(package) return ret
python
def resolve_import(name, is_from, is_star): """Use python to resolve an import. Args: name: The fully qualified module name. Returns: The path to the module source file or None. """ # Don't try to resolve relative imports or builtins here; they will be # handled by resolve.Resolver if name.startswith('.') or is_builtin(name): return None ret = _resolve_import(name) if ret is None and is_from and not is_star: package, _ = name.rsplit('.', 1) ret = _resolve_import(package) return ret
[ "def", "resolve_import", "(", "name", ",", "is_from", ",", "is_star", ")", ":", "# Don't try to resolve relative imports or builtins here; they will be", "# handled by resolve.Resolver", "if", "name", ".", "startswith", "(", "'.'", ")", "or", "is_builtin", "(", "name", ...
Use python to resolve an import. Args: name: The fully qualified module name. Returns: The path to the module source file or None.
[ "Use", "python", "to", "resolve", "an", "import", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/import_finder.py#L103-L120
train
204,924
google/importlab
importlab/import_finder.py
get_imports
def get_imports(filename): """Get all the imports in a file. Each import is a tuple of: (name, alias, is_from, is_star, source_file) """ with open(filename, "rb") as f: src = f.read() finder = ImportFinder() finder.visit(ast.parse(src, filename=filename)) imports = [] for i in finder.imports: name, _, is_from, is_star = i imports.append(i + (resolve_import(name, is_from, is_star),)) return imports
python
def get_imports(filename): """Get all the imports in a file. Each import is a tuple of: (name, alias, is_from, is_star, source_file) """ with open(filename, "rb") as f: src = f.read() finder = ImportFinder() finder.visit(ast.parse(src, filename=filename)) imports = [] for i in finder.imports: name, _, is_from, is_star = i imports.append(i + (resolve_import(name, is_from, is_star),)) return imports
[ "def", "get_imports", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "f", ":", "src", "=", "f", ".", "read", "(", ")", "finder", "=", "ImportFinder", "(", ")", "finder", ".", "visit", "(", "ast", ".", "parse",...
Get all the imports in a file. Each import is a tuple of: (name, alias, is_from, is_star, source_file)
[ "Get", "all", "the", "imports", "in", "a", "file", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/import_finder.py#L123-L137
train
204,925
google/importlab
importlab/graph.py
DependencyGraph.add_file
def add_file(self, filename): """Add a file and all its immediate dependencies to the graph.""" assert not self.final, 'Trying to mutate a final graph.' self.add_source_file(filename) resolved, unresolved = self.get_file_deps(filename) self.graph.add_node(filename) for f in resolved: self.graph.add_node(f) self.graph.add_edge(filename, f) for imp in unresolved: self.broken_deps[filename].add(imp)
python
def add_file(self, filename): """Add a file and all its immediate dependencies to the graph.""" assert not self.final, 'Trying to mutate a final graph.' self.add_source_file(filename) resolved, unresolved = self.get_file_deps(filename) self.graph.add_node(filename) for f in resolved: self.graph.add_node(f) self.graph.add_edge(filename, f) for imp in unresolved: self.broken_deps[filename].add(imp)
[ "def", "add_file", "(", "self", ",", "filename", ")", ":", "assert", "not", "self", ".", "final", ",", "'Trying to mutate a final graph.'", "self", ".", "add_source_file", "(", "filename", ")", "resolved", ",", "unresolved", "=", "self", ".", "get_file_deps", ...
Add a file and all its immediate dependencies to the graph.
[ "Add", "a", "file", "and", "all", "its", "immediate", "dependencies", "to", "the", "graph", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/graph.py#L68-L79
train
204,926
google/importlab
importlab/graph.py
DependencyGraph.follow_file
def follow_file(self, f, seen, trim): """Whether to recurse into a file's dependencies.""" return (f not in self.graph.nodes and f not in seen and (not trim or not isinstance(self.provenance[f], (resolve.Builtin, resolve.System))))
python
def follow_file(self, f, seen, trim): """Whether to recurse into a file's dependencies.""" return (f not in self.graph.nodes and f not in seen and (not trim or not isinstance(self.provenance[f], (resolve.Builtin, resolve.System))))
[ "def", "follow_file", "(", "self", ",", "f", ",", "seen", ",", "trim", ")", ":", "return", "(", "f", "not", "in", "self", ".", "graph", ".", "nodes", "and", "f", "not", "in", "seen", "and", "(", "not", "trim", "or", "not", "isinstance", "(", "sel...
Whether to recurse into a file's dependencies.
[ "Whether", "to", "recurse", "into", "a", "file", "s", "dependencies", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/graph.py#L81-L87
train
204,927
google/importlab
importlab/graph.py
DependencyGraph.add_file_recursive
def add_file_recursive(self, filename, trim=False): """Add a file and all its recursive dependencies to the graph. Args: filename: The name of the file. trim: Whether to trim the dependencies of builtin and system files. """ assert not self.final, 'Trying to mutate a final graph.' self.add_source_file(filename) queue = collections.deque([filename]) seen = set() while queue: filename = queue.popleft() self.graph.add_node(filename) try: deps, broken = self.get_file_deps(filename) except parsepy.ParseError: # Python couldn't parse `filename`. If we're sure that it is a # Python file, we mark it as unreadable and keep the node in the # graph so importlab's callers can do their own syntax error # handling if desired. if filename.endswith('.py'): self.unreadable_files.add(filename) else: self.graph.remove_node(filename) continue for f in broken: self.broken_deps[filename].add(f) for f in deps: if self.follow_file(f, seen, trim): queue.append(f) seen.add(f) self.graph.add_node(f) self.graph.add_edge(filename, f)
python
def add_file_recursive(self, filename, trim=False): """Add a file and all its recursive dependencies to the graph. Args: filename: The name of the file. trim: Whether to trim the dependencies of builtin and system files. """ assert not self.final, 'Trying to mutate a final graph.' self.add_source_file(filename) queue = collections.deque([filename]) seen = set() while queue: filename = queue.popleft() self.graph.add_node(filename) try: deps, broken = self.get_file_deps(filename) except parsepy.ParseError: # Python couldn't parse `filename`. If we're sure that it is a # Python file, we mark it as unreadable and keep the node in the # graph so importlab's callers can do their own syntax error # handling if desired. if filename.endswith('.py'): self.unreadable_files.add(filename) else: self.graph.remove_node(filename) continue for f in broken: self.broken_deps[filename].add(f) for f in deps: if self.follow_file(f, seen, trim): queue.append(f) seen.add(f) self.graph.add_node(f) self.graph.add_edge(filename, f)
[ "def", "add_file_recursive", "(", "self", ",", "filename", ",", "trim", "=", "False", ")", ":", "assert", "not", "self", ".", "final", ",", "'Trying to mutate a final graph.'", "self", ".", "add_source_file", "(", "filename", ")", "queue", "=", "collections", ...
Add a file and all its recursive dependencies to the graph. Args: filename: The name of the file. trim: Whether to trim the dependencies of builtin and system files.
[ "Add", "a", "file", "and", "all", "its", "recursive", "dependencies", "to", "the", "graph", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/graph.py#L89-L123
train
204,928
google/importlab
importlab/graph.py
DependencyGraph.shrink_to_node
def shrink_to_node(self, scc): """Shrink a strongly connected component into a node.""" assert not self.final, 'Trying to mutate a final graph.' self.graph.add_node(scc) edges = list(self.graph.edges) for k, v in edges: if k not in scc and v in scc: self.graph.remove_edge(k, v) self.graph.add_edge(k, scc) elif k in scc and v not in scc: self.graph.remove_edge(k, v) self.graph.add_edge(scc, v) for node in scc.nodes: self.graph.remove_node(node)
python
def shrink_to_node(self, scc): """Shrink a strongly connected component into a node.""" assert not self.final, 'Trying to mutate a final graph.' self.graph.add_node(scc) edges = list(self.graph.edges) for k, v in edges: if k not in scc and v in scc: self.graph.remove_edge(k, v) self.graph.add_edge(k, scc) elif k in scc and v not in scc: self.graph.remove_edge(k, v) self.graph.add_edge(scc, v) for node in scc.nodes: self.graph.remove_node(node)
[ "def", "shrink_to_node", "(", "self", ",", "scc", ")", ":", "assert", "not", "self", ".", "final", ",", "'Trying to mutate a final graph.'", "self", ".", "graph", ".", "add_node", "(", "scc", ")", "edges", "=", "list", "(", "self", ".", "graph", ".", "ed...
Shrink a strongly connected component into a node.
[ "Shrink", "a", "strongly", "connected", "component", "into", "a", "node", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/graph.py#L125-L138
train
204,929
google/importlab
importlab/graph.py
DependencyGraph.build
def build(self): """Finalise the graph, after adding all input files to it.""" assert not self.final, 'Trying to mutate a final graph.' # Replace each strongly connected component with a single node `NodeSet` for scc in sorted(nx.kosaraju_strongly_connected_components(self.graph), key=len, reverse=True): if len(scc) == 1: break self.shrink_to_node(NodeSet(scc)) self.final = True
python
def build(self): """Finalise the graph, after adding all input files to it.""" assert not self.final, 'Trying to mutate a final graph.' # Replace each strongly connected component with a single node `NodeSet` for scc in sorted(nx.kosaraju_strongly_connected_components(self.graph), key=len, reverse=True): if len(scc) == 1: break self.shrink_to_node(NodeSet(scc)) self.final = True
[ "def", "build", "(", "self", ")", ":", "assert", "not", "self", ".", "final", ",", "'Trying to mutate a final graph.'", "# Replace each strongly connected component with a single node `NodeSet`", "for", "scc", "in", "sorted", "(", "nx", ".", "kosaraju_strongly_connected_com...
Finalise the graph, after adding all input files to it.
[ "Finalise", "the", "graph", "after", "adding", "all", "input", "files", "to", "it", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/graph.py#L156-L169
train
204,930
google/importlab
importlab/graph.py
DependencyGraph.sorted_source_files
def sorted_source_files(self): """Returns a list of targets in topologically sorted order.""" assert self.final, 'Call build() before using the graph.' out = [] for node in nx.topological_sort(self.graph): if isinstance(node, NodeSet): out.append(node.nodes) else: # add a one-element list for uniformity out.append([node]) return list(reversed(out))
python
def sorted_source_files(self): """Returns a list of targets in topologically sorted order.""" assert self.final, 'Call build() before using the graph.' out = [] for node in nx.topological_sort(self.graph): if isinstance(node, NodeSet): out.append(node.nodes) else: # add a one-element list for uniformity out.append([node]) return list(reversed(out))
[ "def", "sorted_source_files", "(", "self", ")", ":", "assert", "self", ".", "final", ",", "'Call build() before using the graph.'", "out", "=", "[", "]", "for", "node", "in", "nx", ".", "topological_sort", "(", "self", ".", "graph", ")", ":", "if", "isinstan...
Returns a list of targets in topologically sorted order.
[ "Returns", "a", "list", "of", "targets", "in", "topologically", "sorted", "order", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/graph.py#L171-L182
train
204,931
google/importlab
importlab/graph.py
DependencyGraph.get_all_unresolved
def get_all_unresolved(self): """Returns a set of all unresolved imports.""" assert self.final, 'Call build() before using the graph.' out = set() for v in self.broken_deps.values(): out |= v return out
python
def get_all_unresolved(self): """Returns a set of all unresolved imports.""" assert self.final, 'Call build() before using the graph.' out = set() for v in self.broken_deps.values(): out |= v return out
[ "def", "get_all_unresolved", "(", "self", ")", ":", "assert", "self", ".", "final", ",", "'Call build() before using the graph.'", "out", "=", "set", "(", ")", "for", "v", "in", "self", ".", "broken_deps", ".", "values", "(", ")", ":", "out", "|=", "v", ...
Returns a set of all unresolved imports.
[ "Returns", "a", "set", "of", "all", "unresolved", "imports", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/graph.py#L194-L200
train
204,932
google/importlab
importlab/graph.py
ImportGraph.create
def create(cls, env, filenames, trim=False): """Create and return a final graph. Args: env: An environment.Environment object filenames: A list of filenames trim: Whether to trim the dependencies of builtin and system files. Returns: An immutable ImportGraph with the recursive dependencies of all the files in filenames """ import_graph = cls(env) for filename in filenames: import_graph.add_file_recursive(os.path.abspath(filename), trim) import_graph.build() return import_graph
python
def create(cls, env, filenames, trim=False): """Create and return a final graph. Args: env: An environment.Environment object filenames: A list of filenames trim: Whether to trim the dependencies of builtin and system files. Returns: An immutable ImportGraph with the recursive dependencies of all the files in filenames """ import_graph = cls(env) for filename in filenames: import_graph.add_file_recursive(os.path.abspath(filename), trim) import_graph.build() return import_graph
[ "def", "create", "(", "cls", ",", "env", ",", "filenames", ",", "trim", "=", "False", ")", ":", "import_graph", "=", "cls", "(", "env", ")", "for", "filename", "in", "filenames", ":", "import_graph", ".", "add_file_recursive", "(", "os", ".", "path", "...
Create and return a final graph. Args: env: An environment.Environment object filenames: A list of filenames trim: Whether to trim the dependencies of builtin and system files. Returns: An immutable ImportGraph with the recursive dependencies of all the files in filenames
[ "Create", "and", "return", "a", "final", "graph", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/graph.py#L213-L229
train
204,933
google/importlab
importlab/graph.py
ImportGraph.get_source_file_provenance
def get_source_file_provenance(self, filename): """Infer the module name if possible.""" module_name = resolve.infer_module_name(filename, self.path) return resolve.Direct(filename, module_name)
python
def get_source_file_provenance(self, filename): """Infer the module name if possible.""" module_name = resolve.infer_module_name(filename, self.path) return resolve.Direct(filename, module_name)
[ "def", "get_source_file_provenance", "(", "self", ",", "filename", ")", ":", "module_name", "=", "resolve", ".", "infer_module_name", "(", "filename", ",", "self", ".", "path", ")", "return", "resolve", ".", "Direct", "(", "filename", ",", "module_name", ")" ]
Infer the module name if possible.
[ "Infer", "the", "module", "name", "if", "possible", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/graph.py#L231-L234
train
204,934
google/importlab
importlab/utils.py
collect_files
def collect_files(path, extension): """Collect all the files with extension in a directory tree.""" # We should only call this on an actual directory; callers should do the # validation. assert os.path.isdir(path) out = [] # glob would be faster (see PEP471) but python glob doesn't do **/* for root, _, files in os.walk(path): out += [os.path.join(root, f) for f in files if f.endswith(extension)] return out
python
def collect_files(path, extension): """Collect all the files with extension in a directory tree.""" # We should only call this on an actual directory; callers should do the # validation. assert os.path.isdir(path) out = [] # glob would be faster (see PEP471) but python glob doesn't do **/* for root, _, files in os.walk(path): out += [os.path.join(root, f) for f in files if f.endswith(extension)] return out
[ "def", "collect_files", "(", "path", ",", "extension", ")", ":", "# We should only call this on an actual directory; callers should do the", "# validation.", "assert", "os", ".", "path", ".", "isdir", "(", "path", ")", "out", "=", "[", "]", "# glob would be faster (see ...
Collect all the files with extension in a directory tree.
[ "Collect", "all", "the", "files", "with", "extension", "in", "a", "directory", "tree", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/utils.py#L50-L60
train
204,935
google/importlab
importlab/utils.py
expand_source_files
def expand_source_files(filenames, cwd=None): """Expand a list of filenames passed in as sources. This is a helper function for handling command line arguments that specify a list of source files and directories. Any directories in filenames will be scanned recursively for .py files. Any files that do not end with ".py" will be dropped. Args: filenames: A list of filenames to process. cwd: An optional working directory to expand relative paths Returns: A list of sorted full paths to .py files """ out = [] for f in expand_paths(filenames, cwd): if os.path.isdir(f): # If we have a directory, collect all the .py files within it. out += collect_files(f, ".py") else: if f.endswith(".py"): out.append(f) return sorted(set(out))
python
def expand_source_files(filenames, cwd=None): """Expand a list of filenames passed in as sources. This is a helper function for handling command line arguments that specify a list of source files and directories. Any directories in filenames will be scanned recursively for .py files. Any files that do not end with ".py" will be dropped. Args: filenames: A list of filenames to process. cwd: An optional working directory to expand relative paths Returns: A list of sorted full paths to .py files """ out = [] for f in expand_paths(filenames, cwd): if os.path.isdir(f): # If we have a directory, collect all the .py files within it. out += collect_files(f, ".py") else: if f.endswith(".py"): out.append(f) return sorted(set(out))
[ "def", "expand_source_files", "(", "filenames", ",", "cwd", "=", "None", ")", ":", "out", "=", "[", "]", "for", "f", "in", "expand_paths", "(", "filenames", ",", "cwd", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "f", ")", ":", "# If we h...
Expand a list of filenames passed in as sources. This is a helper function for handling command line arguments that specify a list of source files and directories. Any directories in filenames will be scanned recursively for .py files. Any files that do not end with ".py" will be dropped. Args: filenames: A list of filenames to process. cwd: An optional working directory to expand relative paths Returns: A list of sorted full paths to .py files
[ "Expand", "a", "list", "of", "filenames", "passed", "in", "as", "sources", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/utils.py#L63-L86
train
204,936
google/importlab
importlab/utils.py
strip_suffix
def strip_suffix(string, suffix): """Remove a suffix from a string if it exists.""" if string.endswith(suffix): return string[:-(len(suffix))] return string
python
def strip_suffix(string, suffix): """Remove a suffix from a string if it exists.""" if string.endswith(suffix): return string[:-(len(suffix))] return string
[ "def", "strip_suffix", "(", "string", ",", "suffix", ")", ":", "if", "string", ".", "endswith", "(", "suffix", ")", ":", "return", "string", "[", ":", "-", "(", "len", "(", "suffix", ")", ")", "]", "return", "string" ]
Remove a suffix from a string if it exists.
[ "Remove", "a", "suffix", "from", "a", "string", "if", "it", "exists", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/utils.py#L155-L159
train
204,937
google/importlab
importlab/utils.py
Tempdir.create_directory
def create_directory(self, filename): """Create a subdirectory in the temporary directory.""" path = os.path.join(self.path, filename) makedirs(path) return path
python
def create_directory(self, filename): """Create a subdirectory in the temporary directory.""" path = os.path.join(self.path, filename) makedirs(path) return path
[ "def", "create_directory", "(", "self", ",", "filename", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "filename", ")", "makedirs", "(", "path", ")", "return", "path" ]
Create a subdirectory in the temporary directory.
[ "Create", "a", "subdirectory", "in", "the", "temporary", "directory", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/utils.py#L112-L116
train
204,938
google/importlab
importlab/utils.py
Tempdir.create_file
def create_file(self, filename, indented_data=None): """Create a file in the temporary directory. Dedents the contents. """ filedir, filename = os.path.split(filename) if filedir: self.create_directory(filedir) path = os.path.join(self.path, filedir, filename) data = indented_data if isinstance(data, bytes) and not isinstance(data, str): # This is binary data rather than text. mode = 'wb' else: mode = 'w' if data: data = textwrap.dedent(data) with open(path, mode) as fi: if data: fi.write(data) return path
python
def create_file(self, filename, indented_data=None): """Create a file in the temporary directory. Dedents the contents. """ filedir, filename = os.path.split(filename) if filedir: self.create_directory(filedir) path = os.path.join(self.path, filedir, filename) data = indented_data if isinstance(data, bytes) and not isinstance(data, str): # This is binary data rather than text. mode = 'wb' else: mode = 'w' if data: data = textwrap.dedent(data) with open(path, mode) as fi: if data: fi.write(data) return path
[ "def", "create_file", "(", "self", ",", "filename", ",", "indented_data", "=", "None", ")", ":", "filedir", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "if", "filedir", ":", "self", ".", "create_directory", "(", "filedir"...
Create a file in the temporary directory. Dedents the contents.
[ "Create", "a", "file", "in", "the", "temporary", "directory", "." ]
92090a0b4421137d1369c2ed952eda6bb4c7a155
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/utils.py#L118-L138
train
204,939
pinax/pinax-messages
pinax/messages/models.py
Message.new_reply
def new_reply(cls, thread, user, content): """ Create a new reply for an existing Thread. Mark thread as unread for all other participants, and mark thread as read by replier. """ msg = cls.objects.create(thread=thread, sender=user, content=content) thread.userthread_set.exclude(user=user).update(deleted=False, unread=True) thread.userthread_set.filter(user=user).update(deleted=False, unread=False) message_sent.send(sender=cls, message=msg, thread=thread, reply=True) return msg
python
def new_reply(cls, thread, user, content): """ Create a new reply for an existing Thread. Mark thread as unread for all other participants, and mark thread as read by replier. """ msg = cls.objects.create(thread=thread, sender=user, content=content) thread.userthread_set.exclude(user=user).update(deleted=False, unread=True) thread.userthread_set.filter(user=user).update(deleted=False, unread=False) message_sent.send(sender=cls, message=msg, thread=thread, reply=True) return msg
[ "def", "new_reply", "(", "cls", ",", "thread", ",", "user", ",", "content", ")", ":", "msg", "=", "cls", ".", "objects", ".", "create", "(", "thread", "=", "thread", ",", "sender", "=", "user", ",", "content", "=", "content", ")", "thread", ".", "u...
Create a new reply for an existing Thread. Mark thread as unread for all other participants, and mark thread as read by replier.
[ "Create", "a", "new", "reply", "for", "an", "existing", "Thread", "." ]
8403bf95ee9b36cbe8ea0bb712e5ef75ba898746
https://github.com/pinax/pinax-messages/blob/8403bf95ee9b36cbe8ea0bb712e5ef75ba898746/pinax/messages/models.py#L84-L95
train
204,940
pinax/pinax-messages
pinax/messages/models.py
Message.new_message
def new_message(cls, from_user, to_users, subject, content): """ Create a new Message and Thread. Mark thread as unread for all recipients, and mark thread as read and deleted from inbox by creator. """ thread = Thread.objects.create(subject=subject) for user in to_users: thread.userthread_set.create(user=user, deleted=False, unread=True) thread.userthread_set.create(user=from_user, deleted=True, unread=False) msg = cls.objects.create(thread=thread, sender=from_user, content=content) message_sent.send(sender=cls, message=msg, thread=thread, reply=False) return msg
python
def new_message(cls, from_user, to_users, subject, content): """ Create a new Message and Thread. Mark thread as unread for all recipients, and mark thread as read and deleted from inbox by creator. """ thread = Thread.objects.create(subject=subject) for user in to_users: thread.userthread_set.create(user=user, deleted=False, unread=True) thread.userthread_set.create(user=from_user, deleted=True, unread=False) msg = cls.objects.create(thread=thread, sender=from_user, content=content) message_sent.send(sender=cls, message=msg, thread=thread, reply=False) return msg
[ "def", "new_message", "(", "cls", ",", "from_user", ",", "to_users", ",", "subject", ",", "content", ")", ":", "thread", "=", "Thread", ".", "objects", ".", "create", "(", "subject", "=", "subject", ")", "for", "user", "in", "to_users", ":", "thread", ...
Create a new Message and Thread. Mark thread as unread for all recipients, and mark thread as read and deleted from inbox by creator.
[ "Create", "a", "new", "Message", "and", "Thread", "." ]
8403bf95ee9b36cbe8ea0bb712e5ef75ba898746
https://github.com/pinax/pinax-messages/blob/8403bf95ee9b36cbe8ea0bb712e5ef75ba898746/pinax/messages/models.py#L98-L111
train
204,941
pinax/pinax-messages
pinax/messages/templatetags/pinax_messages_tags.py
unread
def unread(thread, user): """ Check whether there are any unread messages for a particular thread for a user. """ return bool(thread.userthread_set.filter(user=user, unread=True))
python
def unread(thread, user): """ Check whether there are any unread messages for a particular thread for a user. """ return bool(thread.userthread_set.filter(user=user, unread=True))
[ "def", "unread", "(", "thread", ",", "user", ")", ":", "return", "bool", "(", "thread", ".", "userthread_set", ".", "filter", "(", "user", "=", "user", ",", "unread", "=", "True", ")", ")" ]
Check whether there are any unread messages for a particular thread for a user.
[ "Check", "whether", "there", "are", "any", "unread", "messages", "for", "a", "particular", "thread", "for", "a", "user", "." ]
8403bf95ee9b36cbe8ea0bb712e5ef75ba898746
https://github.com/pinax/pinax-messages/blob/8403bf95ee9b36cbe8ea0bb712e5ef75ba898746/pinax/messages/templatetags/pinax_messages_tags.py#L9-L13
train
204,942
bheinzerling/pyrouge
pyrouge/utils/sentence_splitter.py
PunktSentenceSplitter.split
def split(self, text): """Splits text and returns a list of the resulting sentences.""" text = cleanup(text) return self.sent_detector.tokenize(text.strip())
python
def split(self, text): """Splits text and returns a list of the resulting sentences.""" text = cleanup(text) return self.sent_detector.tokenize(text.strip())
[ "def", "split", "(", "self", ",", "text", ")", ":", "text", "=", "cleanup", "(", "text", ")", "return", "self", ".", "sent_detector", ".", "tokenize", "(", "text", ".", "strip", "(", ")", ")" ]
Splits text and returns a list of the resulting sentences.
[ "Splits", "text", "and", "returns", "a", "list", "of", "the", "resulting", "sentences", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/utils/sentence_splitter.py#L37-L40
train
204,943
bheinzerling/pyrouge
pyrouge/utils/file_utils.py
str_from_file
def str_from_file(path): """ Return file contents as string. """ with open(path) as f: s = f.read().strip() return s
python
def str_from_file(path): """ Return file contents as string. """ with open(path) as f: s = f.read().strip() return s
[ "def", "str_from_file", "(", "path", ")", ":", "with", "open", "(", "path", ")", "as", "f", ":", "s", "=", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "return", "s" ]
Return file contents as string.
[ "Return", "file", "contents", "as", "string", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/utils/file_utils.py#L37-L44
train
204,944
bheinzerling/pyrouge
pyrouge/utils/file_utils.py
xml_equal
def xml_equal(xml_file1, xml_file2): """ Parse xml and convert to a canonical string representation so we don't have to worry about semantically meaningless differences """ def canonical(xml_file): # poor man's canonicalization, since we don't want to install # external packages just for unittesting s = et.tostring(et.parse(xml_file).getroot()).decode("UTF-8") s = re.sub("[\n|\t]*", "", s) s = re.sub("\s+", " ", s) s = "".join(sorted(s)).strip() return s return canonical(xml_file1) == canonical(xml_file2)
python
def xml_equal(xml_file1, xml_file2): """ Parse xml and convert to a canonical string representation so we don't have to worry about semantically meaningless differences """ def canonical(xml_file): # poor man's canonicalization, since we don't want to install # external packages just for unittesting s = et.tostring(et.parse(xml_file).getroot()).decode("UTF-8") s = re.sub("[\n|\t]*", "", s) s = re.sub("\s+", " ", s) s = "".join(sorted(s)).strip() return s return canonical(xml_file1) == canonical(xml_file2)
[ "def", "xml_equal", "(", "xml_file1", ",", "xml_file2", ")", ":", "def", "canonical", "(", "xml_file", ")", ":", "# poor man's canonicalization, since we don't want to install", "# external packages just for unittesting", "s", "=", "et", ".", "tostring", "(", "et", ".",...
Parse xml and convert to a canonical string representation so we don't have to worry about semantically meaningless differences
[ "Parse", "xml", "and", "convert", "to", "a", "canonical", "string", "representation", "so", "we", "don", "t", "have", "to", "worry", "about", "semantically", "meaningless", "differences" ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/utils/file_utils.py#L47-L62
train
204,945
bheinzerling/pyrouge
pyrouge/utils/file_utils.py
list_files
def list_files(dir_path, recursive=True): """ Return a list of files in dir_path. """ for root, dirs, files in os.walk(dir_path): file_list = [os.path.join(root, f) for f in files] if recursive: for dir in dirs: dir = os.path.join(root, dir) file_list.extend(list_files(dir, recursive=True)) return file_list
python
def list_files(dir_path, recursive=True): """ Return a list of files in dir_path. """ for root, dirs, files in os.walk(dir_path): file_list = [os.path.join(root, f) for f in files] if recursive: for dir in dirs: dir = os.path.join(root, dir) file_list.extend(list_files(dir, recursive=True)) return file_list
[ "def", "list_files", "(", "dir_path", ",", "recursive", "=", "True", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "dir_path", ")", ":", "file_list", "=", "[", "os", ".", "path", ".", "join", "(", "root", ",", ...
Return a list of files in dir_path.
[ "Return", "a", "list", "of", "files", "in", "dir_path", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/utils/file_utils.py#L65-L77
train
204,946
bheinzerling/pyrouge
pyrouge/utils/file_utils.py
DirectoryProcessor.process
def process(input_dir, output_dir, function): """ Apply function to all files in input_dir and save the resulting ouput files in output_dir. """ if not os.path.exists(output_dir): os.makedirs(output_dir) logger = log.get_global_console_logger() logger.info("Processing files in {}.".format(input_dir)) input_file_names = os.listdir(input_dir) for input_file_name in input_file_names: logger.info("Processing {}.".format(input_file_name)) input_file = os.path.join(input_dir, input_file_name) with codecs.open(input_file, "r", encoding="UTF-8") as f: input_string = f.read() output_string = function(input_string) output_file = os.path.join(output_dir, input_file_name) with codecs.open(output_file, "w", encoding="UTF-8") as f: f.write(output_string) logger.info("Saved processed files to {}.".format(output_dir))
python
def process(input_dir, output_dir, function): """ Apply function to all files in input_dir and save the resulting ouput files in output_dir. """ if not os.path.exists(output_dir): os.makedirs(output_dir) logger = log.get_global_console_logger() logger.info("Processing files in {}.".format(input_dir)) input_file_names = os.listdir(input_dir) for input_file_name in input_file_names: logger.info("Processing {}.".format(input_file_name)) input_file = os.path.join(input_dir, input_file_name) with codecs.open(input_file, "r", encoding="UTF-8") as f: input_string = f.read() output_string = function(input_string) output_file = os.path.join(output_dir, input_file_name) with codecs.open(output_file, "w", encoding="UTF-8") as f: f.write(output_string) logger.info("Saved processed files to {}.".format(output_dir))
[ "def", "process", "(", "input_dir", ",", "output_dir", ",", "function", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "output_dir", ")", ":", "os", ".", "makedirs", "(", "output_dir", ")", "logger", "=", "log", ".", "get_global_console_lo...
Apply function to all files in input_dir and save the resulting ouput files in output_dir.
[ "Apply", "function", "to", "all", "files", "in", "input_dir", "and", "save", "the", "resulting", "ouput", "files", "in", "output_dir", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/utils/file_utils.py#L14-L34
train
204,947
bheinzerling/pyrouge
pyrouge/Rouge155.py
Rouge155.split_sentences
def split_sentences(self): """ ROUGE requires texts split into sentences. In case the texts are not already split, this method can be used. """ from pyrouge.utils.sentence_splitter import PunktSentenceSplitter self.log.info("Splitting sentences.") ss = PunktSentenceSplitter() sent_split_to_string = lambda s: "\n".join(ss.split(s)) process_func = partial( DirectoryProcessor.process, function=sent_split_to_string) self.__process_summaries(process_func)
python
def split_sentences(self): """ ROUGE requires texts split into sentences. In case the texts are not already split, this method can be used. """ from pyrouge.utils.sentence_splitter import PunktSentenceSplitter self.log.info("Splitting sentences.") ss = PunktSentenceSplitter() sent_split_to_string = lambda s: "\n".join(ss.split(s)) process_func = partial( DirectoryProcessor.process, function=sent_split_to_string) self.__process_summaries(process_func)
[ "def", "split_sentences", "(", "self", ")", ":", "from", "pyrouge", ".", "utils", ".", "sentence_splitter", "import", "PunktSentenceSplitter", "self", ".", "log", ".", "info", "(", "\"Splitting sentences.\"", ")", "ss", "=", "PunktSentenceSplitter", "(", ")", "s...
ROUGE requires texts split into sentences. In case the texts are not already split, this method can be used.
[ "ROUGE", "requires", "texts", "split", "into", "sentences", ".", "In", "case", "the", "texts", "are", "not", "already", "split", "this", "method", "can", "be", "used", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L178-L190
train
204,948
bheinzerling/pyrouge
pyrouge/Rouge155.py
Rouge155.write_config_static
def write_config_static(system_dir, system_filename_pattern, model_dir, model_filename_pattern, config_file_path, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their corresponding model summary files. pyrouge uses regular expressions to automatically find the matching model summary files for a given system summary file (cf. docstrings for system_filename_pattern and model_filename_pattern). system_dir: Path of directory containing system summaries. system_filename_pattern: Regex string for matching system summary filenames. model_dir: Path of directory containing model summaries. model_filename_pattern: Regex string for matching model summary filenames. config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. """ system_filenames = [f for f in os.listdir(system_dir)] system_models_tuples = [] system_filename_pattern = re.compile(system_filename_pattern) for system_filename in sorted(system_filenames): match = system_filename_pattern.match(system_filename) if match: id = match.groups(0)[0] model_filenames = Rouge155.__get_model_filenames_for_id( id, model_dir, model_filename_pattern) system_models_tuples.append( (system_filename, sorted(model_filenames))) if not system_models_tuples: raise Exception( "Did not find any files matching the pattern {} " "in the system summaries directory {}.".format( system_filename_pattern.pattern, system_dir)) with codecs.open(config_file_path, 'w', encoding='utf-8') as f: f.write('<ROUGE-EVAL version="1.55">') for task_id, (system_filename, model_filenames) in enumerate( system_models_tuples, start=1): eval_string = Rouge155.__get_eval_string( task_id, system_id, system_dir, system_filename, model_dir, model_filenames) f.write(eval_string) f.write("</ROUGE-EVAL>")
python
def write_config_static(system_dir, system_filename_pattern, model_dir, model_filename_pattern, config_file_path, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their corresponding model summary files. pyrouge uses regular expressions to automatically find the matching model summary files for a given system summary file (cf. docstrings for system_filename_pattern and model_filename_pattern). system_dir: Path of directory containing system summaries. system_filename_pattern: Regex string for matching system summary filenames. model_dir: Path of directory containing model summaries. model_filename_pattern: Regex string for matching model summary filenames. config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. """ system_filenames = [f for f in os.listdir(system_dir)] system_models_tuples = [] system_filename_pattern = re.compile(system_filename_pattern) for system_filename in sorted(system_filenames): match = system_filename_pattern.match(system_filename) if match: id = match.groups(0)[0] model_filenames = Rouge155.__get_model_filenames_for_id( id, model_dir, model_filename_pattern) system_models_tuples.append( (system_filename, sorted(model_filenames))) if not system_models_tuples: raise Exception( "Did not find any files matching the pattern {} " "in the system summaries directory {}.".format( system_filename_pattern.pattern, system_dir)) with codecs.open(config_file_path, 'w', encoding='utf-8') as f: f.write('<ROUGE-EVAL version="1.55">') for task_id, (system_filename, model_filenames) in enumerate( system_models_tuples, start=1): eval_string = Rouge155.__get_eval_string( task_id, system_id, system_dir, system_filename, model_dir, model_filenames) f.write(eval_string) f.write("</ROUGE-EVAL>")
[ "def", "write_config_static", "(", "system_dir", ",", "system_filename_pattern", ",", "model_dir", ",", "model_filename_pattern", ",", "config_file_path", ",", "system_id", "=", "None", ")", ":", "system_filenames", "=", "[", "f", "for", "f", "in", "os", ".", "l...
Write the ROUGE configuration file, which is basically a list of system summary files and their corresponding model summary files. pyrouge uses regular expressions to automatically find the matching model summary files for a given system summary file (cf. docstrings for system_filename_pattern and model_filename_pattern). system_dir: Path of directory containing system summaries. system_filename_pattern: Regex string for matching system summary filenames. model_dir: Path of directory containing model summaries. model_filename_pattern: Regex string for matching model summary filenames. config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output.
[ "Write", "the", "ROUGE", "configuration", "file", "which", "is", "basically", "a", "list", "of", "system", "summary", "files", "and", "their", "corresponding", "model", "summary", "files", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L238-L292
train
204,949
bheinzerling/pyrouge
pyrouge/Rouge155.py
Rouge155.write_config
def write_config(self, config_file_path=None, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their matching model summary files. This is a non-static version of write_config_file_static(). config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. """ if not system_id: system_id = 1 if (not config_file_path) or (not self._config_dir): self._config_dir = mkdtemp() config_filename = "rouge_conf.xml" else: config_dir, config_filename = os.path.split(config_file_path) verify_dir(config_dir, "configuration file") self._config_file = os.path.join(self._config_dir, config_filename) Rouge155.write_config_static( self._system_dir, self._system_filename_pattern, self._model_dir, self._model_filename_pattern, self._config_file, system_id) self.log.info( "Written ROUGE configuration to {}".format(self._config_file))
python
def write_config(self, config_file_path=None, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their matching model summary files. This is a non-static version of write_config_file_static(). config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. """ if not system_id: system_id = 1 if (not config_file_path) or (not self._config_dir): self._config_dir = mkdtemp() config_filename = "rouge_conf.xml" else: config_dir, config_filename = os.path.split(config_file_path) verify_dir(config_dir, "configuration file") self._config_file = os.path.join(self._config_dir, config_filename) Rouge155.write_config_static( self._system_dir, self._system_filename_pattern, self._model_dir, self._model_filename_pattern, self._config_file, system_id) self.log.info( "Written ROUGE configuration to {}".format(self._config_file))
[ "def", "write_config", "(", "self", ",", "config_file_path", "=", "None", ",", "system_id", "=", "None", ")", ":", "if", "not", "system_id", ":", "system_id", "=", "1", "if", "(", "not", "config_file_path", ")", "or", "(", "not", "self", ".", "_config_di...
Write the ROUGE configuration file, which is basically a list of system summary files and their matching model summary files. This is a non-static version of write_config_file_static(). config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output.
[ "Write", "the", "ROUGE", "configuration", "file", "which", "is", "basically", "a", "list", "of", "system", "summary", "files", "and", "their", "matching", "model", "summary", "files", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L294-L320
train
204,950
bheinzerling/pyrouge
pyrouge/Rouge155.py
Rouge155.evaluate
def evaluate(self, system_id=1, rouge_args=None): """ Run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. The summaries are assumed to be in the one-sentence-per-line HTML format ROUGE understands. system_id: Optional system ID which will be printed in ROUGE's output. Returns: Rouge output as string. """ self.write_config(system_id=system_id) options = self.__get_options(rouge_args) command = [self._bin_path] + options env = None if hasattr(self, "_home_dir") and self._home_dir: env = {'ROUGE_EVAL_HOME': self._home_dir} self.log.info( "Running ROUGE with command {}".format(" ".join(command))) rouge_output = check_output(command, env=env).decode("UTF-8") return rouge_output
python
def evaluate(self, system_id=1, rouge_args=None): """ Run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. The summaries are assumed to be in the one-sentence-per-line HTML format ROUGE understands. system_id: Optional system ID which will be printed in ROUGE's output. Returns: Rouge output as string. """ self.write_config(system_id=system_id) options = self.__get_options(rouge_args) command = [self._bin_path] + options env = None if hasattr(self, "_home_dir") and self._home_dir: env = {'ROUGE_EVAL_HOME': self._home_dir} self.log.info( "Running ROUGE with command {}".format(" ".join(command))) rouge_output = check_output(command, env=env).decode("UTF-8") return rouge_output
[ "def", "evaluate", "(", "self", ",", "system_id", "=", "1", ",", "rouge_args", "=", "None", ")", ":", "self", ".", "write_config", "(", "system_id", "=", "system_id", ")", "options", "=", "self", ".", "__get_options", "(", "rouge_args", ")", "command", "...
Run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. The summaries are assumed to be in the one-sentence-per-line HTML format ROUGE understands. system_id: Optional system ID which will be printed in ROUGE's output. Returns: Rouge output as string.
[ "Run", "ROUGE", "to", "evaluate", "the", "system", "summaries", "in", "system_dir", "against", "the", "model", "summaries", "in", "model_dir", ".", "The", "summaries", "are", "assumed", "to", "be", "in", "the", "one", "-", "sentence", "-", "per", "-", "lin...
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L322-L343
train
204,951
bheinzerling/pyrouge
pyrouge/Rouge155.py
Rouge155.convert_and_evaluate
def convert_and_evaluate(self, system_id=1, split_sentences=False, rouge_args=None): """ Convert plain text summaries to ROUGE format and run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. Optionally split texts into sentences in case they aren't already. This is just a convenience method combining convert_summaries_to_rouge_format() and evaluate(). split_sentences: Optional argument specifying if sentences should be split. system_id: Optional system ID which will be printed in ROUGE's output. Returns: ROUGE output as string. """ if split_sentences: self.split_sentences() self.__write_summaries() rouge_output = self.evaluate(system_id, rouge_args) return rouge_output
python
def convert_and_evaluate(self, system_id=1, split_sentences=False, rouge_args=None): """ Convert plain text summaries to ROUGE format and run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. Optionally split texts into sentences in case they aren't already. This is just a convenience method combining convert_summaries_to_rouge_format() and evaluate(). split_sentences: Optional argument specifying if sentences should be split. system_id: Optional system ID which will be printed in ROUGE's output. Returns: ROUGE output as string. """ if split_sentences: self.split_sentences() self.__write_summaries() rouge_output = self.evaluate(system_id, rouge_args) return rouge_output
[ "def", "convert_and_evaluate", "(", "self", ",", "system_id", "=", "1", ",", "split_sentences", "=", "False", ",", "rouge_args", "=", "None", ")", ":", "if", "split_sentences", ":", "self", ".", "split_sentences", "(", ")", "self", ".", "__write_summaries", ...
Convert plain text summaries to ROUGE format and run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. Optionally split texts into sentences in case they aren't already. This is just a convenience method combining convert_summaries_to_rouge_format() and evaluate(). split_sentences: Optional argument specifying if sentences should be split. system_id: Optional system ID which will be printed in ROUGE's output. Returns: ROUGE output as string.
[ "Convert", "plain", "text", "summaries", "to", "ROUGE", "format", "and", "run", "ROUGE", "to", "evaluate", "the", "system", "summaries", "in", "system_dir", "against", "the", "model", "summaries", "in", "model_dir", ".", "Optionally", "split", "texts", "into", ...
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L345-L368
train
204,952
bheinzerling/pyrouge
pyrouge/Rouge155.py
Rouge155.output_to_dict
def output_to_dict(self, output): """ Convert the ROUGE output into python dictionary for further processing. """ #0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632) pattern = re.compile( r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) " r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)") results = {} for line in output.split("\n"): match = pattern.match(line) if match: sys_id, rouge_type, measure, result, conf_begin, conf_end = \ match.groups() measure = { 'Average_R': 'recall', 'Average_P': 'precision', 'Average_F': 'f_score' }[measure] rouge_type = rouge_type.lower().replace("-", '_') key = "{}_{}".format(rouge_type, measure) results[key] = float(result) results["{}_cb".format(key)] = float(conf_begin) results["{}_ce".format(key)] = float(conf_end) return results
python
def output_to_dict(self, output): """ Convert the ROUGE output into python dictionary for further processing. """ #0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632) pattern = re.compile( r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) " r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)") results = {} for line in output.split("\n"): match = pattern.match(line) if match: sys_id, rouge_type, measure, result, conf_begin, conf_end = \ match.groups() measure = { 'Average_R': 'recall', 'Average_P': 'precision', 'Average_F': 'f_score' }[measure] rouge_type = rouge_type.lower().replace("-", '_') key = "{}_{}".format(rouge_type, measure) results[key] = float(result) results["{}_cb".format(key)] = float(conf_begin) results["{}_ce".format(key)] = float(conf_end) return results
[ "def", "output_to_dict", "(", "self", ",", "output", ")", ":", "#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)", "pattern", "=", "re", ".", "compile", "(", "r\"(\\d+) (ROUGE-\\S+) (Average_\\w): (\\d.\\d+) \"", "r\"\\(95%-conf.int. (\\d.\\d+) - (\\d.\\d+)\\)\"", ")...
Convert the ROUGE output into python dictionary for further processing.
[ "Convert", "the", "ROUGE", "output", "into", "python", "dictionary", "for", "further", "processing", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L370-L396
train
204,953
bheinzerling/pyrouge
pyrouge/Rouge155.py
Rouge155.__set_rouge_dir
def __set_rouge_dir(self, home_dir=None): """ Verfify presence of ROUGE-1.5.5.pl and data folder, and set those paths. """ if not home_dir: self._home_dir = self.__get_rouge_home_dir_from_settings() else: self._home_dir = home_dir self.save_home_dir() self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl') self.data_dir = os.path.join(self._home_dir, 'data') if not os.path.exists(self._bin_path): raise Exception( "ROUGE binary not found at {}. Please set the " "correct path by running pyrouge_set_rouge_path " "/path/to/rouge/home.".format(self._bin_path))
python
def __set_rouge_dir(self, home_dir=None): """ Verfify presence of ROUGE-1.5.5.pl and data folder, and set those paths. """ if not home_dir: self._home_dir = self.__get_rouge_home_dir_from_settings() else: self._home_dir = home_dir self.save_home_dir() self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl') self.data_dir = os.path.join(self._home_dir, 'data') if not os.path.exists(self._bin_path): raise Exception( "ROUGE binary not found at {}. Please set the " "correct path by running pyrouge_set_rouge_path " "/path/to/rouge/home.".format(self._bin_path))
[ "def", "__set_rouge_dir", "(", "self", ",", "home_dir", "=", "None", ")", ":", "if", "not", "home_dir", ":", "self", ".", "_home_dir", "=", "self", ".", "__get_rouge_home_dir_from_settings", "(", ")", "else", ":", "self", ".", "_home_dir", "=", "home_dir", ...
Verfify presence of ROUGE-1.5.5.pl and data folder, and set those paths.
[ "Verfify", "presence", "of", "ROUGE", "-", "1", ".", "5", ".", "5", ".", "pl", "and", "data", "folder", "and", "set", "those", "paths", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L401-L418
train
204,954
bheinzerling/pyrouge
pyrouge/Rouge155.py
Rouge155.__process_summaries
def __process_summaries(self, process_func): """ Helper method that applies process_func to the files in the system and model folders and saves the resulting files to new system and model folders. """ temp_dir = mkdtemp() new_system_dir = os.path.join(temp_dir, "system") os.mkdir(new_system_dir) new_model_dir = os.path.join(temp_dir, "model") os.mkdir(new_model_dir) self.log.info( "Processing summaries. Saving system files to {} and " "model files to {}.".format(new_system_dir, new_model_dir)) process_func(self._system_dir, new_system_dir) process_func(self._model_dir, new_model_dir) self._system_dir = new_system_dir self._model_dir = new_model_dir
python
def __process_summaries(self, process_func): """ Helper method that applies process_func to the files in the system and model folders and saves the resulting files to new system and model folders. """ temp_dir = mkdtemp() new_system_dir = os.path.join(temp_dir, "system") os.mkdir(new_system_dir) new_model_dir = os.path.join(temp_dir, "model") os.mkdir(new_model_dir) self.log.info( "Processing summaries. Saving system files to {} and " "model files to {}.".format(new_system_dir, new_model_dir)) process_func(self._system_dir, new_system_dir) process_func(self._model_dir, new_model_dir) self._system_dir = new_system_dir self._model_dir = new_model_dir
[ "def", "__process_summaries", "(", "self", ",", "process_func", ")", ":", "temp_dir", "=", "mkdtemp", "(", ")", "new_system_dir", "=", "os", ".", "path", ".", "join", "(", "temp_dir", ",", "\"system\"", ")", "os", ".", "mkdir", "(", "new_system_dir", ")", ...
Helper method that applies process_func to the files in the system and model folders and saves the resulting files to new system and model folders.
[ "Helper", "method", "that", "applies", "process_func", "to", "the", "files", "in", "the", "system", "and", "model", "folders", "and", "saves", "the", "resulting", "files", "to", "new", "system", "and", "model", "folders", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L473-L491
train
204,955
bheinzerling/pyrouge
pyrouge/Rouge155.py
Rouge155.__get_options
def __get_options(self, rouge_args=None): """ Get supplied command line arguments for ROUGE or use default ones. """ if self.args: options = self.args.split() elif rouge_args: options = rouge_args.split() else: options = [ '-e', self._data_dir, '-c', 95, '-2', '-1', '-U', '-r', 1000, '-n', 4, '-w', 1.2, '-a', ] options = list(map(str, options)) options = self.__add_config_option(options) return options
python
def __get_options(self, rouge_args=None): """ Get supplied command line arguments for ROUGE or use default ones. """ if self.args: options = self.args.split() elif rouge_args: options = rouge_args.split() else: options = [ '-e', self._data_dir, '-c', 95, '-2', '-1', '-U', '-r', 1000, '-n', 4, '-w', 1.2, '-a', ] options = list(map(str, options)) options = self.__add_config_option(options) return options
[ "def", "__get_options", "(", "self", ",", "rouge_args", "=", "None", ")", ":", "if", "self", ".", "args", ":", "options", "=", "self", ".", "args", ".", "split", "(", ")", "elif", "rouge_args", ":", "options", "=", "rouge_args", ".", "split", "(", ")...
Get supplied command line arguments for ROUGE or use default ones.
[ "Get", "supplied", "command", "line", "arguments", "for", "ROUGE", "or", "use", "default", "ones", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L509-L534
train
204,956
bheinzerling/pyrouge
pyrouge/Rouge155.py
Rouge155.__create_dir_property
def __create_dir_property(self, dir_name, docstring): """ Generate getter and setter for a directory property. """ property_name = "{}_dir".format(dir_name) private_name = "_" + property_name setattr(self, private_name, None) def fget(self): return getattr(self, private_name) def fset(self, path): verify_dir(path, dir_name) setattr(self, private_name, path) p = property(fget=fget, fset=fset, doc=docstring) setattr(self.__class__, property_name, p)
python
def __create_dir_property(self, dir_name, docstring): """ Generate getter and setter for a directory property. """ property_name = "{}_dir".format(dir_name) private_name = "_" + property_name setattr(self, private_name, None) def fget(self): return getattr(self, private_name) def fset(self, path): verify_dir(path, dir_name) setattr(self, private_name, path) p = property(fget=fget, fset=fset, doc=docstring) setattr(self.__class__, property_name, p)
[ "def", "__create_dir_property", "(", "self", ",", "dir_name", ",", "docstring", ")", ":", "property_name", "=", "\"{}_dir\"", ".", "format", "(", "dir_name", ")", "private_name", "=", "\"_\"", "+", "property_name", "setattr", "(", "self", ",", "private_name", ...
Generate getter and setter for a directory property.
[ "Generate", "getter", "and", "setter", "for", "a", "directory", "property", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L536-L553
train
204,957
bheinzerling/pyrouge
pyrouge/Rouge155.py
Rouge155.__set_dir_properties
def __set_dir_properties(self): """ Automatically generate the properties for directories. """ directories = [ ("home", "The ROUGE home directory."), ("data", "The path of the ROUGE 'data' directory."), ("system", "Path of the directory containing system summaries."), ("model", "Path of the directory containing model summaries."), ] for (dirname, docstring) in directories: self.__create_dir_property(dirname, docstring)
python
def __set_dir_properties(self): """ Automatically generate the properties for directories. """ directories = [ ("home", "The ROUGE home directory."), ("data", "The path of the ROUGE 'data' directory."), ("system", "Path of the directory containing system summaries."), ("model", "Path of the directory containing model summaries."), ] for (dirname, docstring) in directories: self.__create_dir_property(dirname, docstring)
[ "def", "__set_dir_properties", "(", "self", ")", ":", "directories", "=", "[", "(", "\"home\"", ",", "\"The ROUGE home directory.\"", ")", ",", "(", "\"data\"", ",", "\"The path of the ROUGE 'data' directory.\"", ")", ",", "(", "\"system\"", ",", "\"Path of the direct...
Automatically generate the properties for directories.
[ "Automatically", "generate", "the", "properties", "for", "directories", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L555-L567
train
204,958
bheinzerling/pyrouge
pyrouge/Rouge155.py
Rouge155.__clean_rouge_args
def __clean_rouge_args(self, rouge_args): """ Remove enclosing quotation marks, if any. """ if not rouge_args: return quot_mark_pattern = re.compile('"(.+)"') match = quot_mark_pattern.match(rouge_args) if match: cleaned_args = match.group(1) return cleaned_args else: return rouge_args
python
def __clean_rouge_args(self, rouge_args): """ Remove enclosing quotation marks, if any. """ if not rouge_args: return quot_mark_pattern = re.compile('"(.+)"') match = quot_mark_pattern.match(rouge_args) if match: cleaned_args = match.group(1) return cleaned_args else: return rouge_args
[ "def", "__clean_rouge_args", "(", "self", ",", "rouge_args", ")", ":", "if", "not", "rouge_args", ":", "return", "quot_mark_pattern", "=", "re", ".", "compile", "(", "'\"(.+)\"'", ")", "match", "=", "quot_mark_pattern", ".", "match", "(", "rouge_args", ")", ...
Remove enclosing quotation marks, if any.
[ "Remove", "enclosing", "quotation", "marks", "if", "any", "." ]
afeb37dd2608f1399e2fb24a4ee2fe10a2a18603
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L569-L582
train
204,959
NYUCCL/psiTurk
psiturk/user_utils.py
PsiTurkAuthorization.check_auth
def check_auth(self, username, password): ''' This function is called to check if a username password combination is valid. ''' return username == self.queryname and password == self.querypw
python
def check_auth(self, username, password): ''' This function is called to check if a username password combination is valid. ''' return username == self.queryname and password == self.querypw
[ "def", "check_auth", "(", "self", ",", "username", ",", "password", ")", ":", "return", "username", "==", "self", ".", "queryname", "and", "password", "==", "self", ".", "querypw" ]
This function is called to check if a username password combination is valid.
[ "This", "function", "is", "called", "to", "check", "if", "a", "username", "password", "combination", "is", "valid", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/user_utils.py#L41-L44
train
204,960
NYUCCL/psiTurk
psiturk/user_utils.py
PsiTurkAuthorization.requires_auth
def requires_auth(self, func): ''' Decorator to prompt for user name and password. Useful for data dumps, etc. That you don't want to be public. ''' @wraps(func) def decorated(*args, **kwargs): ''' Wrapper ''' auth = request.authorization if not auth or not self.check_auth(auth.username, auth.password): return self.authenticate() return func(*args, **kwargs) return decorated
python
def requires_auth(self, func): ''' Decorator to prompt for user name and password. Useful for data dumps, etc. That you don't want to be public. ''' @wraps(func) def decorated(*args, **kwargs): ''' Wrapper ''' auth = request.authorization if not auth or not self.check_auth(auth.username, auth.password): return self.authenticate() return func(*args, **kwargs) return decorated
[ "def", "requires_auth", "(", "self", ",", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "decorated", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "''' Wrapper '''", "auth", "=", "request", ".", "authorization", "if", "not", "auth", ...
Decorator to prompt for user name and password. Useful for data dumps, etc. That you don't want to be public.
[ "Decorator", "to", "prompt", "for", "user", "name", "and", "password", ".", "Useful", "for", "data", "dumps", "etc", ".", "That", "you", "don", "t", "want", "to", "be", "public", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/user_utils.py#L55-L67
train
204,961
NYUCCL/psiTurk
psiturk/experiment.py
handle_exp_error
def handle_exp_error(exception): """Handle errors by sending an error page.""" app.logger.error( "%s (%s) %s", exception.value, exception.errornum, str(dict(request.args))) return exception.error_page(request, CONFIG.get('HIT Configuration', 'contact_email_on_error'))
python
def handle_exp_error(exception): """Handle errors by sending an error page.""" app.logger.error( "%s (%s) %s", exception.value, exception.errornum, str(dict(request.args))) return exception.error_page(request, CONFIG.get('HIT Configuration', 'contact_email_on_error'))
[ "def", "handle_exp_error", "(", "exception", ")", ":", "app", ".", "logger", ".", "error", "(", "\"%s (%s) %s\"", ",", "exception", ".", "value", ",", "exception", ".", "errornum", ",", "str", "(", "dict", "(", "request", ".", "args", ")", ")", ")", "r...
Handle errors by sending an error page.
[ "Handle", "errors", "by", "sending", "an", "error", "page", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment.py#L104-L109
train
204,962
NYUCCL/psiTurk
psiturk/experiment.py
check_worker_status
def check_worker_status(): ''' Check worker status route ''' if 'workerId' not in request.args: resp = {"status": "bad request"} return jsonify(**resp) else: worker_id = request.args['workerId'] assignment_id = request.args['assignmentId'] allow_repeats = CONFIG.getboolean('HIT Configuration', 'allow_repeats') if allow_repeats: # if you allow repeats focus on current worker/assignment combo try: part = Participant.query.\ filter(Participant.workerid == worker_id).\ filter(Participant.assignmentid == assignment_id).one() status = part.status except exc.SQLAlchemyError: status = NOT_ACCEPTED else: # if you disallow repeats search for highest status of anything by this worker try: matches = Participant.query.\ filter(Participant.workerid == worker_id).all() numrecs = len(matches) if numrecs==0: # this should be caught by exception, but just to be safe status = NOT_ACCEPTED else: status = max([record.status for record in matches]) except exc.SQLAlchemyError: status = NOT_ACCEPTED resp = {"status" : status} return jsonify(**resp)
python
def check_worker_status(): ''' Check worker status route ''' if 'workerId' not in request.args: resp = {"status": "bad request"} return jsonify(**resp) else: worker_id = request.args['workerId'] assignment_id = request.args['assignmentId'] allow_repeats = CONFIG.getboolean('HIT Configuration', 'allow_repeats') if allow_repeats: # if you allow repeats focus on current worker/assignment combo try: part = Participant.query.\ filter(Participant.workerid == worker_id).\ filter(Participant.assignmentid == assignment_id).one() status = part.status except exc.SQLAlchemyError: status = NOT_ACCEPTED else: # if you disallow repeats search for highest status of anything by this worker try: matches = Participant.query.\ filter(Participant.workerid == worker_id).all() numrecs = len(matches) if numrecs==0: # this should be caught by exception, but just to be safe status = NOT_ACCEPTED else: status = max([record.status for record in matches]) except exc.SQLAlchemyError: status = NOT_ACCEPTED resp = {"status" : status} return jsonify(**resp)
[ "def", "check_worker_status", "(", ")", ":", "if", "'workerId'", "not", "in", "request", ".", "args", ":", "resp", "=", "{", "\"status\"", ":", "\"bad request\"", "}", "return", "jsonify", "(", "*", "*", "resp", ")", "else", ":", "worker_id", "=", "reque...
Check worker status route
[ "Check", "worker", "status", "route" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment.py#L200-L229
train
204,963
NYUCCL/psiTurk
psiturk/experiment.py
give_consent
def give_consent(): """ Serves up the consent in the popup window. """ if not ('hitId' in request.args and 'assignmentId' in request.args and 'workerId' in request.args): raise ExperimentError('hit_assign_worker_id_not_set_in_consent') hit_id = request.args['hitId'] assignment_id = request.args['assignmentId'] worker_id = request.args['workerId'] mode = request.args['mode'] with open('templates/consent.html', 'r') as temp_file: consent_string = temp_file.read() consent_string = insert_mode(consent_string, mode) return render_template_string( consent_string, hitid=hit_id, assignmentid=assignment_id, workerid=worker_id )
python
def give_consent(): """ Serves up the consent in the popup window. """ if not ('hitId' in request.args and 'assignmentId' in request.args and 'workerId' in request.args): raise ExperimentError('hit_assign_worker_id_not_set_in_consent') hit_id = request.args['hitId'] assignment_id = request.args['assignmentId'] worker_id = request.args['workerId'] mode = request.args['mode'] with open('templates/consent.html', 'r') as temp_file: consent_string = temp_file.read() consent_string = insert_mode(consent_string, mode) return render_template_string( consent_string, hitid=hit_id, assignmentid=assignment_id, workerid=worker_id )
[ "def", "give_consent", "(", ")", ":", "if", "not", "(", "'hitId'", "in", "request", ".", "args", "and", "'assignmentId'", "in", "request", ".", "args", "and", "'workerId'", "in", "request", ".", "args", ")", ":", "raise", "ExperimentError", "(", "'hit_assi...
Serves up the consent in the popup window.
[ "Serves", "up", "the", "consent", "in", "the", "popup", "window", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment.py#L346-L365
train
204,964
NYUCCL/psiTurk
psiturk/experiment.py
get_ad_via_hitid
def get_ad_via_hitid(hit_id): ''' Get ad via HIT id ''' username = CONFIG.get('psiTurk Access', 'psiturk_access_key_id') password = CONFIG.get('psiTurk Access', 'psiturk_secret_access_id') try: req = requests.get('https://api.psiturk.org/api/ad/lookup/' + hit_id, auth=(username, password)) except: raise ExperimentError('api_server_not_reachable') else: if req.status_code == 200: return req.json()['ad_id'] else: return "error"
python
def get_ad_via_hitid(hit_id): ''' Get ad via HIT id ''' username = CONFIG.get('psiTurk Access', 'psiturk_access_key_id') password = CONFIG.get('psiTurk Access', 'psiturk_secret_access_id') try: req = requests.get('https://api.psiturk.org/api/ad/lookup/' + hit_id, auth=(username, password)) except: raise ExperimentError('api_server_not_reachable') else: if req.status_code == 200: return req.json()['ad_id'] else: return "error"
[ "def", "get_ad_via_hitid", "(", "hit_id", ")", ":", "username", "=", "CONFIG", ".", "get", "(", "'psiTurk Access'", ",", "'psiturk_access_key_id'", ")", "password", "=", "CONFIG", ".", "get", "(", "'psiTurk Access'", ",", "'psiturk_secret_access_id'", ")", "try", ...
Get ad via HIT id
[ "Get", "ad", "via", "HIT", "id" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment.py#L367-L380
train
204,965
NYUCCL/psiTurk
psiturk/experiment.py
load
def load(uid=None): """ Load experiment data, which should be a JSON object and will be stored after converting to string. """ app.logger.info("GET /sync route with id: %s" % uid) try: user = Participant.query.\ filter(Participant.uniqueid == uid).\ one() except exc.SQLAlchemyError: app.logger.error("DB error: Unique user not found.") try: resp = json.loads(user.datastring) except: resp = { "condition": user.cond, "counterbalance": user.counterbalance, "assignmentId": user.assignmentid, "workerId": user.workerid, "hitId": user.hitid, "bonus": user.bonus } return jsonify(**resp)
python
def load(uid=None): """ Load experiment data, which should be a JSON object and will be stored after converting to string. """ app.logger.info("GET /sync route with id: %s" % uid) try: user = Participant.query.\ filter(Participant.uniqueid == uid).\ one() except exc.SQLAlchemyError: app.logger.error("DB error: Unique user not found.") try: resp = json.loads(user.datastring) except: resp = { "condition": user.cond, "counterbalance": user.counterbalance, "assignmentId": user.assignmentid, "workerId": user.workerid, "hitId": user.hitid, "bonus": user.bonus } return jsonify(**resp)
[ "def", "load", "(", "uid", "=", "None", ")", ":", "app", ".", "logger", ".", "info", "(", "\"GET /sync route with id: %s\"", "%", "uid", ")", "try", ":", "user", "=", "Participant", ".", "query", ".", "filter", "(", "Participant", ".", "uniqueid", "==", ...
Load experiment data, which should be a JSON object and will be stored after converting to string.
[ "Load", "experiment", "data", "which", "should", "be", "a", "JSON", "object", "and", "will", "be", "stored", "after", "converting", "to", "string", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment.py#L535-L561
train
204,966
NYUCCL/psiTurk
psiturk/experiment.py
update
def update(uid=None): """ Save experiment data, which should be a JSON object and will be stored after converting to string. """ app.logger.info("PUT /sync route with id: %s" % uid) try: user = Participant.query.\ filter(Participant.uniqueid == uid).\ one() except exc.SQLAlchemyError: app.logger.error("DB error: Unique user not found.") if hasattr(request, 'json'): user.datastring = request.data.decode('utf-8').encode( 'ascii', 'xmlcharrefreplace' ) db_session.add(user) db_session.commit() try: data = json.loads(user.datastring) except: data = {} trial = data.get("currenttrial", None) app.logger.info("saved data for %s (current trial: %s)", uid, trial) resp = {"status": "user data saved"} return jsonify(**resp)
python
def update(uid=None): """ Save experiment data, which should be a JSON object and will be stored after converting to string. """ app.logger.info("PUT /sync route with id: %s" % uid) try: user = Participant.query.\ filter(Participant.uniqueid == uid).\ one() except exc.SQLAlchemyError: app.logger.error("DB error: Unique user not found.") if hasattr(request, 'json'): user.datastring = request.data.decode('utf-8').encode( 'ascii', 'xmlcharrefreplace' ) db_session.add(user) db_session.commit() try: data = json.loads(user.datastring) except: data = {} trial = data.get("currenttrial", None) app.logger.info("saved data for %s (current trial: %s)", uid, trial) resp = {"status": "user data saved"} return jsonify(**resp)
[ "def", "update", "(", "uid", "=", "None", ")", ":", "app", ".", "logger", ".", "info", "(", "\"PUT /sync route with id: %s\"", "%", "uid", ")", "try", ":", "user", "=", "Participant", ".", "query", ".", "filter", "(", "Participant", ".", "uniqueid", "=="...
Save experiment data, which should be a JSON object and will be stored after converting to string.
[ "Save", "experiment", "data", "which", "should", "be", "a", "JSON", "object", "and", "will", "be", "stored", "after", "converting", "to", "string", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment.py#L564-L593
train
204,967
NYUCCL/psiTurk
psiturk/experiment.py
quitter
def quitter(): """ Mark quitter as such. """ unique_id = request.form['uniqueId'] if unique_id[:5] == "debug": debug_mode = True else: debug_mode = False if debug_mode: resp = {"status": "didn't mark as quitter since this is debugging"} return jsonify(**resp) else: try: unique_id = request.form['uniqueId'] app.logger.info("Marking quitter %s" % unique_id) user = Participant.query.\ filter(Participant.uniqueid == unique_id).\ one() user.status = QUITEARLY db_session.add(user) db_session.commit() except exc.SQLAlchemyError: raise ExperimentError('tried_to_quit') else: resp = {"status": "marked as quitter"} return jsonify(**resp)
python
def quitter(): """ Mark quitter as such. """ unique_id = request.form['uniqueId'] if unique_id[:5] == "debug": debug_mode = True else: debug_mode = False if debug_mode: resp = {"status": "didn't mark as quitter since this is debugging"} return jsonify(**resp) else: try: unique_id = request.form['uniqueId'] app.logger.info("Marking quitter %s" % unique_id) user = Participant.query.\ filter(Participant.uniqueid == unique_id).\ one() user.status = QUITEARLY db_session.add(user) db_session.commit() except exc.SQLAlchemyError: raise ExperimentError('tried_to_quit') else: resp = {"status": "marked as quitter"} return jsonify(**resp)
[ "def", "quitter", "(", ")", ":", "unique_id", "=", "request", ".", "form", "[", "'uniqueId'", "]", "if", "unique_id", "[", ":", "5", "]", "==", "\"debug\"", ":", "debug_mode", "=", "True", "else", ":", "debug_mode", "=", "False", "if", "debug_mode", ":...
Mark quitter as such.
[ "Mark", "quitter", "as", "such", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment.py#L596-L623
train
204,968
NYUCCL/psiTurk
psiturk/experiment.py
debug_complete
def debug_complete(): ''' Debugging route for complete. ''' if not 'uniqueId' in request.args: raise ExperimentError('improper_inputs') else: unique_id = request.args['uniqueId'] mode = request.args['mode'] try: user = Participant.query.\ filter(Participant.uniqueid == unique_id).one() user.status = COMPLETED user.endhit = datetime.datetime.now() db_session.add(user) db_session.commit() except: raise ExperimentError('error_setting_worker_complete') else: if (mode == 'sandbox' or mode == 'live'): # send them back to mturk. return render_template('closepopup.html') else: return render_template('complete.html')
python
def debug_complete(): ''' Debugging route for complete. ''' if not 'uniqueId' in request.args: raise ExperimentError('improper_inputs') else: unique_id = request.args['uniqueId'] mode = request.args['mode'] try: user = Participant.query.\ filter(Participant.uniqueid == unique_id).one() user.status = COMPLETED user.endhit = datetime.datetime.now() db_session.add(user) db_session.commit() except: raise ExperimentError('error_setting_worker_complete') else: if (mode == 'sandbox' or mode == 'live'): # send them back to mturk. return render_template('closepopup.html') else: return render_template('complete.html')
[ "def", "debug_complete", "(", ")", ":", "if", "not", "'uniqueId'", "in", "request", ".", "args", ":", "raise", "ExperimentError", "(", "'improper_inputs'", ")", "else", ":", "unique_id", "=", "request", ".", "args", "[", "'uniqueId'", "]", "mode", "=", "re...
Debugging route for complete.
[ "Debugging", "route", "for", "complete", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment.py#L629-L649
train
204,969
NYUCCL/psiTurk
psiturk/experiment.py
regularpage
def regularpage(foldername=None, pagename=None): """ Route not found by the other routes above. May point to a static template. """ if foldername is None and pagename is None: raise ExperimentError('page_not_found') if foldername is None and pagename is not None: return render_template(pagename) else: return render_template(foldername+"/"+pagename)
python
def regularpage(foldername=None, pagename=None): """ Route not found by the other routes above. May point to a static template. """ if foldername is None and pagename is None: raise ExperimentError('page_not_found') if foldername is None and pagename is not None: return render_template(pagename) else: return render_template(foldername+"/"+pagename)
[ "def", "regularpage", "(", "foldername", "=", "None", ",", "pagename", "=", "None", ")", ":", "if", "foldername", "is", "None", "and", "pagename", "is", "None", ":", "raise", "ExperimentError", "(", "'page_not_found'", ")", "if", "foldername", "is", "None", ...
Route not found by the other routes above. May point to a static template.
[ "Route", "not", "found", "by", "the", "other", "routes", "above", ".", "May", "point", "to", "a", "static", "template", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment.py#L724-L733
train
204,970
NYUCCL/psiTurk
psiturk/experiment.py
run_webserver
def run_webserver(): ''' Run web server ''' host = "0.0.0.0" port = CONFIG.getint('Server Parameters', 'port') print "Serving on ", "http://" + host + ":" + str(port) app.config['TEMPLATES_AUTO_RELOAD'] = True app.jinja_env.auto_reload = True app.run(debug=True, host=host, port=port)
python
def run_webserver(): ''' Run web server ''' host = "0.0.0.0" port = CONFIG.getint('Server Parameters', 'port') print "Serving on ", "http://" + host + ":" + str(port) app.config['TEMPLATES_AUTO_RELOAD'] = True app.jinja_env.auto_reload = True app.run(debug=True, host=host, port=port)
[ "def", "run_webserver", "(", ")", ":", "host", "=", "\"0.0.0.0\"", "port", "=", "CONFIG", ".", "getint", "(", "'Server Parameters'", ",", "'port'", ")", "print", "\"Serving on \"", ",", "\"http://\"", "+", "host", "+", "\":\"", "+", "str", "(", "port", ")"...
Run web server
[ "Run", "web", "server" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment.py#L735-L742
train
204,971
NYUCCL/psiTurk
psiturk/amt_services_wrapper.py
MTurkServicesWrapper.random_id_generator
def random_id_generator(self, size=6, chars=string.ascii_uppercase + string.digits): ''' Generate random id numbers ''' return ''.join(random.choice(chars) for x in range(size))
python
def random_id_generator(self, size=6, chars=string.ascii_uppercase + string.digits): ''' Generate random id numbers ''' return ''.join(random.choice(chars) for x in range(size))
[ "def", "random_id_generator", "(", "self", ",", "size", "=", "6", ",", "chars", "=", "string", ".", "ascii_uppercase", "+", "string", ".", "digits", ")", ":", "return", "''", ".", "join", "(", "random", ".", "choice", "(", "chars", ")", "for", "x", "...
Generate random id numbers
[ "Generate", "random", "id", "numbers" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services_wrapper.py#L99-L102
train
204,972
NYUCCL/psiTurk
psiturk/amt_services_wrapper.py
MTurkServicesWrapper.add_bonus
def add_bonus(worker_dict): " Adds DB-logged worker bonus to worker list data " try: unique_id = '{}:{}'.format(worker_dict['workerId'], worker_dict['assignmentId']) worker = Participant.query.filter( Participant.uniqueid == unique_id).one() worker_dict['bonus'] = worker.bonus except sa.exc.InvalidRequestError: # assignment is found on mturk but not in local database. worker_dict['bonus'] = 'N/A' return worker_dict
python
def add_bonus(worker_dict): " Adds DB-logged worker bonus to worker list data " try: unique_id = '{}:{}'.format(worker_dict['workerId'], worker_dict['assignmentId']) worker = Participant.query.filter( Participant.uniqueid == unique_id).one() worker_dict['bonus'] = worker.bonus except sa.exc.InvalidRequestError: # assignment is found on mturk but not in local database. worker_dict['bonus'] = 'N/A' return worker_dict
[ "def", "add_bonus", "(", "worker_dict", ")", ":", "try", ":", "unique_id", "=", "'{}:{}'", ".", "format", "(", "worker_dict", "[", "'workerId'", "]", ",", "worker_dict", "[", "'assignmentId'", "]", ")", "worker", "=", "Participant", ".", "query", ".", "fil...
Adds DB-logged worker bonus to worker list data
[ "Adds", "DB", "-", "logged", "worker", "bonus", "to", "worker", "list", "data" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services_wrapper.py#L108-L118
train
204,973
NYUCCL/psiTurk
psiturk/amt_services_wrapper.py
MTurkServicesWrapper.get_workers
def get_workers(self, status=None, chosen_hits=None, assignment_ids=None, all_studies=False): ''' Status, if set, can be one of `Submitted`, `Approved`, or `Rejected` ''' if assignment_ids: workers = [self.get_worker(assignment_id) for assignment_id in assignment_ids] else: workers = self.amt_services.get_workers(assignment_status=status, chosen_hits=chosen_hits) if workers is False: raise Exception('*** failed to get workers') if not all_studies: my_hitids = self._get_my_hitids() workers = [worker for worker in workers if worker['hitId'] in my_hitids] workers = [self.add_bonus(worker) for worker in workers] return workers
python
def get_workers(self, status=None, chosen_hits=None, assignment_ids=None, all_studies=False): ''' Status, if set, can be one of `Submitted`, `Approved`, or `Rejected` ''' if assignment_ids: workers = [self.get_worker(assignment_id) for assignment_id in assignment_ids] else: workers = self.amt_services.get_workers(assignment_status=status, chosen_hits=chosen_hits) if workers is False: raise Exception('*** failed to get workers') if not all_studies: my_hitids = self._get_my_hitids() workers = [worker for worker in workers if worker['hitId'] in my_hitids] workers = [self.add_bonus(worker) for worker in workers] return workers
[ "def", "get_workers", "(", "self", ",", "status", "=", "None", ",", "chosen_hits", "=", "None", ",", "assignment_ids", "=", "None", ",", "all_studies", "=", "False", ")", ":", "if", "assignment_ids", ":", "workers", "=", "[", "self", ".", "get_worker", "...
Status, if set, can be one of `Submitted`, `Approved`, or `Rejected`
[ "Status", "if", "set", "can", "be", "one", "of", "Submitted", "Approved", "or", "Rejected" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services_wrapper.py#L120-L137
train
204,974
NYUCCL/psiTurk
psiturk/amt_services_wrapper.py
MTurkServicesWrapper.hit_extend
def hit_extend(self, hit_id, assignments, minutes): """ Add additional worker assignments or minutes to a HIT. Args: hit_id: A list conaining one hit_id string. assignments: Variable <int> for number of assignments to add. minutes: Variable <int> for number of minutes to add. Returns: A side effect of this function is that the state of a HIT changes on AMT servers. Raises: """ assert type(hit_id) is list assert type(hit_id[0]) is str if self.amt_services.extend_hit(hit_id[0], assignments, minutes): print "HIT extended."
python
def hit_extend(self, hit_id, assignments, minutes): """ Add additional worker assignments or minutes to a HIT. Args: hit_id: A list conaining one hit_id string. assignments: Variable <int> for number of assignments to add. minutes: Variable <int> for number of minutes to add. Returns: A side effect of this function is that the state of a HIT changes on AMT servers. Raises: """ assert type(hit_id) is list assert type(hit_id[0]) is str if self.amt_services.extend_hit(hit_id[0], assignments, minutes): print "HIT extended."
[ "def", "hit_extend", "(", "self", ",", "hit_id", ",", "assignments", ",", "minutes", ")", ":", "assert", "type", "(", "hit_id", ")", "is", "list", "assert", "type", "(", "hit_id", "[", "0", "]", ")", "is", "str", "if", "self", ".", "amt_services", "....
Add additional worker assignments or minutes to a HIT. Args: hit_id: A list conaining one hit_id string. assignments: Variable <int> for number of assignments to add. minutes: Variable <int> for number of minutes to add. Returns: A side effect of this function is that the state of a HIT changes on AMT servers. Raises:
[ "Add", "additional", "worker", "assignments", "or", "minutes", "to", "a", "HIT", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services_wrapper.py#L323-L343
train
204,975
NYUCCL/psiTurk
psiturk/amt_services_wrapper.py
MTurkServicesWrapper.hit_delete
def hit_delete(self, all_hits, hit_ids=None): ''' Delete HIT. ''' if all_hits: hits_data = self.amt_services.get_all_hits() hit_ids = [hit.options['hitid'] for hit in hits_data if \ hit.options['status'] == "Reviewable"] for hit in hit_ids: # Check that the HIT is reviewable status = self.amt_services.get_hit_status(hit) if not status: print "*** Error getting hit status" return if self.amt_services.get_hit_status(hit) != "Reviewable": print("*** This hit is not 'Reviewable' and so can not be " "deleted") return else: success = self.amt_services.delete_hit(hit) # self.web_services.delete_ad(hit) # also delete the ad if success: if self.sandbox: print "deleting sandbox HIT", hit else: print "deleting live HIT", hit
python
def hit_delete(self, all_hits, hit_ids=None): ''' Delete HIT. ''' if all_hits: hits_data = self.amt_services.get_all_hits() hit_ids = [hit.options['hitid'] for hit in hits_data if \ hit.options['status'] == "Reviewable"] for hit in hit_ids: # Check that the HIT is reviewable status = self.amt_services.get_hit_status(hit) if not status: print "*** Error getting hit status" return if self.amt_services.get_hit_status(hit) != "Reviewable": print("*** This hit is not 'Reviewable' and so can not be " "deleted") return else: success = self.amt_services.delete_hit(hit) # self.web_services.delete_ad(hit) # also delete the ad if success: if self.sandbox: print "deleting sandbox HIT", hit else: print "deleting live HIT", hit
[ "def", "hit_delete", "(", "self", ",", "all_hits", ",", "hit_ids", "=", "None", ")", ":", "if", "all_hits", ":", "hits_data", "=", "self", ".", "amt_services", ".", "get_all_hits", "(", ")", "hit_ids", "=", "[", "hit", ".", "options", "[", "'hitid'", "...
Delete HIT.
[ "Delete", "HIT", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services_wrapper.py#L345-L368
train
204,976
NYUCCL/psiTurk
psiturk/amt_services_wrapper.py
MTurkServicesWrapper.hit_expire
def hit_expire(self, all_hits, hit_ids=None): ''' Expire all HITs. ''' if all_hits: hits_data = self.get_active_hits() hit_ids = [hit.options['hitid'] for hit in hits_data] for hit in hit_ids: success = self.amt_services.expire_hit(hit) if success: if self.sandbox: print "expiring sandbox HIT", hit else: print "expiring live HIT", hit
python
def hit_expire(self, all_hits, hit_ids=None): ''' Expire all HITs. ''' if all_hits: hits_data = self.get_active_hits() hit_ids = [hit.options['hitid'] for hit in hits_data] for hit in hit_ids: success = self.amt_services.expire_hit(hit) if success: if self.sandbox: print "expiring sandbox HIT", hit else: print "expiring live HIT", hit
[ "def", "hit_expire", "(", "self", ",", "all_hits", ",", "hit_ids", "=", "None", ")", ":", "if", "all_hits", ":", "hits_data", "=", "self", ".", "get_active_hits", "(", ")", "hit_ids", "=", "[", "hit", ".", "options", "[", "'hitid'", "]", "for", "hit", ...
Expire all HITs.
[ "Expire", "all", "HITs", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services_wrapper.py#L370-L381
train
204,977
NYUCCL/psiTurk
psiturk/amt_services_wrapper.py
MTurkServicesWrapper.hit_create
def hit_create(self, numWorkers, reward, duration): ''' Create a HIT ''' if self.sandbox: mode = 'sandbox' else: mode = 'live' server_loc = str(self.config.get('Server Parameters', 'host')) use_psiturk_ad_server = self.config.getboolean('Shell Parameters', 'use_psiturk_ad_server') if use_psiturk_ad_server: if not self.web_services.check_credentials(): error_msg = '\n'.join(['*****************************', ' Sorry, your psiTurk Credentials are invalid.\n ', ' You cannot create ads and hits until you enter valid credentials in ', ' the \'psiTurk Access\' section of ~/.psiturkconfig. You can obtain your', ' credentials or sign up at https://www.psiturk.org/login.\n']) raise Exception(error_msg) if not self.amt_services.verify_aws_login(): error_msg = '\n'.join(['*****************************', ' Sorry, your AWS Credentials are invalid.\n ', ' You cannot create ads and hits until you enter valid credentials in ', ' the \'AWS Access\' section of ~/.psiturkconfig. You can obtain your ', ' credentials via the Amazon AMT requester website.\n']) raise Exception(error_msg) ad_id = None if use_psiturk_ad_server: ad_id = self.create_psiturk_ad() create_failed = False fail_msg = None if ad_id is not False: ad_location = self.web_services.get_ad_url(ad_id, int(self.sandbox)) hit_config = self.generate_hit_config(ad_location, numWorkers, reward, duration) hit_id = self.amt_services.create_hit(hit_config) if hit_id is not False: if not self.web_services.set_ad_hitid(ad_id, hit_id, int(self.sandbox)): create_failed = True fail_msg = " Unable to update Ad on http://ad.psiturk.org to point at HIT." else: create_failed = True fail_msg = " Unable to create HIT on Amazon Mechanical Turk." else: create_failed = True fail_msg = " Unable to create Ad on http://ad.psiturk.org." 
else: # not using psiturk ad server ad_location = "{}?mode={}".format(self.config.get('Shell Parameters', 'ad_location'), mode ) hit_config = self.generate_hit_config(ad_location, numWorkers, reward, duration) create_failed = False hit_id = self.amt_services.create_hit(hit_config) if hit_id is False: create_failed = True fail_msg = " Unable to create HIT on Amazon Mechanical Turk." if create_failed: print '\n'.join(['*****************************', ' Sorry, there was an error creating hit and registering ad.']) if fail_msg is None: fail_msg = '' raise Exception(fail_msg) return (hit_id, ad_id)
python
def hit_create(self, numWorkers, reward, duration): ''' Create a HIT ''' if self.sandbox: mode = 'sandbox' else: mode = 'live' server_loc = str(self.config.get('Server Parameters', 'host')) use_psiturk_ad_server = self.config.getboolean('Shell Parameters', 'use_psiturk_ad_server') if use_psiturk_ad_server: if not self.web_services.check_credentials(): error_msg = '\n'.join(['*****************************', ' Sorry, your psiTurk Credentials are invalid.\n ', ' You cannot create ads and hits until you enter valid credentials in ', ' the \'psiTurk Access\' section of ~/.psiturkconfig. You can obtain your', ' credentials or sign up at https://www.psiturk.org/login.\n']) raise Exception(error_msg) if not self.amt_services.verify_aws_login(): error_msg = '\n'.join(['*****************************', ' Sorry, your AWS Credentials are invalid.\n ', ' You cannot create ads and hits until you enter valid credentials in ', ' the \'AWS Access\' section of ~/.psiturkconfig. You can obtain your ', ' credentials via the Amazon AMT requester website.\n']) raise Exception(error_msg) ad_id = None if use_psiturk_ad_server: ad_id = self.create_psiturk_ad() create_failed = False fail_msg = None if ad_id is not False: ad_location = self.web_services.get_ad_url(ad_id, int(self.sandbox)) hit_config = self.generate_hit_config(ad_location, numWorkers, reward, duration) hit_id = self.amt_services.create_hit(hit_config) if hit_id is not False: if not self.web_services.set_ad_hitid(ad_id, hit_id, int(self.sandbox)): create_failed = True fail_msg = " Unable to update Ad on http://ad.psiturk.org to point at HIT." else: create_failed = True fail_msg = " Unable to create HIT on Amazon Mechanical Turk." else: create_failed = True fail_msg = " Unable to create Ad on http://ad.psiturk.org." 
else: # not using psiturk ad server ad_location = "{}?mode={}".format(self.config.get('Shell Parameters', 'ad_location'), mode ) hit_config = self.generate_hit_config(ad_location, numWorkers, reward, duration) create_failed = False hit_id = self.amt_services.create_hit(hit_config) if hit_id is False: create_failed = True fail_msg = " Unable to create HIT on Amazon Mechanical Turk." if create_failed: print '\n'.join(['*****************************', ' Sorry, there was an error creating hit and registering ad.']) if fail_msg is None: fail_msg = '' raise Exception(fail_msg) return (hit_id, ad_id)
[ "def", "hit_create", "(", "self", ",", "numWorkers", ",", "reward", ",", "duration", ")", ":", "if", "self", ".", "sandbox", ":", "mode", "=", "'sandbox'", "else", ":", "mode", "=", "'live'", "server_loc", "=", "str", "(", "self", ".", "config", ".", ...
Create a HIT
[ "Create", "a", "HIT" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services_wrapper.py#L384-L448
train
204,978
NYUCCL/psiTurk
psiturk/amt_services_wrapper.py
MTurkServicesWrapper.db_aws_list_regions
def db_aws_list_regions(self): ''' List AWS DB regions ''' regions = self.db_services.list_regions() if regions != []: print "Avaliable AWS regions:" for reg in regions: print '\t' + reg, if reg == self.db_services.get_region(): print "(currently selected)" else: print ''
python
def db_aws_list_regions(self): ''' List AWS DB regions ''' regions = self.db_services.list_regions() if regions != []: print "Avaliable AWS regions:" for reg in regions: print '\t' + reg, if reg == self.db_services.get_region(): print "(currently selected)" else: print ''
[ "def", "db_aws_list_regions", "(", "self", ")", ":", "regions", "=", "self", ".", "db_services", ".", "list_regions", "(", ")", "if", "regions", "!=", "[", "]", ":", "print", "\"Avaliable AWS regions:\"", "for", "reg", "in", "regions", ":", "print", "'\\t'",...
List AWS DB regions
[ "List", "AWS", "DB", "regions" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services_wrapper.py#L533-L543
train
204,979
NYUCCL/psiTurk
psiturk/amt_services_wrapper.py
MTurkServicesWrapper.db_aws_set_region
def db_aws_set_region(self, region_name): ''' Set AWS region ''' # interactive = False # Not used if region_name is None: # interactive = True # Not used self.db_aws_list_regions() allowed_regions = self.db_services.list_regions() region_name = "NONSENSE WORD1234" tries = 0 while region_name not in allowed_regions: if tries == 0: region_name = raw_input('Enter the name of the region you ' 'would like to use: ') else: print("*** The region name (%s) you entered is not allowed, " \ "please choose from the list printed above (use type 'db " \ "aws_list_regions'." % region_name) region_name = raw_input('Enter the name of the region you ' 'would like to use: ') tries += 1 if tries > 5: print("*** Error, region you are requesting not available. " "No changes made to regions.") return self.db_services.set_region(region_name) print "Region updated to ", region_name self.config.set('AWS Access', 'aws_region', region_name, True) if self.server.is_server_running() == 'yes': self.server_restart()
python
def db_aws_set_region(self, region_name): ''' Set AWS region ''' # interactive = False # Not used if region_name is None: # interactive = True # Not used self.db_aws_list_regions() allowed_regions = self.db_services.list_regions() region_name = "NONSENSE WORD1234" tries = 0 while region_name not in allowed_regions: if tries == 0: region_name = raw_input('Enter the name of the region you ' 'would like to use: ') else: print("*** The region name (%s) you entered is not allowed, " \ "please choose from the list printed above (use type 'db " \ "aws_list_regions'." % region_name) region_name = raw_input('Enter the name of the region you ' 'would like to use: ') tries += 1 if tries > 5: print("*** Error, region you are requesting not available. " "No changes made to regions.") return self.db_services.set_region(region_name) print "Region updated to ", region_name self.config.set('AWS Access', 'aws_region', region_name, True) if self.server.is_server_running() == 'yes': self.server_restart()
[ "def", "db_aws_set_region", "(", "self", ",", "region_name", ")", ":", "# interactive = False # Not used", "if", "region_name", "is", "None", ":", "# interactive = True # Not used", "self", ".", "db_aws_list_regions", "(", ")", "allowed_regions", "=", "self", ".", "d...
Set AWS region
[ "Set", "AWS", "region" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services_wrapper.py#L549-L577
train
204,980
NYUCCL/psiTurk
psiturk/amt_services_wrapper.py
MTurkServicesWrapper.db_aws_list_instances
def db_aws_list_instances(self): ''' List AWS DB instances ''' instances = self.db_services.get_db_instances() if not instances: print("There are no DB instances associated with your AWS account " \ "in region " + self.db_services.get_region()) else: print("Here are the current DB instances associated with your AWS " \ "account in region " + self.db_services.get_region()) for dbinst in instances: print '\t'+'-'*20 print "\tInstance ID: " + dbinst.id print "\tStatus: " + dbinst.status
python
def db_aws_list_instances(self): ''' List AWS DB instances ''' instances = self.db_services.get_db_instances() if not instances: print("There are no DB instances associated with your AWS account " \ "in region " + self.db_services.get_region()) else: print("Here are the current DB instances associated with your AWS " \ "account in region " + self.db_services.get_region()) for dbinst in instances: print '\t'+'-'*20 print "\tInstance ID: " + dbinst.id print "\tStatus: " + dbinst.status
[ "def", "db_aws_list_instances", "(", "self", ")", ":", "instances", "=", "self", ".", "db_services", ".", "get_db_instances", "(", ")", "if", "not", "instances", ":", "print", "(", "\"There are no DB instances associated with your AWS account \"", "\"in region \"", "+",...
List AWS DB instances
[ "List", "AWS", "DB", "instances" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services_wrapper.py#L579-L591
train
204,981
NYUCCL/psiTurk
psiturk/amt_services_wrapper.py
MTurkServicesWrapper.db_aws_delete_instance
def db_aws_delete_instance(self, instance_id): ''' Delete AWS DB instance ''' interactive = False if instance_id is None: interactive = True instances = self.db_services.get_db_instances() instance_list = [dbinst.id for dbinst in instances] if interactive: valid = False if len(instances) == 0: print("There are no instances you can delete currently. Use " "`db aws_create_instance` to make one.") return print "Here are the available instances you can delete:" for inst in instances: print "\t ", inst.id, "(", inst.status, ")" while not valid: instance_id = raw_input('Enter the instance identity you would ' 'like to delete: ') res = self.db_services.validate_instance_id(instance_id) if res is True: valid = True else: print(res + " Try again, instance name not valid. Check " \ "for typos.") if instance_id in instance_list: valid = True else: valid = False print("Try again, instance not present in this account. " "Try again checking for typos.") else: res = self.db_services.validate_instance_id(instance_id) if res is not True: print("*** Error, instance name either not valid. Try again " "checking for typos.") return if instance_id not in instance_list: print("*** Error, This instance not present in this account. " "Try again checking for typos. Run `db aws_list_instances` to " "see valid list.") return user_input = raw_input( "Deleting an instance will erase all your data associated with the " "database in that instance. Really quit? y or n:" ) if user_input == 'y': res = self.db_services.delete_db_instance(instance_id) if res: print("AWS RDS database instance %s deleted. Run `db " \ "aws_list_instances` for current status." % instance_id) else: print("*** Error deleting database instance %s. " \ "It maybe because it is still being created, deleted, or is " \ "being backed up. Run `db aws_list_instances` for current " \ "status." % instance_id) else: return
python
def db_aws_delete_instance(self, instance_id): ''' Delete AWS DB instance ''' interactive = False if instance_id is None: interactive = True instances = self.db_services.get_db_instances() instance_list = [dbinst.id for dbinst in instances] if interactive: valid = False if len(instances) == 0: print("There are no instances you can delete currently. Use " "`db aws_create_instance` to make one.") return print "Here are the available instances you can delete:" for inst in instances: print "\t ", inst.id, "(", inst.status, ")" while not valid: instance_id = raw_input('Enter the instance identity you would ' 'like to delete: ') res = self.db_services.validate_instance_id(instance_id) if res is True: valid = True else: print(res + " Try again, instance name not valid. Check " \ "for typos.") if instance_id in instance_list: valid = True else: valid = False print("Try again, instance not present in this account. " "Try again checking for typos.") else: res = self.db_services.validate_instance_id(instance_id) if res is not True: print("*** Error, instance name either not valid. Try again " "checking for typos.") return if instance_id not in instance_list: print("*** Error, This instance not present in this account. " "Try again checking for typos. Run `db aws_list_instances` to " "see valid list.") return user_input = raw_input( "Deleting an instance will erase all your data associated with the " "database in that instance. Really quit? y or n:" ) if user_input == 'y': res = self.db_services.delete_db_instance(instance_id) if res: print("AWS RDS database instance %s deleted. Run `db " \ "aws_list_instances` for current status." % instance_id) else: print("*** Error deleting database instance %s. " \ "It maybe because it is still being created, deleted, or is " \ "being backed up. Run `db aws_list_instances` for current " \ "status." % instance_id) else: return
[ "def", "db_aws_delete_instance", "(", "self", ",", "instance_id", ")", ":", "interactive", "=", "False", "if", "instance_id", "is", "None", ":", "interactive", "=", "True", "instances", "=", "self", ".", "db_services", ".", "get_db_instances", "(", ")", "insta...
Delete AWS DB instance
[ "Delete", "AWS", "DB", "instance" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services_wrapper.py#L593-L653
train
204,982
NYUCCL/psiTurk
psiturk/experiment_server.py
ExperimentServer.init
def init(self, *args): '''init method Takes our custom options from self.options and creates a config dict which specifies custom settings. ''' cfg = {} for k, v in self.options.items(): if k.lower() in self.cfg.settings and v is not None: cfg[k.lower()] = v return cfg
python
def init(self, *args): '''init method Takes our custom options from self.options and creates a config dict which specifies custom settings. ''' cfg = {} for k, v in self.options.items(): if k.lower() in self.cfg.settings and v is not None: cfg[k.lower()] = v return cfg
[ "def", "init", "(", "self", ",", "*", "args", ")", ":", "cfg", "=", "{", "}", "for", "k", ",", "v", "in", "self", ".", "options", ".", "items", "(", ")", ":", "if", "k", ".", "lower", "(", ")", "in", "self", ".", "cfg", ".", "settings", "an...
init method Takes our custom options from self.options and creates a config dict which specifies custom settings.
[ "init", "method", "Takes", "our", "custom", "options", "from", "self", ".", "options", "and", "creates", "a", "config", "dict", "which", "specifies", "custom", "settings", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment_server.py#L33-L42
train
204,983
NYUCCL/psiTurk
psiturk/experiment_server_controller.py
wait_until_online
def wait_until_online(function, ip, port): """ Uses Wait_For_State to wait for the server to come online, then runs the given function. """ awaiting_service = Wait_For_State(lambda: not is_port_available(ip, port), function) awaiting_service.start() return awaiting_service
python
def wait_until_online(function, ip, port): """ Uses Wait_For_State to wait for the server to come online, then runs the given function. """ awaiting_service = Wait_For_State(lambda: not is_port_available(ip, port), function) awaiting_service.start() return awaiting_service
[ "def", "wait_until_online", "(", "function", ",", "ip", ",", "port", ")", ":", "awaiting_service", "=", "Wait_For_State", "(", "lambda", ":", "not", "is_port_available", "(", "ip", ",", "port", ")", ",", "function", ")", "awaiting_service", ".", "start", "("...
Uses Wait_For_State to wait for the server to come online, then runs the given function.
[ "Uses", "Wait_For_State", "to", "wait", "for", "the", "server", "to", "come", "online", "then", "runs", "the", "given", "function", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment_server_controller.py#L31-L37
train
204,984
NYUCCL/psiTurk
psiturk/command_line.py
process
def process(): ''' Figure out how we were invoked ''' invoked_as = os.path.basename(sys.argv[0]) if invoked_as == "psiturk": launch_shell() elif invoked_as == "psiturk-server": launch_server() elif invoked_as == "psiturk-shell": launch_shell() elif invoked_as == "psiturk-setup-example": setup_example() elif invoked_as == "psiturk-install": install_from_exchange()
python
def process(): ''' Figure out how we were invoked ''' invoked_as = os.path.basename(sys.argv[0]) if invoked_as == "psiturk": launch_shell() elif invoked_as == "psiturk-server": launch_server() elif invoked_as == "psiturk-shell": launch_shell() elif invoked_as == "psiturk-setup-example": setup_example() elif invoked_as == "psiturk-install": install_from_exchange()
[ "def", "process", "(", ")", ":", "invoked_as", "=", "os", ".", "path", ".", "basename", "(", "sys", ".", "argv", "[", "0", "]", ")", "if", "invoked_as", "==", "\"psiturk\"", ":", "launch_shell", "(", ")", "elif", "invoked_as", "==", "\"psiturk-server\"",...
Figure out how we were invoked
[ "Figure", "out", "how", "we", "were", "invoked" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/command_line.py#L10-L23
train
204,985
NYUCCL/psiTurk
psiturk/command_line.py
install_from_exchange
def install_from_exchange(): ''' Install from experiment exchange. ''' parser = argparse.ArgumentParser( description='Download experiment from the psiturk.org experiment\ exchange (http://psiturk.org/ee).' ) parser.add_argument( 'exp_id', metavar='exp_id', type=str, help='the id number of the\ experiment in the exchange' ) args = parser.parse_args() exp_exch = ExperimentExchangeServices() exp_exch.download_experiment(args.exp_id)
python
def install_from_exchange(): ''' Install from experiment exchange. ''' parser = argparse.ArgumentParser( description='Download experiment from the psiturk.org experiment\ exchange (http://psiturk.org/ee).' ) parser.add_argument( 'exp_id', metavar='exp_id', type=str, help='the id number of the\ experiment in the exchange' ) args = parser.parse_args() exp_exch = ExperimentExchangeServices() exp_exch.download_experiment(args.exp_id)
[ "def", "install_from_exchange", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Download experiment from the psiturk.org experiment\\\n exchange (http://psiturk.org/ee).'", ")", "parser", ".", "add_argument", "(", "'exp_id'", ...
Install from experiment exchange.
[ "Install", "from", "experiment", "exchange", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/command_line.py#L25-L37
train
204,986
NYUCCL/psiTurk
psiturk/command_line.py
setup_example
def setup_example(): ''' Add commands for testing, etc. ''' parser = argparse.ArgumentParser( description='Creates a simple default project (stroop) in the current\ directory with the necessary psiTurk files.' ) # Optional flags parser.add_argument( '-v', '--version', help='Print version number.', action="store_true" ) args = parser.parse_args() # If requested version just print and quite if args.version: print version_number else: import psiturk.setup_example as se se.setup_example()
python
def setup_example(): ''' Add commands for testing, etc. ''' parser = argparse.ArgumentParser( description='Creates a simple default project (stroop) in the current\ directory with the necessary psiTurk files.' ) # Optional flags parser.add_argument( '-v', '--version', help='Print version number.', action="store_true" ) args = parser.parse_args() # If requested version just print and quite if args.version: print version_number else: import psiturk.setup_example as se se.setup_example()
[ "def", "setup_example", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Creates a simple default project (stroop) in the current\\\n directory with the necessary psiTurk files.'", ")", "# Optional flags", "parser", ".", "add_argu...
Add commands for testing, etc.
[ "Add", "commands", "for", "testing", "etc", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/command_line.py#L39-L57
train
204,987
NYUCCL/psiTurk
psiturk/utils.py
colorize
def colorize(target, color, use_escape=True): ''' Colorize target string. Set use_escape to false when text will not be interpreted by readline, such as in intro message.''' def escape(code): ''' Escape character ''' return '\001%s\002' % code if color == 'purple': color_code = '\033[95m' elif color == 'cyan': color_code = '\033[96m' elif color == 'darkcyan': color_code = '\033[36m' elif color == 'blue': color_code = '\033[93m' elif color == 'green': color_code = '\033[92m' elif color == 'yellow': color_code = '\033[93m' elif color == 'red': color_code = '\033[91m' elif color == 'white': color_code = '\033[37m' elif color == 'bold': color_code = '\033[1m' elif color == 'underline': color_code = '\033[4m' else: color_code = '' if use_escape: return escape(color_code) + target + escape('\033[0m') else: return color_code + target + '\033[m'
python
def colorize(target, color, use_escape=True): ''' Colorize target string. Set use_escape to false when text will not be interpreted by readline, such as in intro message.''' def escape(code): ''' Escape character ''' return '\001%s\002' % code if color == 'purple': color_code = '\033[95m' elif color == 'cyan': color_code = '\033[96m' elif color == 'darkcyan': color_code = '\033[36m' elif color == 'blue': color_code = '\033[93m' elif color == 'green': color_code = '\033[92m' elif color == 'yellow': color_code = '\033[93m' elif color == 'red': color_code = '\033[91m' elif color == 'white': color_code = '\033[37m' elif color == 'bold': color_code = '\033[1m' elif color == 'underline': color_code = '\033[4m' else: color_code = '' if use_escape: return escape(color_code) + target + escape('\033[0m') else: return color_code + target + '\033[m'
[ "def", "colorize", "(", "target", ",", "color", ",", "use_escape", "=", "True", ")", ":", "def", "escape", "(", "code", ")", ":", "''' Escape character '''", "return", "'\\001%s\\002'", "%", "code", "if", "color", "==", "'purple'", ":", "color_code", "=", ...
Colorize target string. Set use_escape to false when text will not be interpreted by readline, such as in intro message.
[ "Colorize", "target", "string", ".", "Set", "use_escape", "to", "false", "when", "text", "will", "not", "be", "interpreted", "by", "readline", "such", "as", "in", "intro", "message", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/utils.py#L19-L50
train
204,988
NYUCCL/psiTurk
psiturk/psiturk_shell.py
PsiturkShell.default
def default(self, cmd): ''' Collect incorrect and mistyped commands ''' choices = ["help", "mode", "psiturk_status", "server", "shortcuts", "worker", "db", "edit", "open", "config", "show", "debug", "setup_example", "status", "tunnel", "amt_balance", "download_datafiles", "exit", "hit", "load", "quit", "save", "shell", "version"] print "%s is not a psiTurk command. See 'help'." %(cmd) print "Did you mean this?\n %s" %(process.extractOne(cmd, choices)[0])
python
def default(self, cmd): ''' Collect incorrect and mistyped commands ''' choices = ["help", "mode", "psiturk_status", "server", "shortcuts", "worker", "db", "edit", "open", "config", "show", "debug", "setup_example", "status", "tunnel", "amt_balance", "download_datafiles", "exit", "hit", "load", "quit", "save", "shell", "version"] print "%s is not a psiTurk command. See 'help'." %(cmd) print "Did you mean this?\n %s" %(process.extractOne(cmd, choices)[0])
[ "def", "default", "(", "self", ",", "cmd", ")", ":", "choices", "=", "[", "\"help\"", ",", "\"mode\"", ",", "\"psiturk_status\"", ",", "\"server\"", ",", "\"shortcuts\"", ",", "\"worker\"", ",", "\"db\"", ",", "\"edit\"", ",", "\"open\"", ",", "\"config\"", ...
Collect incorrect and mistyped commands
[ "Collect", "incorrect", "and", "mistyped", "commands" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L97-L106
train
204,989
NYUCCL/psiTurk
psiturk/psiturk_shell.py
PsiturkShell.check_offline_configuration
def check_offline_configuration(self): ''' Check offline configuration file''' quit_on_start = False database_url = self.config.get('Database Parameters', 'database_url') host = self.config.get('Server Parameters', 'host', 'localhost') if database_url[:6] != 'sqlite': print("*** Error: config.txt option 'database_url' set to use " "mysql://. Please change this sqllite:// while in cabin mode.") quit_on_start = True if host != 'localhost': print("*** Error: config option 'host' is not set to localhost. " "Please change this to localhost while in cabin mode.") quit_on_start = True if quit_on_start: exit()
python
def check_offline_configuration(self): ''' Check offline configuration file''' quit_on_start = False database_url = self.config.get('Database Parameters', 'database_url') host = self.config.get('Server Parameters', 'host', 'localhost') if database_url[:6] != 'sqlite': print("*** Error: config.txt option 'database_url' set to use " "mysql://. Please change this sqllite:// while in cabin mode.") quit_on_start = True if host != 'localhost': print("*** Error: config option 'host' is not set to localhost. " "Please change this to localhost while in cabin mode.") quit_on_start = True if quit_on_start: exit()
[ "def", "check_offline_configuration", "(", "self", ")", ":", "quit_on_start", "=", "False", "database_url", "=", "self", ".", "config", ".", "get", "(", "'Database Parameters'", ",", "'database_url'", ")", "host", "=", "self", ".", "config", ".", "get", "(", ...
Check offline configuration file
[ "Check", "offline", "configuration", "file" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L112-L126
train
204,990
NYUCCL/psiTurk
psiturk/psiturk_shell.py
PsiturkShell.get_intro_prompt
def get_intro_prompt(self): ''' Print cabin mode message ''' sys_status = open(self.help_path + 'cabin.txt', 'r') server_msg = sys_status.read() return server_msg + colorize('psiTurk version ' + version_number + '\nType "help" for more information.', 'green', False)
python
def get_intro_prompt(self): ''' Print cabin mode message ''' sys_status = open(self.help_path + 'cabin.txt', 'r') server_msg = sys_status.read() return server_msg + colorize('psiTurk version ' + version_number + '\nType "help" for more information.', 'green', False)
[ "def", "get_intro_prompt", "(", "self", ")", ":", "sys_status", "=", "open", "(", "self", ".", "help_path", "+", "'cabin.txt'", ",", "'r'", ")", "server_msg", "=", "sys_status", ".", "read", "(", ")", "return", "server_msg", "+", "colorize", "(", "'psiTurk...
Print cabin mode message
[ "Print", "cabin", "mode", "message" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L128-L134
train
204,991
NYUCCL/psiTurk
psiturk/psiturk_shell.py
PsiturkShell.color_prompt
def color_prompt(self): ''' Construct psiTurk shell prompt ''' prompt = '[' + colorize('psiTurk', 'bold') server_string = '' server_status = self.server.is_server_running() if server_status == 'yes': server_string = colorize('on', 'green') elif server_status == 'no': server_string = colorize('off', 'red') elif server_status == 'maybe': server_string = colorize('unknown', 'yellow') elif server_status == 'blocked': server_string = colorize('blocked', 'red') prompt += ' server:' + server_string prompt += ' mode:' + colorize('cabin', 'bold') prompt += ']$ ' self.prompt = prompt
python
def color_prompt(self): ''' Construct psiTurk shell prompt ''' prompt = '[' + colorize('psiTurk', 'bold') server_string = '' server_status = self.server.is_server_running() if server_status == 'yes': server_string = colorize('on', 'green') elif server_status == 'no': server_string = colorize('off', 'red') elif server_status == 'maybe': server_string = colorize('unknown', 'yellow') elif server_status == 'blocked': server_string = colorize('blocked', 'red') prompt += ' server:' + server_string prompt += ' mode:' + colorize('cabin', 'bold') prompt += ']$ ' self.prompt = prompt
[ "def", "color_prompt", "(", "self", ")", ":", "prompt", "=", "'['", "+", "colorize", "(", "'psiTurk'", ",", "'bold'", ")", "server_string", "=", "''", "server_status", "=", "self", ".", "server", ".", "is_server_running", "(", ")", "if", "server_status", "...
Construct psiTurk shell prompt
[ "Construct", "psiTurk", "shell", "prompt" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L140-L156
train
204,992
NYUCCL/psiTurk
psiturk/psiturk_shell.py
PsiturkShell.preloop
def preloop(self): ''' Keep persistent command history. ''' if not self.already_prelooped: self.already_prelooped = True open('.psiturk_history', 'a').close() # create file if it doesn't exist readline.read_history_file('.psiturk_history') for i in range(readline.get_current_history_length()): if readline.get_history_item(i) is not None: self.history.append(readline.get_history_item(i))
python
def preloop(self): ''' Keep persistent command history. ''' if not self.already_prelooped: self.already_prelooped = True open('.psiturk_history', 'a').close() # create file if it doesn't exist readline.read_history_file('.psiturk_history') for i in range(readline.get_current_history_length()): if readline.get_history_item(i) is not None: self.history.append(readline.get_history_item(i))
[ "def", "preloop", "(", "self", ")", ":", "if", "not", "self", ".", "already_prelooped", ":", "self", ".", "already_prelooped", "=", "True", "open", "(", "'.psiturk_history'", ",", "'a'", ")", ".", "close", "(", ")", "# create file if it doesn't exist", "readli...
Keep persistent command history.
[ "Keep", "persistent", "command", "history", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L158-L166
train
204,993
NYUCCL/psiTurk
psiturk/psiturk_shell.py
PsiturkShell.onecmd_plus_hooks
def onecmd_plus_hooks(self, line): ''' Trigger hooks after command. ''' if not line: return self.emptyline() return Cmd.onecmd_plus_hooks(self, line)
python
def onecmd_plus_hooks(self, line): ''' Trigger hooks after command. ''' if not line: return self.emptyline() return Cmd.onecmd_plus_hooks(self, line)
[ "def", "onecmd_plus_hooks", "(", "self", ",", "line", ")", ":", "if", "not", "line", ":", "return", "self", ".", "emptyline", "(", ")", "return", "Cmd", ".", "onecmd_plus_hooks", "(", "self", ",", "line", ")" ]
Trigger hooks after command.
[ "Trigger", "hooks", "after", "command", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L172-L176
train
204,994
NYUCCL/psiTurk
psiturk/psiturk_shell.py
PsiturkShell.postcmd
def postcmd(self, stop, line): ''' Exit cmd cleanly. ''' self.color_prompt() return Cmd.postcmd(self, stop, line)
python
def postcmd(self, stop, line): ''' Exit cmd cleanly. ''' self.color_prompt() return Cmd.postcmd(self, stop, line)
[ "def", "postcmd", "(", "self", ",", "stop", ",", "line", ")", ":", "self", ".", "color_prompt", "(", ")", "return", "Cmd", ".", "postcmd", "(", "self", ",", "stop", ",", "line", ")" ]
Exit cmd cleanly.
[ "Exit", "cmd", "cleanly", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L178-L181
train
204,995
NYUCCL/psiTurk
psiturk/psiturk_shell.py
PsiturkShell.hit_list
def hit_list(self, active_hits, reviewable_hits, all_studies): ''' List hits. ''' if active_hits: hits_data = self.amt_services_wrapper.get_active_hits(all_studies) elif reviewable_hits: hits_data = self.amt_services_wrapper.get_reviewable_hits(all_studies) else: hits_data = self.amt_services_wrapper.get_all_hits(all_studies) if not hits_data: print '*** no hits retrieved' else: for hit in hits_data: print hit
python
def hit_list(self, active_hits, reviewable_hits, all_studies): ''' List hits. ''' if active_hits: hits_data = self.amt_services_wrapper.get_active_hits(all_studies) elif reviewable_hits: hits_data = self.amt_services_wrapper.get_reviewable_hits(all_studies) else: hits_data = self.amt_services_wrapper.get_all_hits(all_studies) if not hits_data: print '*** no hits retrieved' else: for hit in hits_data: print hit
[ "def", "hit_list", "(", "self", ",", "active_hits", ",", "reviewable_hits", ",", "all_studies", ")", ":", "if", "active_hits", ":", "hits_data", "=", "self", ".", "amt_services_wrapper", ".", "get_active_hits", "(", "all_studies", ")", "elif", "reviewable_hits", ...
List hits.
[ "List", "hits", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L195-L207
train
204,996
NYUCCL/psiTurk
psiturk/psiturk_shell.py
PsiturkShell._confirm_dialog
def _confirm_dialog(self, prompt): ''' Prompts for a 'yes' or 'no' to given prompt. ''' response = raw_input(prompt).strip().lower() valid = {'y': True, 'ye': True, 'yes': True, 'n': False, 'no': False} while True: try: return valid[response] except: response = raw_input("Please respond 'y' or 'n': ").strip().lower()
python
def _confirm_dialog(self, prompt): ''' Prompts for a 'yes' or 'no' to given prompt. ''' response = raw_input(prompt).strip().lower() valid = {'y': True, 'ye': True, 'yes': True, 'n': False, 'no': False} while True: try: return valid[response] except: response = raw_input("Please respond 'y' or 'n': ").strip().lower()
[ "def", "_confirm_dialog", "(", "self", ",", "prompt", ")", ":", "response", "=", "raw_input", "(", "prompt", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "valid", "=", "{", "'y'", ":", "True", ",", "'ye'", ":", "True", ",", "'yes'", ":", ...
Prompts for a 'yes' or 'no' to given prompt.
[ "Prompts", "for", "a", "yes", "or", "no", "to", "given", "prompt", "." ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L224-L232
train
204,997
NYUCCL/psiTurk
psiturk/psiturk_shell.py
PsiturkShell.server_off
def server_off(self): ''' Stop experiment server ''' if (self.server.is_server_running() == 'yes' or self.server.is_server_running() == 'maybe'): self.server.shutdown() print 'Please wait. This could take a few seconds.' time.sleep(0.5)
python
def server_off(self): ''' Stop experiment server ''' if (self.server.is_server_running() == 'yes' or self.server.is_server_running() == 'maybe'): self.server.shutdown() print 'Please wait. This could take a few seconds.' time.sleep(0.5)
[ "def", "server_off", "(", "self", ")", ":", "if", "(", "self", ".", "server", ".", "is_server_running", "(", ")", "==", "'yes'", "or", "self", ".", "server", ".", "is_server_running", "(", ")", "==", "'maybe'", ")", ":", "self", ".", "server", ".", "...
Stop experiment server
[ "Stop", "experiment", "server" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L420-L426
train
204,998
NYUCCL/psiTurk
psiturk/psiturk_shell.py
PsiturkShell.complete_config
def complete_config(self, text, line, begidx, endidx): ''' Tab-complete config command ''' return [i for i in PsiturkShell.config_commands if i.startswith(text)]
python
def complete_config(self, text, line, begidx, endidx): ''' Tab-complete config command ''' return [i for i in PsiturkShell.config_commands if i.startswith(text)]
[ "def", "complete_config", "(", "self", ",", "text", ",", "line", ",", "begidx", ",", "endidx", ")", ":", "return", "[", "i", "for", "i", "in", "PsiturkShell", ".", "config_commands", "if", "i", ".", "startswith", "(", "text", ")", "]" ]
Tab-complete config command
[ "Tab", "-", "complete", "config", "command" ]
7170b992a0b5f56c165929cf87b3d3a1f3336c36
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L516-L518
train
204,999